summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlex Richardson <alexrichardson@google.com>2024-03-01 19:08:56 -0800
committerAlex Richardson <alexrichardson@google.com>2024-03-01 19:08:56 -0800
commit54cdd2c2b0fd83272705d184e3d632ebbe291cfe (patch)
tree413adc951ab41639e0f5e3b78155343f9ab5f6b3
parentd82e93e7f129d9e8b72570efdf4a15d6ec3d4336 (diff)
parent73dfc7bbadddeb2930b11e4ad07f9a8e8b498cc7 (diff)
Created using spr 1.3.4 [skip ci]
-rw-r--r--.github/new-prs-labeler.yml3
-rw-r--r--bolt/include/bolt/Core/BinaryFunction.h8
-rw-r--r--bolt/include/bolt/Core/MCPlusBuilder.h9
-rw-r--r--bolt/lib/Core/BinaryContext.cpp2
-rw-r--r--bolt/lib/Core/BinaryEmitter.cpp2
-rw-r--r--bolt/lib/Core/BinaryFunction.cpp26
-rw-r--r--bolt/lib/Core/Exceptions.cpp5
-rw-r--r--bolt/lib/Core/MCPlusBuilder.cpp19
-rw-r--r--bolt/lib/Rewrite/LinuxKernelRewriter.cpp14
-rw-r--r--bolt/test/runtime/X86/instrument-wrong-target.s (renamed from bolt/test/runtime/instrument-wrong-target.s)4
-rw-r--r--clang-tools-extra/clang-tidy/modernize/CMakeLists.txt1
-rw-r--r--clang-tools-extra/clang-tidy/modernize/ModernizeTidyModule.cpp3
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.cpp184
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.h40
-rw-r--r--clang-tools-extra/clang-tidy/utils/CMakeLists.txt1
-rw-r--r--clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp195
-rw-r--r--clang-tools-extra/clang-tidy/utils/DesignatedInitializers.h42
-rw-r--r--clang-tools-extra/clangd/CMakeLists.txt1
-rw-r--r--clang-tools-extra/clangd/InlayHints.cpp170
-rw-r--r--clang-tools-extra/clangd/tool/CMakeLists.txt1
-rw-r--r--clang-tools-extra/clangd/unittests/CMakeLists.txt1
-rw-r--r--clang-tools-extra/docs/ReleaseNotes.rst6
-rw-r--r--clang-tools-extra/docs/clang-tidy/checks/list.rst1
-rw-r--r--clang-tools-extra/docs/clang-tidy/checks/modernize/use-designated-initializers.rst62
-rw-r--r--clang-tools-extra/include-cleaner/lib/WalkAST.cpp5
-rw-r--r--clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp10
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/modernize/use-designated-initializers.cpp203
-rw-r--r--clang/docs/LanguageExtensions.rst8
-rw-r--r--clang/docs/LibASTMatchersReference.html14
-rw-r--r--clang/docs/ReleaseNotes.rst26
-rwxr-xr-xclang/docs/tools/dump_ast_matchers.py2
-rw-r--r--clang/include/clang/APINotes/APINotesWriter.h13
-rw-r--r--clang/include/clang/ASTMatchers/ASTMatchers.h16
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h9
-rw-r--r--clang/include/clang/Basic/Builtins.td14
-rw-r--r--clang/include/clang/Basic/DiagnosticDriverKinds.td1
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td20
-rw-r--r--clang/include/clang/Basic/TargetInfo.h36
-rw-r--r--clang/include/clang/Driver/Driver.h3
-rw-r--r--clang/include/clang/Driver/Options.td31
-rw-r--r--clang/include/clang/Frontend/CompilerInstance.h3
-rw-r--r--clang/include/clang/Frontend/CompilerInvocation.h5
-rw-r--r--clang/include/clang/InstallAPI/Context.h50
-rw-r--r--clang/include/clang/InstallAPI/Frontend.h104
-rw-r--r--clang/include/clang/InstallAPI/HeaderFile.h23
-rw-r--r--clang/include/clang/InstallAPI/Visitor.h60
-rw-r--r--clang/include/clang/Sema/Scope.h14
-rw-r--r--clang/lib/APINotes/APINotesReader.cpp8
-rw-r--r--clang/lib/AST/DeclCXX.cpp15
-rw-r--r--clang/lib/AST/Interp/ByteCodeEmitter.cpp8
-rw-r--r--clang/lib/AST/Interp/ByteCodeEmitter.h2
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.cpp57
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.h29
-rw-r--r--clang/lib/AST/Interp/Context.cpp46
-rw-r--r--clang/lib/AST/Interp/Context.h1
-rw-r--r--clang/lib/AST/Interp/EvalEmitter.cpp7
-rw-r--r--clang/lib/AST/Interp/EvalEmitter.h5
-rw-r--r--clang/lib/AST/Interp/EvaluationResult.h3
-rw-r--r--clang/lib/AST/Interp/Interp.cpp30
-rw-r--r--clang/lib/AST/Interp/Interp.h50
-rw-r--r--clang/lib/AST/Interp/Opcodes.td8
-rw-r--r--clang/lib/AST/Interp/Pointer.h1
-rw-r--r--clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp25
-rw-r--r--clang/lib/Analysis/FlowSensitive/Transfer.cpp40
-rw-r--r--clang/lib/Analysis/UnsafeBufferUsage.cpp259
-rw-r--r--clang/lib/CodeGen/ABIInfo.cpp52
-rw-r--r--clang/lib/CodeGen/ABIInfo.h10
-rw-r--r--clang/lib/CodeGen/BackendUtil.cpp32
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp52
-rw-r--r--clang/lib/CodeGen/CGCall.cpp17
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp64
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp111
-rw-r--r--clang/lib/CodeGen/Targets/AArch64.cpp38
-rw-r--r--clang/lib/CodeGen/Targets/ARM.cpp8
-rw-r--r--clang/lib/Driver/ToolChains/Arch/AArch64.cpp3
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp9
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.cpp12
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.h3
-rw-r--r--clang/lib/Driver/ToolChains/Cuda.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/Linux.cpp17
-rw-r--r--clang/lib/Frontend/CompilerInstance.cpp55
-rw-r--r--clang/lib/Frontend/CompilerInvocation.cpp19
-rw-r--r--clang/lib/Headers/__clang_hip_math.h72
-rw-r--r--clang/lib/Headers/emmintrin.h121
-rw-r--r--clang/lib/Headers/fmaintrin.h48
-rw-r--r--clang/lib/Headers/hlsl/hlsl_intrinsics.h102
-rw-r--r--clang/lib/Headers/mmintrin.h148
-rw-r--r--clang/lib/Headers/prfchwintrin.h16
-rw-r--r--clang/lib/Headers/smmintrin.h20
-rw-r--r--clang/lib/Headers/tmmintrin.h36
-rw-r--r--clang/lib/Index/IndexSymbol.cpp3
-rw-r--r--clang/lib/Index/IndexingAction.cpp2
-rw-r--r--clang/lib/InstallAPI/CMakeLists.txt4
-rw-r--r--clang/lib/InstallAPI/Frontend.cpp129
-rw-r--r--clang/lib/InstallAPI/Visitor.cpp158
-rw-r--r--clang/lib/Sema/JumpDiagnostics.cpp19
-rw-r--r--clang/lib/Sema/SemaChecking.cpp141
-rw-r--r--clang/lib/Sema/SemaDecl.cpp3
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp6
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp2
-rw-r--r--clang/lib/Sema/SemaDeclObjC.cpp8
-rw-r--r--clang/lib/Sema/SemaExpr.cpp1
-rw-r--r--clang/lib/Sema/SemaExprCXX.cpp8
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp3
-rw-r--r--clang/lib/Sema/SemaStmt.cpp30
-rw-r--r--clang/lib/Sema/SemaTemplate.cpp14
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp297
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp8
-rw-r--r--clang/test/AST/Interp/arrays.cpp5
-rw-r--r--clang/test/AST/Interp/c.c8
-rw-r--r--clang/test/AST/Interp/cxx11.cpp8
-rw-r--r--clang/test/AST/Interp/cxx20.cpp187
-rw-r--r--clang/test/AST/Interp/cxx98.cpp3
-rw-r--r--clang/test/AST/Interp/functions.cpp10
-rw-r--r--clang/test/AST/Interp/lambda.cpp13
-rw-r--r--clang/test/AST/Interp/literals.cpp37
-rw-r--r--clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp17
-rw-r--r--clang/test/Analysis/stream-note.c67
-rw-r--r--clang/test/CXX/drs/dr18xx.cpp1
-rw-r--r--clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm9
-rw-r--r--clang/test/CodeGen/PowerPC/aix-tls-model.cpp9
-rw-r--r--clang/test/CodeGen/aarch64-cpu-supports.c8
-rw-r--r--clang/test/CodeGen/attr-target-clones-aarch64.c20
-rw-r--r--clang/test/CodeGen/attr-target-version.c60
-rw-r--r--clang/test/CodeGen/builtins-hexagon.c2
-rw-r--r--clang/test/CodeGen/builtins.c28
-rw-r--r--clang/test/CodeGen/fat-lto-objects.c21
-rw-r--r--clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp74
-rw-r--r--clang/test/CodeGenCXX/attr-target-version.cpp4
-rw-r--r--clang/test/CodeGenHLSL/builtins/abs.hlsl134
-rw-r--r--clang/test/CodeGenHLSL/builtins/ceil.hlsl69
-rw-r--r--clang/test/CodeGenHLSL/builtins/cos.hlsl69
-rw-r--r--clang/test/CodeGenHLSL/builtins/dot.hlsl160
-rw-r--r--clang/test/CodeGenHLSL/builtins/floor.hlsl69
-rw-r--r--clang/test/CodeGenHLSL/builtins/frac.hlsl53
-rw-r--r--clang/test/CodeGenHLSL/builtins/lerp-builtin.hlsl37
-rw-r--r--clang/test/CodeGenHLSL/builtins/lerp.hlsl83
-rw-r--r--clang/test/CodeGenHLSL/builtins/log.hlsl67
-rw-r--r--clang/test/CodeGenHLSL/builtins/log10.hlsl69
-rw-r--r--clang/test/CodeGenHLSL/builtins/log2.hlsl69
-rw-r--r--clang/test/CodeGenHLSL/builtins/max.hlsl214
-rw-r--r--clang/test/CodeGenHLSL/builtins/min.hlsl207
-rw-r--r--clang/test/CodeGenHLSL/builtins/pow.hlsl97
-rw-r--r--clang/test/CodeGenHLSL/builtins/round.hlsl53
-rw-r--r--clang/test/CodeGenHLSL/builtins/sin.hlsl59
-rw-r--r--clang/test/CodeGenHLSL/builtins/trunc.hlsl75
-rw-r--r--clang/test/Driver/aarch64-rdm.c3
-rw-r--r--clang/test/Driver/android-link.cpp3
-rw-r--r--clang/test/Driver/basic-block-address-map.c7
-rw-r--r--clang/test/Driver/darwin-header-search-libcxx.cpp2
-rw-r--r--clang/test/Driver/mingw-sysroot.cpp12
-rw-r--r--clang/test/Driver/no-canonical-prefixes.c2
-rw-r--r--clang/test/Driver/openmp-offload-gpu.c11
-rw-r--r--clang/test/Driver/program-path-priority.c13
-rw-r--r--clang/test/Driver/rocm-detect.hip4
-rw-r--r--clang/test/Driver/split-debug.c5
-rw-r--r--clang/test/InstallAPI/basic.test5
-rw-r--r--clang/test/InstallAPI/objcclasses.test85
-rw-r--r--clang/test/InstallAPI/variables.test63
-rw-r--r--clang/test/Misc/warning-flags.c3
-rw-r--r--clang/test/OpenMP/interop_codegen.cpp45
-rw-r--r--clang/test/OpenMP/scan_ast_print.cpp18
-rw-r--r--clang/test/Preprocessor/has_attribute.cpp4
-rw-r--r--clang/test/Preprocessor/riscv-target-features.c18
-rw-r--r--clang/test/Sema/aarch64-cpu-supports.c10
-rw-r--r--clang/test/Sema/aix-attr-tls_model.c2
-rw-r--r--clang/test/Sema/attr-target-clones-aarch64.c2
-rw-r--r--clang/test/Sema/builtin-cpu-supports.c6
-rw-r--r--clang/test/Sema/builtin-popcountg.c17
-rw-r--r--clang/test/SemaCXX/GH83461.cpp60
-rw-r--r--clang/test/SemaCXX/attr-declspec-ignored.cpp3
-rw-r--r--clang/test/SemaCXX/attr-gnu.cpp29
-rw-r--r--clang/test/SemaCXX/attr-target-version.cpp1
-rw-r--r--clang/test/SemaCXX/bool.cpp5
-rw-r--r--clang/test/SemaCXX/cxx03-cxx11-attr.cpp9
-rw-r--r--clang/test/SemaCXX/restrict-this.cpp69
-rw-r--r--clang/test/SemaCXX/undefined-internal.cpp10
-rw-r--r--clang/test/SemaCXX/warn-bool-conversion.cpp27
-rw-r--r--clang/test/SemaCXX/warn-unused-variables.cpp33
-rw-r--r--clang/test/SemaHLSL/BuiltIns/dot-errors.hlsl113
-rw-r--r--clang/test/SemaHLSL/BuiltIns/frac-errors.hlsl27
-rw-r--r--clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl96
-rw-r--r--clang/test/SemaHLSL/BuiltIns/round-errors.hlsl27
-rw-r--r--clang/test/SemaHLSL/OverloadResolutionBugs.hlsl64
-rw-r--r--clang/test/SemaHLSL/VectorOverloadResolution.hlsl2
-rw-r--r--clang/test/SemaOpenACC/no-branch-in-out.c224
-rw-r--r--clang/test/SemaOpenACC/no-branch-in-out.cpp185
-rw-r--r--clang/tools/clang-installapi/CMakeLists.txt1
-rw-r--r--clang/tools/clang-installapi/ClangInstallAPI.cpp79
-rw-r--r--clang/tools/clang-installapi/Options.cpp29
-rw-r--r--clang/tools/clang-installapi/Options.h8
-rw-r--r--clang/tools/driver/driver.cpp23
-rw-r--r--clang/unittests/Analysis/FlowSensitive/TestingSupport.h19
-rw-r--r--clang/unittests/Analysis/FlowSensitive/TransferTest.cpp82
-rw-r--r--clang/unittests/Format/FormatTest.cpp23
-rw-r--r--clang/utils/TableGen/ClangAttrEmitter.cpp12
-rw-r--r--compiler-rt/cmake/Modules/CompilerRTCompile.cmake11
-rw-r--r--compiler-rt/lib/asan/tests/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/builtins/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/fuzzer/tests/CMakeLists.txt4
-rw-r--r--compiler-rt/lib/gwp_asan/tests/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/interception/tests/CMakeLists.txt1
-rw-r--r--compiler-rt/lib/msan/tests/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/orc/tests/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/profile/InstrProfilingBuffer.c11
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h22
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp22
-rw-r--r--compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt1
-rw-r--r--compiler-rt/lib/scudo/standalone/allocator_common.h7
-rw-r--r--compiler-rt/lib/scudo/standalone/primary32.h106
-rw-r--r--compiler-rt/lib/scudo/standalone/primary64.h153
-rw-r--r--compiler-rt/lib/scudo/standalone/secondary.h24
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/primary_test.cpp34
-rw-r--r--compiler-rt/lib/tsan/tests/CMakeLists.txt4
-rw-r--r--compiler-rt/lib/xray/tests/CMakeLists.txt2
-rw-r--r--compiler-rt/test/builtins/Unit/ctor_dtor.c10
-rw-r--r--compiler-rt/test/dfsan/reaches_function.c12
-rw-r--r--compiler-rt/test/hwasan/TestCases/longjmp-out-of-range.c20
-rw-r--r--compiler-rt/test/profile/instrprof-basic.c12
-rw-r--r--flang/docs/Directives.md6
-rw-r--r--flang/docs/Extensions.md19
-rw-r--r--flang/include/flang/Common/float128.h23
-rw-r--r--flang/include/flang/Evaluate/characteristics.h4
-rw-r--r--flang/include/flang/Evaluate/tools.h2
-rw-r--r--flang/include/flang/Lower/AbstractConverter.h6
-rw-r--r--flang/include/flang/Lower/LoweringOptions.def4
-rw-r--r--flang/include/flang/Optimizer/Builder/FIRBuilder.h3
-rw-r--r--flang/include/flang/Parser/dump-parse-tree.h1
-rw-r--r--flang/include/flang/Parser/parse-tree.h15
-rw-r--r--flang/include/flang/Parser/provenance.h1
-rw-r--r--flang/include/flang/Parser/source.h2
-rw-r--r--flang/include/flang/Runtime/reduction.h17
-rw-r--r--flang/include/flang/Semantics/expression.h16
-rw-r--r--flang/include/flang/Semantics/module-dependences.h51
-rw-r--r--flang/include/flang/Semantics/semantics.h3
-rw-r--r--flang/include/flang/Semantics/symbol.h9
-rw-r--r--flang/lib/Decimal/decimal-to-binary.cpp8
-rw-r--r--flang/lib/Evaluate/characteristics.cpp13
-rw-r--r--flang/lib/Evaluate/intrinsics.cpp12
-rw-r--r--flang/lib/Evaluate/tools.cpp7
-rw-r--r--flang/lib/Lower/Bridge.cpp24
-rw-r--r--flang/lib/Lower/HostAssociations.cpp64
-rw-r--r--flang/lib/Lower/OpenMP/DataSharingProcessor.cpp108
-rw-r--r--flang/lib/Lower/OpenMP/DataSharingProcessor.h38
-rw-r--r--flang/lib/Lower/OpenMP/OpenMP.cpp94
-rw-r--r--flang/lib/Lower/OpenMP/Utils.cpp6
-rw-r--r--flang/lib/Lower/OpenMP/Utils.h1
-rw-r--r--flang/lib/Optimizer/Builder/FIRBuilder.cpp8
-rw-r--r--flang/lib/Optimizer/Builder/IntrinsicCall.cpp35
-rw-r--r--flang/lib/Optimizer/Builder/MutableBox.cpp2
-rw-r--r--flang/lib/Optimizer/Builder/Runtime/Reduction.cpp25
-rw-r--r--flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp32
-rw-r--r--flang/lib/Parser/Fortran-parsers.cpp4
-rw-r--r--flang/lib/Parser/provenance.cpp18
-rw-r--r--flang/lib/Parser/source.cpp18
-rw-r--r--flang/lib/Parser/unparse.cpp10
-rw-r--r--flang/lib/Semantics/check-call.cpp31
-rw-r--r--flang/lib/Semantics/check-call.h2
-rw-r--r--flang/lib/Semantics/check-declarations.cpp6
-rw-r--r--flang/lib/Semantics/data-to-inits.cpp2
-rw-r--r--flang/lib/Semantics/expression.cpp48
-rw-r--r--flang/lib/Semantics/mod-file.cpp208
-rw-r--r--flang/lib/Semantics/mod-file.h11
-rw-r--r--flang/lib/Semantics/pointer-assignment.cpp3
-rw-r--r--flang/lib/Semantics/resolve-names.cpp466
-rw-r--r--flang/lib/Semantics/resolve-names.h4
-rw-r--r--flang/lib/Semantics/semantics.cpp2
-rw-r--r--flang/runtime/Float128Math/CMakeLists.txt10
-rw-r--r--flang/runtime/Float128Math/acos.cpp2
-rw-r--r--flang/runtime/Float128Math/acosh.cpp2
-rw-r--r--flang/runtime/Float128Math/asin.cpp2
-rw-r--r--flang/runtime/Float128Math/asinh.cpp2
-rw-r--r--flang/runtime/Float128Math/atan.cpp2
-rw-r--r--flang/runtime/Float128Math/atan2.cpp2
-rw-r--r--flang/runtime/Float128Math/atanh.cpp2
-rw-r--r--flang/runtime/Float128Math/cabs.cpp14
-rw-r--r--flang/runtime/Float128Math/ceil.cpp2
-rw-r--r--flang/runtime/Float128Math/cos.cpp2
-rw-r--r--flang/runtime/Float128Math/cosh.cpp2
-rw-r--r--flang/runtime/Float128Math/erf.cpp2
-rw-r--r--flang/runtime/Float128Math/erfc.cpp2
-rw-r--r--flang/runtime/Float128Math/exp.cpp2
-rw-r--r--flang/runtime/Float128Math/exponent.cpp26
-rw-r--r--flang/runtime/Float128Math/floor.cpp2
-rw-r--r--flang/runtime/Float128Math/fraction.cpp21
-rw-r--r--flang/runtime/Float128Math/hypot.cpp2
-rw-r--r--flang/runtime/Float128Math/j0.cpp2
-rw-r--r--flang/runtime/Float128Math/j1.cpp2
-rw-r--r--flang/runtime/Float128Math/jn.cpp2
-rw-r--r--flang/runtime/Float128Math/lgamma.cpp2
-rw-r--r--flang/runtime/Float128Math/llround.cpp2
-rw-r--r--flang/runtime/Float128Math/log.cpp2
-rw-r--r--flang/runtime/Float128Math/log10.cpp2
-rw-r--r--flang/runtime/Float128Math/lround.cpp2
-rw-r--r--flang/runtime/Float128Math/math-entries.h149
-rw-r--r--flang/runtime/Float128Math/mod-real.cpp24
-rw-r--r--flang/runtime/Float128Math/modulo-real.cpp24
-rw-r--r--flang/runtime/Float128Math/nearest.cpp23
-rw-r--r--flang/runtime/Float128Math/norm2.cpp35
-rw-r--r--flang/runtime/Float128Math/numeric-template-specs.h55
-rw-r--r--flang/runtime/Float128Math/pow.cpp2
-rw-r--r--flang/runtime/Float128Math/round.cpp2
-rw-r--r--flang/runtime/Float128Math/rrspacing.cpp21
-rw-r--r--flang/runtime/Float128Math/scale.cpp28
-rw-r--r--flang/runtime/Float128Math/set-exponent.cpp23
-rw-r--r--flang/runtime/Float128Math/sin.cpp2
-rw-r--r--flang/runtime/Float128Math/sinh.cpp2
-rw-r--r--flang/runtime/Float128Math/spacing.cpp21
-rw-r--r--flang/runtime/Float128Math/sqrt.cpp6
-rw-r--r--flang/runtime/Float128Math/tan.cpp2
-rw-r--r--flang/runtime/Float128Math/tanh.cpp2
-rw-r--r--flang/runtime/Float128Math/tgamma.cpp2
-rw-r--r--flang/runtime/Float128Math/trunc.cpp2
-rw-r--r--flang/runtime/Float128Math/y0.cpp2
-rw-r--r--flang/runtime/Float128Math/y1.cpp2
-rw-r--r--flang/runtime/Float128Math/yn.cpp2
-rw-r--r--flang/runtime/complex-reduction.c49
-rw-r--r--flang/runtime/complex-reduction.h13
-rw-r--r--flang/runtime/extrema.cpp158
-rw-r--r--flang/runtime/numeric-templates.h340
-rw-r--r--flang/runtime/numeric.cpp169
-rw-r--r--flang/runtime/product.cpp6
-rw-r--r--flang/runtime/reduction-templates.h103
-rw-r--r--flang/runtime/sum.cpp3
-rw-r--r--flang/runtime/tools.h11
-rw-r--r--flang/runtime/unit.cpp21
-rw-r--r--flang/runtime/unit.h10
-rw-r--r--flang/test/Driver/flang-experimental-polymorphism-flag.f904
-rw-r--r--flang/test/HLFIR/all-lowering.fir8
-rw-r--r--flang/test/HLFIR/any-lowering.fir6
-rw-r--r--flang/test/HLFIR/count-lowering-default-int-kinds.fir16
-rw-r--r--flang/test/HLFIR/count-lowering.fir6
-rw-r--r--flang/test/HLFIR/dot_product-lowering.fir1
-rw-r--r--flang/test/HLFIR/extents-of-shape-of.f9011
-rw-r--r--flang/test/HLFIR/matmul-lowering.fir2
-rw-r--r--flang/test/HLFIR/maxloc-lowering.fir137
-rw-r--r--flang/test/HLFIR/maxval-lowering.fir4
-rw-r--r--flang/test/HLFIR/minloc-lowering.fir137
-rw-r--r--flang/test/HLFIR/minval-lowering.fir6
-rw-r--r--flang/test/HLFIR/mul_transpose.f9027
-rw-r--r--flang/test/HLFIR/product-lowering.fir4
-rw-r--r--flang/test/HLFIR/sum-lowering.fir4
-rw-r--r--flang/test/HLFIR/transpose-lowering.fir2
-rw-r--r--flang/test/Integration/OpenMP/copyprivate.f9097
-rw-r--r--flang/test/Lower/HLFIR/allocatable-end-of-scope-dealloc.f902
-rw-r--r--flang/test/Lower/HLFIR/bindc_internal_proc.f904
-rw-r--r--flang/test/Lower/HLFIR/internal-procedures-2.f902
-rw-r--r--flang/test/Lower/HLFIR/internal-procedures-polymorphic.f9081
-rw-r--r--flang/test/Lower/HLFIR/internal-procedures.f9012
-rw-r--r--flang/test/Lower/Intrinsics/norm2.f9016
-rw-r--r--flang/test/Lower/Intrinsics/random.f904
-rw-r--r--flang/test/Lower/Intrinsics/ubound01.f902
-rw-r--r--flang/test/Lower/OpenACC/acc-routine04.f902
-rw-r--r--flang/test/Lower/OpenMP/FIR/delayed-privatization-firstprivate.f9029
-rw-r--r--flang/test/Lower/OpenMP/FIR/delayed-privatization-private.f9038
-rw-r--r--flang/test/Lower/OpenMP/FIR/threadprivate-use-association-2.f902
-rw-r--r--flang/test/Lower/OpenMP/delayed-privatization-firstprivate.f9029
-rw-r--r--flang/test/Lower/OpenMP/delayed-privatization-private-firstprivate.f9034
-rw-r--r--flang/test/Lower/OpenMP/delayed-privatization-private.f9028
-rw-r--r--flang/test/Lower/OpenMP/delayed-privatization-reduction.f9030
-rw-r--r--flang/test/Lower/OpenMP/threadprivate-commonblock-use.f902
-rw-r--r--flang/test/Lower/OpenMP/threadprivate-use-association-2-hlfir.f902
-rw-r--r--flang/test/Lower/PowerPC/ppc-vector-types.f9010
-rw-r--r--flang/test/Lower/array-temp.f902
-rwxr-xr-xflang/test/Lower/convert.f902
-rw-r--r--flang/test/Lower/dummy-arguments.f902
-rw-r--r--flang/test/Lower/dummy-procedure-character.f904
-rw-r--r--flang/test/Lower/equivalence-with-host-assoc.f9016
-rw-r--r--flang/test/Lower/explicit-interface-results-2.f9012
-rw-r--r--flang/test/Lower/forall/array-constructor.f908
-rw-r--r--flang/test/Lower/forall/character-1.f902
-rw-r--r--flang/test/Lower/global-initialization.f909
-rw-r--r--flang/test/Lower/host-associated-functions.f9016
-rw-r--r--flang/test/Lower/host-associated-globals.f9010
-rw-r--r--flang/test/Lower/host-associated.f9064
-rw-r--r--flang/test/Lower/module-and-internal-proc.f904
-rw-r--r--flang/test/Lower/parent-component.f9018
-rw-r--r--flang/test/Lower/polymorphic.f904
-rw-r--r--flang/test/Lower/program-units-fir-mangling.f9018
-rw-r--r--flang/test/Parser/assume-aligned.f9057
-rw-r--r--flang/test/Semantics/Inputs/dir1/modfile63a.mod6
-rw-r--r--flang/test/Semantics/Inputs/dir1/modfile63b.mod8
-rw-r--r--flang/test/Semantics/Inputs/dir2/modfile63a.mod6
-rw-r--r--flang/test/Semantics/Inputs/dir2/modfile63b.mod8
-rw-r--r--flang/test/Semantics/assign04.f907
-rw-r--r--flang/test/Semantics/bind-c03.f906
-rw-r--r--flang/test/Semantics/bind-c04.f902
-rw-r--r--flang/test/Semantics/call03.f9074
-rw-r--r--flang/test/Semantics/data21.f907
-rw-r--r--flang/test/Semantics/data22.f9017
-rw-r--r--flang/test/Semantics/getsymbols02.f902
-rw-r--r--flang/test/Semantics/implicit14.f9054
-rw-r--r--flang/test/Semantics/intrinsics03.f90125
-rw-r--r--flang/test/Semantics/modfile63.f9019
-rw-r--r--flang/test/Semantics/resolve61.f902
-rw-r--r--flang/test/Semantics/resolve91.f909
-rw-r--r--flang/test/Semantics/separate-mp02.f904
-rwxr-xr-xflang/test/Semantics/test_modfile.py2
-rw-r--r--flang/test/Semantics/typed-subr.f904
-rw-r--r--flang/unittests/Runtime/NumericalFormatTest.cpp2
-rw-r--r--libc/.clang-tidy2
-rw-r--r--libc/CMakeLists.txt2
-rw-r--r--libc/benchmarks/automemcpy/lib/CMakeLists.txt3
-rw-r--r--libc/cmake/modules/CheckCompilerFeatures.cmake2
-rw-r--r--libc/cmake/modules/LLVMLibCArchitectures.cmake9
-rw-r--r--libc/cmake/modules/compiler_features/check_float128.cpp4
-rw-r--r--libc/config/baremetal/api.td36
-rw-r--r--libc/config/baremetal/arm/entrypoints.txt3
-rw-r--r--libc/config/baremetal/arm/headers.txt1
-rw-r--r--libc/config/baremetal/riscv/entrypoints.txt3
-rw-r--r--libc/config/baremetal/riscv/headers.txt1
-rw-r--r--libc/config/linux/aarch64/entrypoints.txt2
-rw-r--r--libc/config/linux/riscv/entrypoints.txt2
-rw-r--r--libc/config/linux/x86_64/entrypoints.txt7
-rw-r--r--libc/docs/dev/code_style.rst4
-rw-r--r--libc/docs/dev/undefined_behavior.rst3
-rw-r--r--libc/docs/stdbit.rst12
-rw-r--r--libc/include/__llvm-libc-common.h6
-rw-r--r--libc/include/llvm-libc-macros/containerof-macro.h6
-rw-r--r--libc/include/llvm-libc-macros/fcntl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/features-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/fenv-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/file-seek-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/float-macros.h7
-rw-r--r--libc/include/llvm-libc-macros/generic-error-number-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/gpu/time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/inttypes-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/limits-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/fcntl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sched-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/signal-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-random-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-resource-macros.h5
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-socket-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-stat-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-wait-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/termios-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/unistd-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/math-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/null-macro.h6
-rw-r--r--libc/include/llvm-libc-macros/offsetof-macro.h6
-rw-r--r--libc/include/llvm-libc-macros/sched-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/signal-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdbit-macros.h22
-rw-r--r--libc/include/llvm-libc-macros/stdckdint-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdfix-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdio-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdlib-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-auxv-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-ioctl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-mman-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-queue-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-random-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-resource-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-select-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-socket-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-stat-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-wait-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/termios-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/unistd-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/wchar-macros.h6
-rw-r--r--libc/include/llvm-libc-types/ACTION.h6
-rw-r--r--libc/include/llvm-libc-types/DIR.h6
-rw-r--r--libc/include/llvm-libc-types/ENTRY.h6
-rw-r--r--libc/include/llvm-libc-types/FILE.h6
-rw-r--r--libc/include/llvm-libc-types/__atexithandler_t.h6
-rw-r--r--libc/include/llvm-libc-types/__atfork_callback_t.h6
-rw-r--r--libc/include/llvm-libc-types/__bsearchcompare_t.h6
-rw-r--r--libc/include/llvm-libc-types/__call_once_func_t.h6
-rw-r--r--libc/include/llvm-libc-types/__exec_argv_t.h6
-rw-r--r--libc/include/llvm-libc-types/__exec_envp_t.h6
-rw-r--r--libc/include/llvm-libc-types/__futex_word.h6
-rw-r--r--libc/include/llvm-libc-types/__getoptargv_t.h6
-rw-r--r--libc/include/llvm-libc-types/__mutex_type.h6
-rw-r--r--libc/include/llvm-libc-types/__pthread_once_func_t.h6
-rw-r--r--libc/include/llvm-libc-types/__pthread_start_t.h6
-rw-r--r--libc/include/llvm-libc-types/__pthread_tss_dtor_t.h6
-rw-r--r--libc/include/llvm-libc-types/__qsortcompare_t.h6
-rw-r--r--libc/include/llvm-libc-types/__qsortrcompare_t.h6
-rw-r--r--libc/include/llvm-libc-types/__sighandler_t.h6
-rw-r--r--libc/include/llvm-libc-types/__thread_type.h6
-rw-r--r--libc/include/llvm-libc-types/blkcnt_t.h6
-rw-r--r--libc/include/llvm-libc-types/blksize_t.h6
-rw-r--r--libc/include/llvm-libc-types/cc_t.h6
-rw-r--r--libc/include/llvm-libc-types/clock_t.h6
-rw-r--r--libc/include/llvm-libc-types/clockid_t.h6
-rw-r--r--libc/include/llvm-libc-types/cnd_t.h6
-rw-r--r--libc/include/llvm-libc-types/cookie_io_functions_t.h6
-rw-r--r--libc/include/llvm-libc-types/cpu_set_t.h6
-rw-r--r--libc/include/llvm-libc-types/dev_t.h6
-rw-r--r--libc/include/llvm-libc-types/div_t.h6
-rw-r--r--libc/include/llvm-libc-types/double_t.h6
-rw-r--r--libc/include/llvm-libc-types/fd_set.h6
-rw-r--r--libc/include/llvm-libc-types/fenv_t.h6
-rw-r--r--libc/include/llvm-libc-types/fexcept_t.h6
-rw-r--r--libc/include/llvm-libc-types/float128.h8
-rw-r--r--libc/include/llvm-libc-types/float_t.h6
-rw-r--r--libc/include/llvm-libc-types/gid_t.h6
-rw-r--r--libc/include/llvm-libc-types/ino_t.h6
-rw-r--r--libc/include/llvm-libc-types/jmp_buf.h6
-rw-r--r--libc/include/llvm-libc-types/ldiv_t.h6
-rw-r--r--libc/include/llvm-libc-types/lldiv_t.h6
-rw-r--r--libc/include/llvm-libc-types/mode_t.h6
-rw-r--r--libc/include/llvm-libc-types/mtx_t.h6
-rw-r--r--libc/include/llvm-libc-types/nlink_t.h6
-rw-r--r--libc/include/llvm-libc-types/off64_t.h6
-rw-r--r--libc/include/llvm-libc-types/off_t.h6
-rw-r--r--libc/include/llvm-libc-types/once_flag.h6
-rw-r--r--libc/include/llvm-libc-types/pid_t.h6
-rw-r--r--libc/include/llvm-libc-types/posix_spawn_file_actions_t.h6
-rw-r--r--libc/include/llvm-libc-types/posix_spawnattr_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_attr_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_key_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_mutex_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_mutexattr_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_once_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_t.h6
-rw-r--r--libc/include/llvm-libc-types/rlim_t.h6
-rw-r--r--libc/include/llvm-libc-types/rpc_opcodes_t.h6
-rw-r--r--libc/include/llvm-libc-types/sa_family_t.h6
-rw-r--r--libc/include/llvm-libc-types/sig_atomic_t.h6
-rw-r--r--libc/include/llvm-libc-types/siginfo_t.h6
-rw-r--r--libc/include/llvm-libc-types/sigset_t.h6
-rw-r--r--libc/include/llvm-libc-types/size_t.h6
-rw-r--r--libc/include/llvm-libc-types/socklen_t.h6
-rw-r--r--libc/include/llvm-libc-types/speed_t.h6
-rw-r--r--libc/include/llvm-libc-types/ssize_t.h6
-rw-r--r--libc/include/llvm-libc-types/stack_t.h6
-rw-r--r--libc/include/llvm-libc-types/struct_dirent.h6
-rw-r--r--libc/include/llvm-libc-types/struct_epoll_data.h6
-rw-r--r--libc/include/llvm-libc-types/struct_epoll_event.h6
-rw-r--r--libc/include/llvm-libc-types/struct_hsearch_data.h6
-rw-r--r--libc/include/llvm-libc-types/struct_rlimit.h6
-rw-r--r--libc/include/llvm-libc-types/struct_rusage.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sched_param.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sigaction.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sockaddr.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sockaddr_un.h6
-rw-r--r--libc/include/llvm-libc-types/struct_stat.h6
-rw-r--r--libc/include/llvm-libc-types/struct_timespec.h6
-rw-r--r--libc/include/llvm-libc-types/struct_timeval.h6
-rw-r--r--libc/include/llvm-libc-types/struct_tm.h6
-rw-r--r--libc/include/llvm-libc-types/struct_utsname.h6
-rw-r--r--libc/include/llvm-libc-types/suseconds_t.h6
-rw-r--r--libc/include/llvm-libc-types/tcflag_t.h6
-rw-r--r--libc/include/llvm-libc-types/test_rpc_opcodes_t.h6
-rw-r--r--libc/include/llvm-libc-types/thrd_start_t.h6
-rw-r--r--libc/include/llvm-libc-types/thrd_t.h6
-rw-r--r--libc/include/llvm-libc-types/time_t.h6
-rw-r--r--libc/include/llvm-libc-types/tss_dtor_t.h6
-rw-r--r--libc/include/llvm-libc-types/tss_t.h6
-rw-r--r--libc/include/llvm-libc-types/uid_t.h6
-rw-r--r--libc/include/llvm-libc-types/union_sigval.h6
-rw-r--r--libc/include/llvm-libc-types/wchar_t.h6
-rw-r--r--libc/include/llvm-libc-types/wint_t.h6
-rw-r--r--libc/include/sys/queue.h6
-rw-r--r--libc/lib/CMakeLists.txt6
-rw-r--r--libc/spec/spec.td1
-rw-r--r--libc/spec/stdc.td40
-rw-r--r--libc/src/__support/CPP/CMakeLists.txt4
-rw-r--r--libc/src/__support/CPP/type_traits/is_fixed_point.h2
-rw-r--r--libc/src/__support/CPP/type_traits/is_floating_point.h6
-rw-r--r--libc/src/__support/FPUtil/CMakeLists.txt2
-rw-r--r--libc/src/__support/FPUtil/FPBits.h126
-rw-r--r--libc/src/__support/FPUtil/fpbits_str.h6
-rw-r--r--libc/src/__support/GPU/generic/utils.h6
-rw-r--r--libc/src/__support/GPU/utils.h6
-rw-r--r--libc/src/__support/HashTable/table.h6
-rw-r--r--libc/src/__support/OSUtil/gpu/io.h2
-rw-r--r--libc/src/__support/RPC/rpc_client.h2
-rw-r--r--libc/src/__support/RPC/rpc_util.h6
-rw-r--r--libc/src/__support/StringUtil/message_mapper.h6
-rw-r--r--libc/src/__support/StringUtil/platform_errors.h6
-rw-r--r--libc/src/__support/StringUtil/platform_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_extension_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_extension_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_platform_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_platform_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/minimal_platform_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/minimal_platform_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/posix_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/posix_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/signal_table.h6
-rw-r--r--libc/src/__support/StringUtil/tables/stdc_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/stdc_signals.h6
-rw-r--r--libc/src/__support/fixed_point/fx_bits.h8
-rw-r--r--libc/src/__support/fixed_point/fx_rep.h6
-rw-r--r--libc/src/__support/macros/properties/CMakeLists.txt4
-rw-r--r--libc/src/__support/macros/properties/types.h (renamed from libc/src/__support/macros/properties/float.h)29
-rw-r--r--libc/src/__support/memory_size.h5
-rw-r--r--libc/src/__support/threads/gpu/mutex.h6
-rw-r--r--libc/src/assert/assert.h3
-rw-r--r--libc/src/gpu/rpc_host_call.h2
-rw-r--r--libc/src/math/amdgpu/CMakeLists.txt40
-rw-r--r--libc/src/math/amdgpu/declarations.h6
-rw-r--r--libc/src/math/amdgpu/exp.cpp2
-rw-r--r--libc/src/math/amdgpu/expf.cpp2
-rw-r--r--libc/src/math/amdgpu/fmax.cpp4
-rw-r--r--libc/src/math/amdgpu/fmaxf.cpp6
-rw-r--r--libc/src/math/amdgpu/fmin.cpp6
-rw-r--r--libc/src/math/amdgpu/fminf.cpp6
-rw-r--r--libc/src/math/amdgpu/platform.h6
-rw-r--r--libc/src/math/ceilf128.h2
-rw-r--r--libc/src/math/copysignf128.h4
-rw-r--r--libc/src/math/fabsf128.h2
-rw-r--r--libc/src/math/fdimf128.h2
-rw-r--r--libc/src/math/floorf128.h2
-rw-r--r--libc/src/math/fmaxf128.h2
-rw-r--r--libc/src/math/fminf128.h2
-rw-r--r--libc/src/math/frexpf128.h2
-rw-r--r--libc/src/math/generic/CMakeLists.txt28
-rw-r--r--libc/src/math/generic/exp_utils.h6
-rw-r--r--libc/src/math/ilogbf128.h2
-rw-r--r--libc/src/math/ldexpf128.h2
-rw-r--r--libc/src/math/llogb.h2
-rw-r--r--libc/src/math/llogbf.h2
-rw-r--r--libc/src/math/llogbf128.h2
-rw-r--r--libc/src/math/llogbl.h2
-rw-r--r--libc/src/math/logbf128.h2
-rw-r--r--libc/src/math/nvptx/CMakeLists.txt40
-rw-r--r--libc/src/math/nvptx/declarations.h6
-rw-r--r--libc/src/math/nvptx/fmax.cpp6
-rw-r--r--libc/src/math/nvptx/fmaxf.cpp4
-rw-r--r--libc/src/math/nvptx/fmin.cpp6
-rw-r--r--libc/src/math/nvptx/fminf.cpp6
-rw-r--r--libc/src/math/nvptx/lround.cpp16
-rw-r--r--libc/src/math/nvptx/lroundf.cpp16
-rw-r--r--libc/src/math/nvptx/nvptx.h6
-rw-r--r--libc/src/math/roundf128.h2
-rw-r--r--libc/src/math/sqrtf128.h2
-rw-r--r--libc/src/math/truncf128.h2
-rw-r--r--libc/src/search/hsearch/global.h5
-rw-r--r--libc/src/stdbit/CMakeLists.txt1
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_uc.cpp (renamed from libc/src/math/nvptx/llroundf.cpp)10
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_uc.h (renamed from libc/src/math/amdgpu/lround.cpp)10
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_ui.cpp (renamed from libc/src/math/nvptx/llround.cpp)10
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_ui.h (renamed from libc/src/math/amdgpu/lroundf.cpp)10
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_ul.cpp (renamed from libc/src/math/amdgpu/llround.cpp)10
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_ul.h18
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_ull.cpp20
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_ull.h18
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_us.cpp (renamed from libc/src/math/amdgpu/llroundf.cpp)10
-rw-r--r--libc/src/stdbit/stdc_has_single_bit_us.h18
-rw-r--r--libc/src/string/memory_utils/aarch64/inline_bcmp.h2
-rw-r--r--libc/src/string/memory_utils/aarch64/inline_memcmp.h2
-rw-r--r--libc/src/string/memory_utils/aarch64/inline_memcpy.h6
-rw-r--r--libc/src/string/memory_utils/generic/aligned_access.h4
-rw-r--r--libc/src/string/memory_utils/generic/byte_per_byte.h6
-rw-r--r--libc/src/string/memory_utils/op_aarch64.h10
-rw-r--r--libc/src/string/memory_utils/op_builtin.h16
-rw-r--r--libc/src/string/memory_utils/op_generic.h10
-rw-r--r--libc/src/string/memory_utils/riscv/inline_memmove.h6
-rw-r--r--libc/src/string/memory_utils/utils.h4
-rw-r--r--libc/src/string/memory_utils/x86_64/inline_bcmp.h2
-rw-r--r--libc/src/string/memory_utils/x86_64/inline_memcmp.h2
-rw-r--r--libc/test/UnitTest/ExecuteFunction.h6
-rw-r--r--libc/test/UnitTest/FPExceptMatcher.h6
-rw-r--r--libc/test/UnitTest/FPMatcher.h6
-rw-r--r--libc/test/UnitTest/LibcTest.h6
-rw-r--r--libc/test/UnitTest/MemoryMatcher.h6
-rw-r--r--libc/test/UnitTest/PlatformDefs.h6
-rw-r--r--libc/test/UnitTest/RoundingModeUtils.h6
-rw-r--r--libc/test/UnitTest/StringUtils.h6
-rw-r--r--libc/test/UnitTest/Test.h6
-rw-r--r--libc/test/include/stdbit_test.cpp13
-rw-r--r--libc/test/include/sys/queue_test.cpp2
-rw-r--r--libc/test/integration/src/spawn/test_binary_properties.h6
-rw-r--r--libc/test/src/__support/FPUtil/fpbits_test.cpp4
-rw-r--r--libc/test/src/__support/fixed_point/fx_bits_test.cpp77
-rw-r--r--libc/test/src/__support/uint_test.cpp4
-rw-r--r--libc/test/src/compiler/stack_chk_guard_test.cpp2
-rw-r--r--libc/test/src/math/FAbsTest.h5
-rw-r--r--libc/test/src/math/FMaxTest.h5
-rw-r--r--libc/test/src/math/FMinTest.h5
-rw-r--r--libc/test/src/math/FloorTest.h5
-rw-r--r--libc/test/src/math/RandUtils.h5
-rw-r--r--libc/test/src/math/RoundTest.h5
-rw-r--r--libc/test/src/math/TruncTest.h5
-rw-r--r--libc/test/src/math/differential_testing/Timer.h6
-rw-r--r--libc/test/src/math/in_float_range_test_helper.h6
-rw-r--r--libc/test/src/math/smoke/CeilTest.h5
-rw-r--r--libc/test/src/math/smoke/CopySignTest.h5
-rw-r--r--libc/test/src/math/smoke/FAbsTest.h5
-rw-r--r--libc/test/src/math/smoke/FMaxTest.h5
-rw-r--r--libc/test/src/math/smoke/FMinTest.h5
-rw-r--r--libc/test/src/math/smoke/FloorTest.h5
-rw-r--r--libc/test/src/math/smoke/RIntTest.h6
-rw-r--r--libc/test/src/math/smoke/RoundTest.h5
-rw-r--r--libc/test/src/math/smoke/RoundToIntegerTest.h6
-rw-r--r--libc/test/src/math/smoke/TruncTest.h5
-rw-r--r--libc/test/src/stdbit/CMakeLists.txt1
-rw-r--r--libc/test/src/stdbit/stdc_has_single_bit_uc_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_has_single_bit_ui_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_has_single_bit_ul_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_has_single_bit_ull_test.cpp20
-rw-r--r--libc/test/src/stdbit/stdc_has_single_bit_us_test.cpp20
-rw-r--r--libc/test/src/time/TmHelper.h6
-rw-r--r--libc/utils/MPFRWrapper/MPFRUtils.h6
-rw-r--r--libc/utils/gpu/loader/amdgpu/Loader.cpp12
-rw-r--r--libcxx/cmake/config-ix.cmake7
-rw-r--r--libcxx/docs/FeatureTestMacroTable.rst4
-rw-r--r--libcxx/docs/ReleaseNotes/19.rst8
-rw-r--r--libcxx/docs/Status/Cxx23Papers.csv4
-rw-r--r--libcxx/include/__assert81
-rw-r--r--libcxx/include/__atomic/aliases.h3
-rw-r--r--libcxx/include/__atomic/atomic_flag.h1
-rw-r--r--libcxx/include/__atomic/atomic_sync.h6
-rw-r--r--libcxx/include/__atomic/cxx_atomic_impl.h291
-rw-r--r--libcxx/include/__charconv/from_chars_integral.h1
-rw-r--r--libcxx/include/__charconv/to_chars_base_10.h1
-rw-r--r--libcxx/include/__charconv/to_chars_integral.h1
-rw-r--r--libcxx/include/__charconv/traits.h1
-rw-r--r--libcxx/include/__config89
-rw-r--r--libcxx/include/__format/formatter_floating_point.h1
-rw-r--r--libcxx/include/__numeric/saturation_arithmetic.h1
-rw-r--r--libcxx/include/__random/negative_binomial_distribution.h1
-rw-r--r--libcxx/include/__ranges/repeat_view.h1
-rw-r--r--libcxx/include/__stop_token/stop_state.h1
-rw-r--r--libcxx/include/__string/char_traits.h1
-rw-r--r--libcxx/include/__thread/support/pthread.h5
-rw-r--r--libcxx/include/__utility/integer_sequence.h87
-rw-r--r--libcxx/include/algorithm1
-rw-r--r--libcxx/include/any1
-rw-r--r--libcxx/include/array2
-rw-r--r--libcxx/include/atomic2
-rw-r--r--libcxx/include/barrier2
-rw-r--r--libcxx/include/bit1
-rw-r--r--libcxx/include/bitset1
-rw-r--r--libcxx/include/cassert1
-rw-r--r--libcxx/include/ccomplex1
-rw-r--r--libcxx/include/cctype1
-rw-r--r--libcxx/include/cerrno1
-rw-r--r--libcxx/include/cfenv1
-rw-r--r--libcxx/include/cfloat1
-rw-r--r--libcxx/include/charconv1
-rw-r--r--libcxx/include/chrono1
-rw-r--r--libcxx/include/cinttypes1
-rw-r--r--libcxx/include/ciso6461
-rw-r--r--libcxx/include/climits1
-rw-r--r--libcxx/include/clocale1
-rw-r--r--libcxx/include/cmath1
-rw-r--r--libcxx/include/codecvt1
-rw-r--r--libcxx/include/compare1
-rw-r--r--libcxx/include/complex1
-rw-r--r--libcxx/include/concepts1
-rw-r--r--libcxx/include/condition_variable1
-rw-r--r--libcxx/include/coroutine1
-rw-r--r--libcxx/include/csetjmp1
-rw-r--r--libcxx/include/csignal1
-rw-r--r--libcxx/include/cstdarg1
-rw-r--r--libcxx/include/cstdbool1
-rw-r--r--libcxx/include/cstddef1
-rw-r--r--libcxx/include/cstdint1
-rw-r--r--libcxx/include/cstdio1
-rw-r--r--libcxx/include/cstdlib1
-rw-r--r--libcxx/include/cstring1
-rw-r--r--libcxx/include/ctgmath1
-rw-r--r--libcxx/include/ctime1
-rw-r--r--libcxx/include/cuchar1
-rw-r--r--libcxx/include/cwchar1
-rw-r--r--libcxx/include/cwctype1
-rw-r--r--libcxx/include/deque2
-rw-r--r--libcxx/include/exception1
-rw-r--r--libcxx/include/execution1
-rw-r--r--libcxx/include/expected1
-rw-r--r--libcxx/include/experimental/__simd/scalar.h1
-rw-r--r--libcxx/include/experimental/__simd/vec_ext.h1
-rw-r--r--libcxx/include/experimental/iterator1
-rw-r--r--libcxx/include/experimental/propagate_const1
-rw-r--r--libcxx/include/experimental/simd2
-rw-r--r--libcxx/include/experimental/type_traits1
-rw-r--r--libcxx/include/experimental/utility1
-rw-r--r--libcxx/include/ext/hash_map1
-rw-r--r--libcxx/include/ext/hash_set1
-rw-r--r--libcxx/include/filesystem1
-rw-r--r--libcxx/include/format1
-rw-r--r--libcxx/include/forward_list1
-rw-r--r--libcxx/include/fstream2
-rw-r--r--libcxx/include/functional1
-rw-r--r--libcxx/include/future2
-rw-r--r--libcxx/include/initializer_list1
-rw-r--r--libcxx/include/iomanip1
-rw-r--r--libcxx/include/ios1
-rw-r--r--libcxx/include/iosfwd1
-rw-r--r--libcxx/include/iostream1
-rw-r--r--libcxx/include/istream1
-rw-r--r--libcxx/include/iterator1
-rw-r--r--libcxx/include/latch2
-rw-r--r--libcxx/include/libcxx.imp12
-rw-r--r--libcxx/include/limits1
-rw-r--r--libcxx/include/list2
-rw-r--r--libcxx/include/locale2
-rw-r--r--libcxx/include/map2
-rw-r--r--libcxx/include/memory1
-rw-r--r--libcxx/include/mutex1
-rw-r--r--libcxx/include/new2
-rw-r--r--libcxx/include/numbers1
-rw-r--r--libcxx/include/numeric1
-rw-r--r--libcxx/include/optional2
-rw-r--r--libcxx/include/ostream1
-rw-r--r--libcxx/include/print2
-rw-r--r--libcxx/include/queue1
-rw-r--r--libcxx/include/random1
-rw-r--r--libcxx/include/ranges1
-rw-r--r--libcxx/include/ratio1
-rw-r--r--libcxx/include/regex2
-rw-r--r--libcxx/include/scoped_allocator1
-rw-r--r--libcxx/include/semaphore2
-rw-r--r--libcxx/include/set2
-rw-r--r--libcxx/include/shared_mutex1
-rw-r--r--libcxx/include/span2
-rw-r--r--libcxx/include/sstream1
-rw-r--r--libcxx/include/stack1
-rw-r--r--libcxx/include/stdexcept1
-rw-r--r--libcxx/include/stop_token1
-rw-r--r--libcxx/include/streambuf1
-rw-r--r--libcxx/include/string2
-rw-r--r--libcxx/include/string_view2
-rw-r--r--libcxx/include/strstream1
-rw-r--r--libcxx/include/system_error1
-rw-r--r--libcxx/include/thread1
-rw-r--r--libcxx/include/tuple1
-rw-r--r--libcxx/include/type_traits2
-rw-r--r--libcxx/include/typeindex1
-rw-r--r--libcxx/include/typeinfo1
-rw-r--r--libcxx/include/unordered_map2
-rw-r--r--libcxx/include/unordered_set2
-rw-r--r--libcxx/include/utility1
-rw-r--r--libcxx/include/valarray2
-rw-r--r--libcxx/include/variant1
-rw-r--r--libcxx/include/vector2
-rw-r--r--libcxx/include/version5
-rw-r--r--libcxx/src/new.cpp1
-rw-r--r--libcxx/test/libcxx/assertions/customize_verbose_abort.link-time.pass.cpp1
-rw-r--r--libcxx/test/libcxx/assertions/headers_declare_verbose_abort.gen.py33
-rw-r--r--libcxx/test/libcxx/assertions/modes/none.pass.cpp1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx23.csv1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx26.csv1
-rw-r--r--libcxx/test/std/language.support/support.limits/support.limits.general/algorithm.version.compile.pass.cpp63
-rw-r--r--libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp63
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp3
-rw-r--r--libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp19
-rw-r--r--libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp24
-rwxr-xr-xlibcxx/utils/generate_feature_test_macro_components.py7
-rw-r--r--libcxx/utils/generate_iwyu_mapping.py2
-rw-r--r--lld/ELF/Arch/LoongArch.cpp2
-rw-r--r--lld/ELF/Arch/PPC64.cpp3
-rw-r--r--lld/ELF/Arch/RISCV.cpp2
-rw-r--r--lld/MachO/Driver.cpp4
-rw-r--r--lld/MachO/InputFiles.cpp8
-rw-r--r--lld/MachO/ObjC.h2
-rw-r--r--lldb/bindings/CMakeLists.txt6
-rw-r--r--lldb/cmake/modules/AddLLDB.cmake6
-rw-r--r--lldb/cmake/modules/LLDBConfig.cmake2
-rw-r--r--lldb/include/lldb/Core/Debugger.h10
-rw-r--r--lldb/include/lldb/Core/Progress.h40
-rw-r--r--lldb/include/lldb/Interpreter/CommandOptionArgumentTable.h5
-rw-r--r--lldb/include/lldb/lldb-private-enumerations.h7
-rw-r--r--lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py3
-rw-r--r--lldb/source/Commands/CommandObjectTarget.cpp8
-rw-r--r--lldb/source/Commands/CommandObjectThread.cpp33
-rw-r--r--lldb/source/Commands/Options.td10
-rw-r--r--lldb/source/Core/Debugger.cpp19
-rw-r--r--lldb/source/Core/Progress.cpp56
-rw-r--r--lldb/source/Host/common/Editline.cpp5
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp256
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h74
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp278
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h5
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp2
-rw-r--r--lldb/source/Symbol/Symtab.cpp23
-rw-r--r--lldb/source/Symbol/Variable.cpp8
-rw-r--r--lldb/test/API/commands/command/script/TestCommandScript.py14
-rw-r--r--lldb/test/API/commands/command/script/cmd_file.lldb4
-rw-r--r--lldb/test/API/functionalities/completion/TestCompletion.py6
-rw-r--r--lldb/test/API/functionalities/completion/main.cpp13
-rw-r--r--lldb/test/API/lang/c/local_variables/TestLocalVariables.py1
-rw-r--r--lldb/test/API/lang/cpp/gmodules/alignment/Makefile4
-rw-r--r--lldb/test/API/lang/cpp/gmodules/alignment/TestPchAlignment.py60
-rw-r--r--lldb/test/API/lang/cpp/gmodules/alignment/main.cpp10
-rw-r--r--lldb/test/API/lang/cpp/gmodules/alignment/pch.h21
-rw-r--r--lldb/test/API/lit.cfg.py3
-rw-r--r--lldb/test/API/lit.site.cfg.py.in1
-rw-r--r--lldb/test/API/macosx/nslog/TestDarwinNSLogOutput.py5
-rw-r--r--lldb/test/API/repl/clang/TestClangREPL.py32
-rw-r--r--lldb/test/API/terminal/TestSTTYBeforeAndAfter.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py45
-rw-r--r--lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py12
-rw-r--r--lldb/test/CMakeLists.txt29
-rw-r--r--lldb/test/Shell/Commands/command-thread-backtrace.test14
-rw-r--r--lldb/test/Shell/Driver/TestHelp.test2
-rw-r--r--lldb/test/Shell/SymbolFile/Breakpad/symtab-sorted-by-size.test11
-rw-r--r--lldb/tools/debugserver/source/RNBRemote.cpp319
-rw-r--r--lldb/tools/debugserver/source/RNBRemote.h9
-rw-r--r--lldb/tools/driver/Driver.cpp15
-rw-r--r--lldb/tools/driver/Driver.h1
-rw-r--r--lldb/tools/lldb-dap/Watchpoint.cpp23
-rw-r--r--lldb/tools/lldb-dap/Watchpoint.h5
-rw-r--r--lldb/tools/lldb-dap/lldb-dap.cpp16
-rw-r--r--lldb/unittests/Core/ProgressReportTest.cpp93
-rw-r--r--lldb/use_lldb_suite_root.py4
-rw-r--r--lldb/utils/lldb-dotest/CMakeLists.txt1
-rwxr-xr-xlldb/utils/lldb-dotest/lldb-dotest.in5
-rw-r--r--llvm/CMakeLists.txt2
-rw-r--r--llvm/cmake/modules/AddLLVM.cmake17
-rw-r--r--llvm/docs/CMake.rst6
-rw-r--r--llvm/docs/CodeOfConduct.rst11
-rw-r--r--llvm/docs/CommandGuide/llvm-objcopy.rst9
-rw-r--r--llvm/docs/CommandGuide/llvm-readobj.rst14
-rw-r--r--llvm/docs/GlobalISel/GenericOpcode.rst8
-rw-r--r--llvm/docs/LangRef.rst8
-rw-r--r--llvm/docs/RISCVUsage.rst9
-rw-r--r--llvm/docs/ReleaseNotes.rst5
-rw-r--r--llvm/include/llvm/ADT/APFloat.h18
-rw-r--r--llvm/include/llvm/BinaryFormat/DXContainer.h2
-rw-r--r--llvm/include/llvm/BinaryFormat/DXContainerConstants.def74
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h1
-rw-r--r--llvm/include/llvm/CodeGen/ISDOpcodes.h2
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h6
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h7
-rw-r--r--llvm/include/llvm/CodeGen/TargetLowering.h36
-rw-r--r--llvm/include/llvm/DebugInfo/DIContext.h1
-rw-r--r--llvm/include/llvm/Demangle/ItaniumDemangle.h2
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h6
-rw-r--r--llvm/include/llvm/IR/DIBuilder.h7
-rw-r--r--llvm/include/llvm/IR/DebugInfoMetadata.h124
-rw-r--r--llvm/include/llvm/IR/InstrTypes.h13
-rw-r--r--llvm/include/llvm/IR/Intrinsics.td2
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAMDGPU.td7
-rw-r--r--llvm/include/llvm/IR/IntrinsicsDirectX.td7
-rw-r--r--llvm/include/llvm/IR/Module.h23
-rw-r--r--llvm/include/llvm/MC/MCExpr.h2
-rw-r--r--llvm/include/llvm/ObjCopy/ELF/ELFConfig.h3
-rw-r--r--llvm/include/llvm/Object/DXContainer.h8
-rw-r--r--llvm/include/llvm/ObjectYAML/DXContainerYAML.h14
-rw-r--r--llvm/include/llvm/Support/KnownBits.h3
-rw-r--r--llvm/include/llvm/Support/TypeSize.h2
-rw-r--r--llvm/include/llvm/Support/VirtualFileSystem.h5
-rw-r--r--llvm/include/llvm/Target/GenericOpcodes.td2
-rw-r--r--llvm/include/llvm/Target/GlobalISel/Combine.td33
-rw-r--r--llvm/include/llvm/TargetParser/AArch64TargetParser.h17
-rw-r--r--llvm/include/llvm/TargetParser/ARMTargetParser.def4
-rw-r--r--llvm/include/llvm/TargetParser/RISCVTargetParser.h3
-rw-r--r--llvm/include/llvm/TextAPI/Record.h2
-rw-r--r--llvm/include/llvm/TextAPI/RecordsSlice.h7
-rw-r--r--llvm/lib/AsmParser/LLParser.cpp23
-rw-r--r--llvm/lib/Bitcode/Reader/MetadataLoader.cpp18
-rw-r--r--llvm/lib/Bitcode/Writer/BitcodeWriter.cpp5
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp5
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp14
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp74
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp37
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp100
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp20
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp152
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h5
-rw-r--r--llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp23
-rw-r--r--llvm/lib/CodeGen/TypePromotion.cpp125
-rw-r--r--llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp29
-rw-r--r--llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp33
-rw-r--r--llvm/lib/IR/AsmWriter.cpp11
-rw-r--r--llvm/lib/IR/DIBuilder.cpp58
-rw-r--r--llvm/lib/IR/DebugInfo.cpp13
-rw-r--r--llvm/lib/IR/DebugInfoMetadata.cpp32
-rw-r--r--llvm/lib/IR/Instruction.cpp9
-rw-r--r--llvm/lib/IR/Instructions.cpp137
-rw-r--r--llvm/lib/IR/LLVMContextImpl.h15
-rw-r--r--llvm/lib/IR/Verifier.cpp5
-rw-r--r--llvm/lib/MC/MCExpr.cpp4
-rw-r--r--llvm/lib/MC/MCParser/AsmParser.cpp4
-rw-r--r--llvm/lib/MC/XCOFFObjectWriter.cpp3
-rw-r--r--llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp4
-rw-r--r--llvm/lib/Object/DXContainer.cpp8
-rw-r--r--llvm/lib/ObjectYAML/DXContainerYAML.cpp14
-rw-r--r--llvm/lib/Passes/PassBuilderPipelines.cpp23
-rw-r--r--llvm/lib/Support/ErrorHandling.cpp3
-rw-r--r--llvm/lib/Support/KnownBits.cpp16
-rw-r--r--llvm/lib/Support/Path.cpp2
-rw-r--r--llvm/lib/Support/RISCVISAInfo.cpp2
-rw-r--r--llvm/lib/Target/AArch64/AArch64.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64FastISel.cpp3
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp26
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.h3
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrFormats.td1
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.cpp52
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.h2
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td5
-rw-r--r--llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp3
-rw-r--r--llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td2
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp3
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp23
-rw-r--r--llvm/lib/Target/AArch64/SMEInstrFormats.td2
-rw-r--r--llvm/lib/Target/AArch64/SVEInstrFormats.td10
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp73
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp21
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.h5
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp8
-rw-r--r--llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp229
-rw-r--r--llvm/lib/Target/AMDGPU/BUFInstructions.td245
-rw-r--r--llvm/lib/Target/AMDGPU/DSInstructions.td132
-rw-r--r--llvm/lib/Target/AMDGPU/FLATInstructions.td23
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp10
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp57
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.h4
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.h5
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.td14
-rw-r--r--llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp47
-rw-r--r--llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp7
-rw-r--r--llvm/lib/Target/AMDGPU/SMInstructions.td9
-rw-r--r--llvm/lib/Target/AMDGPU/SOPInstructions.td1
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp10
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h17
-rw-r--r--llvm/lib/Target/AMDGPU/VOPInstructions.td1
-rw-r--r--llvm/lib/Target/ARM/ARM.td1
-rw-r--r--llvm/lib/Target/ARM/ARMFastISel.cpp3
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.cpp10
-rw-r--r--llvm/lib/Target/ARM/ARMSubtarget.cpp3
-rw-r--r--llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp27
-rw-r--r--llvm/lib/Target/DirectX/DXIL.td353
-rw-r--r--llvm/lib/Target/DirectX/DXILOpBuilder.cpp25
-rw-r--r--llvm/lib/Target/DirectX/DXILOpBuilder.h3
-rw-r--r--llvm/lib/Target/DirectX/DXILOpLowering.cpp3
-rw-r--r--llvm/lib/Target/DirectX/DXILShaderFlags.cpp2
-rw-r--r--llvm/lib/Target/DirectX/DXILShaderFlags.h6
-rw-r--r--llvm/lib/Target/Hexagon/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonLoopAlign.cpp216
-rw-r--r--llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp9
-rw-r--r--llvm/lib/Target/Hexagon/HexagonTargetMachine.h1
-rw-r--r--llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp2
-rw-r--r--llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp13
-rw-r--r--llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp4
-rw-r--r--llvm/lib/Target/PowerPC/PPC.h6
-rw-r--r--llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp58
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.cpp121
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.h13
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstr64Bit.td12
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.cpp1
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.td12
-rw-r--r--llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp119
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp8
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h1
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp6
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h5
-rw-r--r--llvm/lib/Target/RISCV/RISCVFeatures.td2
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp25
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp7
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.td10
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSiFive7.td1
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp21
-rw-r--r--llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp440
-rw-r--r--llvm/lib/Target/Sparc/Sparc.td3
-rw-r--r--llvm/lib/Target/WebAssembly/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssembly.h2
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp7
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp5
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyRefTypeMem2Local.cpp91
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp26
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp10
-rw-r--r--llvm/lib/Target/X86/X86CompressEVEX.cpp2
-rw-r--r--llvm/lib/Target/X86/X86ExpandPseudo.cpp49
-rw-r--r--llvm/lib/Target/X86/X86FrameLowering.cpp67
-rw-r--r--llvm/lib/Target/X86/X86ISelDAGToDAG.cpp10
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp114
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.h4
-rw-r--r--llvm/lib/Target/X86/X86InstrCompiler.td81
-rw-r--r--llvm/lib/Target/X86/X86InstrSystem.td2
-rw-r--r--llvm/lib/Target/X86/X86InstrUtils.td1
-rw-r--r--llvm/lib/Target/X86/X86InstrVMX.td4
-rw-r--r--llvm/lib/TargetParser/AArch64TargetParser.cpp15
-rw-r--r--llvm/lib/TargetParser/RISCVTargetParser.cpp24
-rw-r--r--llvm/lib/TextAPI/RecordVisitor.cpp2
-rw-r--r--llvm/lib/Transforms/IPO/FunctionImport.cpp19
-rw-r--r--llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp108
-rw-r--r--llvm/lib/Transforms/Scalar/ConstraintElimination.cpp6
-rw-r--r--llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp14
-rw-r--r--llvm/lib/Transforms/Scalar/Reassociate.cpp8
-rw-r--r--llvm/lib/Transforms/Scalar/SROA.cpp98
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp66
-rw-r--r--llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp220
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h8
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll28
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll12
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll12
-rw-r--r--llvm/test/Assembler/debug-info.ll16
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir1244
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/abs.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/aes.ll41
-rw-r--r--llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll12
-rw-r--r--llvm/test/CodeGen/AArch64/and-mask-removal.ll18
-rw-r--r--llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/live-debugvalues-sve.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/load.ll318
-rw-r--r--llvm/test/CodeGen/AArch64/misched-fusion-aes.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/peephole-insvigpr.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/setcc_knownbits.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/shufflevector.ll565
-rw-r--r--llvm/test/CodeGen/AArch64/signed-truncation-check.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sme-disable-gisel-fisel.ll65
-rw-r--r--llvm/test/CodeGen/AArch64/store.ll342
-rw-r--r--llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll42
-rw-r--r--llvm/test/CodeGen/AArch64/sve-localstackalloc.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-pfalse-machine-cse.mir6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-cmpeq.mir10
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilege.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilegt.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehi.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehs.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilele.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelo.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilels.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelt.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilerw.mir16
-rw-r--r--llvm/test/CodeGen/AArch64/sve-ptest-removal-whilewr.mir16
-rw-r--r--llvm/test/CodeGen/AArch64/sve2p1_copy_pnr.mir12
-rw-r--r--llvm/test/CodeGen/AArch64/typepromotion-overflow.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll24
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir66
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll36
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir177
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll13
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir161
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir56
-rw-r--r--llvm/test/CodeGen/AMDGPU/calling-conventions.ll1214
-rw-r--r--llvm/test/CodeGen/AMDGPU/clamp.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll13
-rw-r--r--llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll272
-rw-r--r--llvm/test/CodeGen/AMDGPU/ctlz.ll27
-rw-r--r--llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll77
-rw-r--r--llvm/test/CodeGen/AMDGPU/div_i128.ll3003
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-scratch.ll20
-rw-r--r--llvm/test/CodeGen/AMDGPU/fmaxnum.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fminnum.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/iglp.opt.reentry.ll15
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll69
-rw-r--r--llvm/test/CodeGen/AMDGPU/lower-work-group-id-intrinsics-hsa.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/omod.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/rem_i128.ll3014
-rw-r--r--llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir57
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr-mark-last-scratch-load.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.mir43
-rw-r--r--llvm/test/CodeGen/DirectX/frac.ll34
-rw-r--r--llvm/test/CodeGen/DirectX/round.ll31
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-balign.ll91
-rw-r--r--llvm/test/CodeGen/Hexagon/loop_align_count.ll116
-rw-r--r--llvm/test/CodeGen/Hexagon/loop_align_count.mir131
-rw-r--r--llvm/test/CodeGen/Hexagon/v6-haar-balign32.ll117
-rw-r--r--llvm/test/CodeGen/M68k/pipeline.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll64
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll110
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll94
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll346
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tls-ld-xcoff-reloc-large.ll349
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tls-local-dynamic.ll396
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc-large.ll217
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc.ll219
-rw-r--r--llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll272
-rw-r--r--llvm/test/CodeGen/PowerPC/crsave.ll324
-rw-r--r--llvm/test/CodeGen/PowerPC/ctrloops-pseudo.ll40
-rw-r--r--llvm/test/CodeGen/PowerPC/expand-isel-to-branch.ll12
-rw-r--r--llvm/test/CodeGen/PowerPC/fp-strict-fcmp-spe.ll260
-rw-r--r--llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll103
-rw-r--r--llvm/test/CodeGen/PowerPC/fptoui-be-crash.ll78
-rw-r--r--llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll124
-rw-r--r--llvm/test/CodeGen/PowerPC/funnel-shift.ll605
-rw-r--r--llvm/test/CodeGen/PowerPC/i1-to-double.ll52
-rw-r--r--llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll15
-rw-r--r--llvm/test/CodeGen/PowerPC/pr43976.ll18
-rw-r--r--llvm/test/CodeGen/PowerPC/pr49509.ll35
-rw-r--r--llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll2
-rw-r--r--llvm/test/CodeGen/PowerPC/select-cc-no-isel.ll20
-rw-r--r--llvm/test/CodeGen/PowerPC/select.ll99
-rw-r--r--llvm/test/CodeGen/PowerPC/select_const.ll225
-rw-r--r--llvm/test/CodeGen/PowerPC/smulfixsat.ll36
-rw-r--r--llvm/test/CodeGen/PowerPC/spe.ll176
-rw-r--r--llvm/test/CodeGen/PowerPC/srem-seteq-illegal-types.ll29
-rw-r--r--llvm/test/CodeGen/PowerPC/umulfixsat.ll20
-rw-r--r--llvm/test/CodeGen/PowerPC/umulo-128-legalisation-lowering.ll180
-rw-r--r--llvm/test/CodeGen/PowerPC/urem-seteq-illegal-types.ll74
-rw-r--r--llvm/test/CodeGen/PowerPC/varargs.ll39
-rw-r--r--llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll58
-rw-r--r--llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll58
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-rmw.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-signext.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/attributes.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/machine-combiner.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/pr69586.ll170
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll860
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll34
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll34
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll559
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll144
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll273
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll1150
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/typepromotion-overflow.ll5
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll4
-rw-r--r--llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir2
-rw-r--r--llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll2
-rw-r--r--llvm/test/CodeGen/WebAssembly/multivalue.ll10
-rw-r--r--llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll2
-rw-r--r--llvm/test/CodeGen/WebAssembly/ref-type-mem2local.ll57
-rw-r--r--llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll11
-rw-r--r--llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll109
-rw-r--r--llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll13
-rw-r--r--llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/apx/compress-evex.mir9
-rw-r--r--llvm/test/CodeGen/X86/apx/sub.ll36
-rw-r--r--llvm/test/CodeGen/X86/avx-cmp.ll27
-rw-r--r--llvm/test/CodeGen/X86/avx512-insert-extract.ll18
-rw-r--r--llvm/test/CodeGen/X86/avx512-vec-cmp.ll27
-rw-r--r--llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll12
-rw-r--r--llvm/test/CodeGen/X86/cmov-fp.ll548
-rw-r--r--llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll219
-rw-r--r--llvm/test/CodeGen/X86/cmp.ll52
-rw-r--r--llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll47
-rw-r--r--llvm/test/CodeGen/X86/cvt16.ll1
-rw-r--r--llvm/test/CodeGen/X86/dagcombine-and-setcc.ll3
-rw-r--r--llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll306
-rw-r--r--llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll7
-rw-r--r--llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll4
-rw-r--r--llvm/test/CodeGen/X86/fp-roundeven.ll1
-rw-r--r--llvm/test/CodeGen/X86/fpclamptosat_vec.ll124
-rw-r--r--llvm/test/CodeGen/X86/half.ll109
-rw-r--r--llvm/test/CodeGen/X86/horizontal-reduce-umax.ll2
-rw-r--r--llvm/test/CodeGen/X86/horizontal-reduce-umin.ll2
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-memop.ll27
-rw-r--r--llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll70
-rw-r--r--llvm/test/CodeGen/X86/kshift.ll12
-rw-r--r--llvm/test/CodeGen/X86/lsr-addrecloops.ll98
-rw-r--r--llvm/test/CodeGen/X86/movmsk-cmp.ll97
-rw-r--r--llvm/test/CodeGen/X86/or-branch.ll9
-rw-r--r--llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll142
-rw-r--r--llvm/test/CodeGen/X86/pr31088.ll20
-rw-r--r--llvm/test/CodeGen/X86/pr33747.ll17
-rw-r--r--llvm/test/CodeGen/X86/pr34605.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr37025.ll38
-rw-r--r--llvm/test/CodeGen/X86/pr38795.ll131
-rw-r--r--llvm/test/CodeGen/X86/pr38803.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr43509.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr46455.ll5
-rw-r--r--llvm/test/CodeGen/X86/pr57340.ll188
-rw-r--r--llvm/test/CodeGen/X86/pr78897.ll4
-rw-r--r--llvm/test/CodeGen/X86/prefer-fpext-splat.ll2
-rw-r--r--llvm/test/CodeGen/X86/select-of-fp-constants.ll2
-rw-r--r--llvm/test/CodeGen/X86/select-of-half-constants.ll8
-rw-r--r--llvm/test/CodeGen/X86/setcc-logic.ll76
-rw-r--r--llvm/test/CodeGen/X86/setcc-lowering.ll97
-rw-r--r--llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll12
-rw-r--r--llvm/test/CodeGen/X86/swifterror.ll10
-rw-r--r--llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll119
-rw-r--r--llvm/test/CodeGen/X86/tail-opts.ll5
-rw-r--r--llvm/test/CodeGen/X86/tailcall-extract.ll8
-rw-r--r--llvm/test/CodeGen/X86/test-shrink-bug.ll36
-rw-r--r--llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll1
-rw-r--r--llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll24
-rw-r--r--llvm/test/CodeGen/X86/vector-half-conversions.ll44
-rw-r--r--llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll976
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll20
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll20
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-umax.ll2
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-umin.ll2
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll6
-rw-r--r--llvm/test/CodeGen/X86/vselect.ll26
-rw-r--r--llvm/test/CodeGen/X86/x86-32-intrcc.ll10
-rw-r--r--llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll8
-rw-r--r--llvm/test/CodeGen/X86/x86-64-intrcc.ll3
-rw-r--r--llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll52
-rw-r--r--llvm/test/DebugInfo/AArch64/ptrauth.ll70
-rw-r--r--llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test5
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll459
-rw-r--r--llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s6
-rw-r--r--llvm/test/MC/AMDGPU/gfx1011_err.s2
-rw-r--r--llvm/test/MC/AMDGPU/gfx1030_err.s2
-rw-r--r--llvm/test/MC/AMDGPU/gfx10_err_pos.s10
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_unsupported.s12
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s6
-rw-r--r--llvm/test/MC/AMDGPU/gfx940_asm_features.s10
-rw-r--r--llvm/test/MC/AMDGPU/gfx940_err.s12
-rw-r--r--llvm/test/MC/AMDGPU/sopk-err.s173
-rw-r--r--llvm/test/MC/AMDGPU/sopk.s49
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt118
-rw-r--r--llvm/test/MC/RISCV/rv32zacas-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv32zacas-valid.s12
-rw-r--r--llvm/test/MC/RISCV/rv64zacas-valid.s6
-rw-r--r--llvm/test/MC/RISCV/rvzabha-zacas-valid.s12
-rw-r--r--llvm/test/TableGen/HwModeEncodeDecode3.td168
-rw-r--r--llvm/test/ThinLTO/X86/visibility-elf.ll6
-rw-r--r--llvm/test/ThinLTO/X86/visibility-macho.ll4
-rw-r--r--llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll6
-rw-r--r--llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll24
-rw-r--r--llvm/test/Transforms/ConstraintElimination/zext-for-per-formula-reasoning.ll4
-rw-r--r--llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll7
-rw-r--r--llvm/test/Transforms/FunctionImport/funcimport.ll23
-rw-r--r--llvm/test/Transforms/Inline/inline_stats.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/maxnum.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/minnum.ll4
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll68
-rw-r--r--llvm/test/Transforms/LoopRotate/oz-disable.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll159
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll90
-rw-r--r--llvm/test/Transforms/MemProfContextDisambiguation/inlined3.ll3
-rw-r--r--llvm/test/Transforms/PhaseOrdering/enable-loop-header-duplication-oz.ll57
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll653
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll18
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_clear_undefs.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/multi-nodes-to-shuffle.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll115
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reorder-node.ll48
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reused-pointer.ll22
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/split-load8_2-unord.ll65
-rw-r--r--llvm/test/Transforms/SROA/vector-promotion.ll364
-rw-r--r--llvm/test/Transforms/TypePromotion/ARM/icmps.ll7
-rw-r--r--llvm/test/Transforms/TypePromotion/ARM/wrapping.ll10
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll15
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected13
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test4
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/set-symbol-visibility.test311
-rw-r--r--llvm/test/tools/llvm-readobj/XCOFF/loader-section-relocation.test32
-rw-r--r--llvm/test/tools/llvm-readobj/XCOFF/loader-section-symbol.test13
-rw-r--r--llvm/test/tools/llvm-readobj/XCOFF/relocations.test22
-rw-r--r--llvm/test/tools/llvm-readobj/XCOFF/symbols.test9
-rw-r--r--llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp14
-rw-r--r--llvm/tools/llvm-objcopy/ObjcopyOptions.cpp42
-rw-r--r--llvm/tools/llvm-objcopy/ObjcopyOpts.td11
-rw-r--r--llvm/tools/llvm-profgen/PerfReader.cpp11
-rw-r--r--llvm/tools/llvm-readobj/XCOFFDumper.cpp22
-rw-r--r--llvm/tools/obj2yaml/dxcontainer2yaml.cpp4
-rw-r--r--llvm/unittests/ADT/APFloatTest.cpp10
-rw-r--r--llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp68
-rw-r--r--llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp80
-rw-r--r--llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp153
-rw-r--r--llvm/unittests/IR/MetadataTest.cpp85
-rw-r--r--llvm/unittests/IR/VerifierTest.cpp28
-rw-r--r--llvm/unittests/Support/ErrorTest.cpp26
-rw-r--r--llvm/unittests/Support/KnownBitsTest.cpp35
-rw-r--r--llvm/unittests/Support/RISCVISAInfoTest.cpp2
-rw-r--r--llvm/unittests/Support/TypeSizeTest.cpp1
-rw-r--r--llvm/unittests/TargetParser/TargetParserTest.cpp7
-rw-r--r--llvm/utils/TableGen/AsmMatcherEmitter.cpp89
-rw-r--r--llvm/utils/TableGen/CodeEmitterGen.cpp8
-rw-r--r--llvm/utils/TableGen/CodeGenHwModes.h5
-rw-r--r--llvm/utils/TableGen/CodeGenTarget.cpp2
-rw-r--r--llvm/utils/TableGen/DXILEmitter.cpp408
-rw-r--r--llvm/utils/TableGen/DecoderEmitter.cpp10
-rw-r--r--llvm/utils/TableGen/SearchableTableEmitter.cpp9
-rw-r--r--llvm/utils/TableGen/X86DisassemblerTables.cpp2
-rw-r--r--llvm/utils/UpdateTestChecks/common.py15
-rw-r--r--llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/modernize/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/utils/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/lldb/test/BUILD.gn3
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn1
-rwxr-xr-xllvm/utils/update_test_checks.py10
-rw-r--r--llvm/utils/vim/syntax/mir.vim2
-rw-r--r--mlir/CMakeLists.txt2
-rw-r--r--mlir/docs/Dialects/GPU.md1
-rw-r--r--mlir/docs/PassManagement.md3
-rw-r--r--mlir/docs/PatternRewriter.md2
-rw-r--r--mlir/examples/CMakeLists.txt1
-rw-r--r--mlir/examples/transform-opt/CMakeLists.txt26
-rw-r--r--mlir/examples/transform-opt/README.md40
-rw-r--r--mlir/examples/transform-opt/mlir-transform-opt.cpp389
-rw-r--r--mlir/include/mlir/Config/mlir-config.h.cmake4
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td6
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td9
-rw-r--r--mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h10
-rw-r--r--mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h20
-rw-r--r--mlir/include/mlir/Dialect/EmitC/IR/EmitC.td179
-rw-r--r--mlir/include/mlir/Dialect/GPU/IR/GPUOps.td15
-rw-r--r--mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td43
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h10
-rw-r--r--mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td42
-rw-r--r--mlir/include/mlir/Dialect/Transform/Transforms/Passes.td24
-rw-r--r--mlir/include/mlir/Dialect/Vector/IR/VectorOps.td10
-rw-r--r--mlir/include/mlir/IR/PatternMatch.h2
-rw-r--r--mlir/include/mlir/InitAllPasses.h3
-rw-r--r--mlir/include/mlir/Transforms/DialectConversion.h3
-rw-r--r--mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp2
-rw-r--r--mlir/lib/Conversion/AffineToStandard/CMakeLists.txt3
-rw-r--r--mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp3
-rw-r--r--mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp3
-rw-r--r--mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp57
-rw-r--r--mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp54
-rw-r--r--mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp98
-rw-r--r--mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp5
-rw-r--r--mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp3
-rw-r--r--mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp25
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp114
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp3
-rw-r--r--mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp5
-rw-r--r--mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp65
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp18
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp12
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp100
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp35
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h27
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp2
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp12
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp16
-rw-r--r--mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp95
-rw-r--r--mlir/lib/Dialect/Vector/IR/VectorOps.cpp74
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp44
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp126
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp74
-rw-r--r--mlir/lib/Target/Cpp/TranslateToCpp.cpp80
-rw-r--r--mlir/lib/Target/LLVM/NVVM/Target.cpp9
-rw-r--r--mlir/lib/Target/LLVMIR/DebugTranslation.cpp3
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp198
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleTranslation.cpp5
-rw-r--r--mlir/lib/Transforms/Utils/DialectConversion.cpp131
-rw-r--r--mlir/test/CMakeLists.txt1
-rw-r--r--mlir/test/Conversion/AffineToStandard/lower-affine.mlir54
-rw-r--r--mlir/test/Dialect/AMDGPU/optimize_shmem_reads_writes.mlir50
-rw-r--r--mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir46
-rw-r--r--mlir/test/Dialect/Affine/access-analysis.mlir67
-rw-r--r--mlir/test/Dialect/Arith/expand-ops.mlir45
-rw-r--r--mlir/test/Dialect/EmitC/invalid_ops.mlir24
-rw-r--r--mlir/test/Dialect/EmitC/ops.mlir17
-rw-r--r--mlir/test/Dialect/GPU/ops.mlir15
-rw-r--r--mlir/test/Dialect/Linalg/flatten-elementwise.mlir99
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir2
-rw-r--r--mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir45
-rw-r--r--mlir/test/Dialect/SparseTensor/invalid.mlir10
-rw-r--r--mlir/test/Dialect/SparseTensor/roundtrip.mlir17
-rw-r--r--mlir/test/Dialect/Tensor/canonicalize.mlir35
-rw-r--r--mlir/test/Dialect/Transform/include/test-interpreter-external-concurrent-source.mlir25
-rw-r--r--mlir/test/Dialect/Transform/include/test-interpreter-external-source.mlir14
-rw-r--r--mlir/test/Dialect/Transform/multi-arg-top-level-ops.mlir63
-rw-r--r--mlir/test/Dialect/Transform/multi-arg-top-level-params.mlir39
-rw-r--r--mlir/test/Dialect/Transform/multi-arg-top-level-values.mlir32
-rw-r--r--mlir/test/Dialect/Transform/test-interpreter-debug.mlir58
-rw-r--r--mlir/test/Dialect/Transform/test-interpreter-external-concurrent.mlir4
-rw-r--r--mlir/test/Dialect/Transform/test-interpreter-external.mlir4
-rw-r--r--mlir/test/Dialect/Transform/test-interpreter.mlir1
-rw-r--r--mlir/test/Dialect/Transform/test-pass-application.mlir78
-rw-r--r--mlir/test/Dialect/Transform/test-pattern-application.mlir384
-rw-r--r--mlir/test/Dialect/Transform/test-pdl-extension.mlir68
-rw-r--r--mlir/test/Dialect/Transform/transform-state-extension.mlir102
-rw-r--r--mlir/test/Dialect/Vector/canonicalize.mlir73
-rw-r--r--mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir42
-rw-r--r--mlir/test/Dialect/Vector/vector-transforms.mlir45
-rw-r--r--mlir/test/Dialect/Vector/vector-warp-distribute.mlir25
-rw-r--r--mlir/test/Examples/transform-opt/empty.mlir12
-rw-r--r--mlir/test/Examples/transform-opt/external-decl.mlir18
-rw-r--r--mlir/test/Examples/transform-opt/external-def.mlir8
-rw-r--r--mlir/test/Examples/transform-opt/pass.mlir19
-rw-r--r--mlir/test/Examples/transform-opt/self-contained.mlir21
-rw-r--r--mlir/test/Examples/transform-opt/syntax-error.mlir5
-rw-r--r--mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir10
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir8
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir45
-rwxr-xr-xmlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir85
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir18
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir24
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir24
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir171
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir23
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir237
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir83
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir114
-rwxr-xr-xmlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir269
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir46
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir16
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir16
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir12
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir17
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir12
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-contraction.mlir18
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir4
-rw-r--r--mlir/test/Target/Cpp/bitwise_operators.mlir20
-rw-r--r--mlir/test/Target/Cpp/logical_operators.mlir14
-rw-r--r--mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir44
-rw-r--r--mlir/test/Target/LLVMIR/erase-dangling-constants.mlir73
-rw-r--r--mlir/test/Target/LLVMIR/openmp-llvm.mlir37
-rw-r--r--mlir/test/Target/LLVMIR/openmp-private.mlir142
-rw-r--r--mlir/test/lib/Dialect/Affine/CMakeLists.txt2
-rw-r--r--mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp83
-rw-r--r--mlir/test/lib/Dialect/Test/TestPatterns.cpp1
-rw-r--r--mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp6
-rw-r--r--mlir/test/lit.cfg.py1
-rw-r--r--mlir/test/mlir-cpu-runner/expand-arith-ops.mlir47
-rw-r--r--mlir/test/python/dialects/gpu/dialect.py2
-rw-r--r--mlir/tools/mlir-opt/mlir-opt.cpp2
-rw-r--r--mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp7
-rw-r--r--openmp/CMakeLists.txt12
-rw-r--r--openmp/libomptarget/plugins-nextgen/CMakeLists.txt1
-rw-r--r--openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt1
-rw-r--r--openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt1
-rw-r--r--openmp/libomptarget/src/CMakeLists.txt1
-rw-r--r--utils/bazel/llvm-project-overlay/clang/BUILD.bazel3
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel8
-rw-r--r--utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl1
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel12
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel19
1608 files changed, 38112 insertions, 19996 deletions
diff --git a/.github/new-prs-labeler.yml b/.github/new-prs-labeler.yml
index 8ed976fbdddc..9a580c6d6984 100644
--- a/.github/new-prs-labeler.yml
+++ b/.github/new-prs-labeler.yml
@@ -869,6 +869,9 @@ backend:SystemZ:
third-party:unittests:
- third-party/unittests/**
+third-party:benchmark:
+ - third-party/benchmark/**
+
llvm:binary-utilities:
- llvm/docs/CommandGuide/llvm-*
- llvm/include/llvm/BinaryFormat/**
diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h
index a177178769e4..c170fa6397cc 100644
--- a/bolt/include/bolt/Core/BinaryFunction.h
+++ b/bolt/include/bolt/Core/BinaryFunction.h
@@ -2056,6 +2056,14 @@ public:
/// Returns false if disassembly failed.
Error disassemble();
+ /// An external interface to register a branch while the function is in
+ /// disassembled state. Allows to make custom modifications to the
+ /// disassembler. E.g., a pre-CFG pass can add an instruction and register
+ /// a branch that will later be used during the CFG construction.
+ ///
+ /// Return a label at the branch destination.
+ MCSymbol *registerBranch(uint64_t Src, uint64_t Dst);
+
Error handlePCRelOperand(MCInst &Instruction, uint64_t Address,
uint64_t Size);
diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h
index eeb7609ff6b5..6bb76d1b917d 100644
--- a/bolt/include/bolt/Core/MCPlusBuilder.h
+++ b/bolt/include/bolt/Core/MCPlusBuilder.h
@@ -1183,11 +1183,16 @@ public:
bool clearOffset(MCInst &Inst) const;
/// Return the label of \p Inst, if available.
- MCSymbol *getLabel(const MCInst &Inst) const;
+ MCSymbol *getInstLabel(const MCInst &Inst) const;
+
+ /// Set the label of \p Inst or return the existing label for the instruction.
+ /// This label will be emitted right before \p Inst is emitted to MCStreamer.
+ MCSymbol *getOrCreateInstLabel(MCInst &Inst, const Twine &Name,
+ MCContext *Ctx) const;
/// Set the label of \p Inst. This label will be emitted right before \p Inst
/// is emitted to MCStreamer.
- bool setLabel(MCInst &Inst, MCSymbol *Label) const;
+ void setInstLabel(MCInst &Inst, MCSymbol *Label) const;
/// Get instruction size specified via annotation.
std::optional<uint32_t> getSize(const MCInst &Inst) const;
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index d544ece13a83..b29ebbbfa18c 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -1967,7 +1967,7 @@ void BinaryContext::printInstruction(raw_ostream &OS, const MCInst &Instruction,
OS << " # Offset: " << *Offset;
if (std::optional<uint32_t> Size = MIB->getSize(Instruction))
OS << " # Size: " << *Size;
- if (MCSymbol *Label = MIB->getLabel(Instruction))
+ if (MCSymbol *Label = MIB->getInstLabel(Instruction))
OS << " # Label: " << *Label;
MIB->printAnnotations(Instruction, OS);
diff --git a/bolt/lib/Core/BinaryEmitter.cpp b/bolt/lib/Core/BinaryEmitter.cpp
index d4b668c1d7e7..97d19b75200f 100644
--- a/bolt/lib/Core/BinaryEmitter.cpp
+++ b/bolt/lib/Core/BinaryEmitter.cpp
@@ -489,7 +489,7 @@ void BinaryEmitter::emitFunctionBody(BinaryFunction &BF, FunctionFragment &FF,
if (!EmitCodeOnly) {
// A symbol to be emitted before the instruction to mark its location.
- MCSymbol *InstrLabel = BC.MIB->getLabel(Instr);
+ MCSymbol *InstrLabel = BC.MIB->getInstLabel(Instr);
if (opts::UpdateDebugSections && BF.getDWARFUnit()) {
LastLocSeen = emitLineInfo(BF, Instr.getLoc(), LastLocSeen,
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index 54f2f9d972a4..ce4dd29f542b 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -1424,7 +1424,7 @@ add_instruction:
InstrMapType::iterator II = Instructions.find(Offset);
assert(II != Instructions.end() && "reference to non-existing instruction");
- BC.MIB->setLabel(II->second, Label);
+ BC.MIB->setInstLabel(II->second, Label);
}
// Reset symbolizer for the disassembler.
@@ -1445,6 +1445,16 @@ add_instruction:
return Error::success();
}
+MCSymbol *BinaryFunction::registerBranch(uint64_t Src, uint64_t Dst) {
+ assert(CurrentState == State::Disassembled &&
+ "Cannot register branch unless function is in disassembled state.");
+ assert(containsAddress(Src) && containsAddress(Dst) &&
+ "Cannot register external branch.");
+ MCSymbol *Target = getOrCreateLocalLabel(Dst);
+ TakenBranches.emplace_back(Src - getAddress(), Dst - getAddress());
+ return Target;
+}
+
bool BinaryFunction::scanExternalRefs() {
bool Success = true;
bool DisassemblyFailed = false;
@@ -1759,13 +1769,6 @@ void BinaryFunction::postProcessJumpTables() {
}
}
}
-
- // Remove duplicates branches. We can get a bunch of them from jump tables.
- // Without doing jump table value profiling we don't have use for extra
- // (duplicate) branches.
- llvm::sort(TakenBranches);
- auto NewEnd = std::unique(TakenBranches.begin(), TakenBranches.end());
- TakenBranches.erase(NewEnd, TakenBranches.end());
}
bool BinaryFunction::validateExternallyReferencedOffsets() {
@@ -2128,6 +2131,13 @@ Error BinaryFunction::buildCFG(MCPlusBuilder::AllocatorIdTy AllocatorId) {
// e.g. exit(3), etc. Otherwise we'll see a false fall-through
// blocks.
+ // Remove duplicates branches. We can get a bunch of them from jump tables.
+ // Without doing jump table value profiling we don't have a use for extra
+ // (duplicate) branches.
+ llvm::sort(TakenBranches);
+ auto NewEnd = std::unique(TakenBranches.begin(), TakenBranches.end());
+ TakenBranches.erase(NewEnd, TakenBranches.end());
+
for (std::pair<uint32_t, uint32_t> &Branch : TakenBranches) {
LLVM_DEBUG(dbgs() << "registering branch [0x"
<< Twine::utohexstr(Branch.first) << "] -> [0x"
diff --git a/bolt/lib/Core/Exceptions.cpp b/bolt/lib/Core/Exceptions.cpp
index 54618aeb95cc..82bddf76d5b8 100644
--- a/bolt/lib/Core/Exceptions.cpp
+++ b/bolt/lib/Core/Exceptions.cpp
@@ -408,12 +408,11 @@ void BinaryFunction::updateEHRanges() {
// Same symbol is used for the beginning and the end of the range.
MCSymbol *EHSymbol;
- if (MCSymbol *InstrLabel = BC.MIB->getLabel(Instr)) {
+ if (MCSymbol *InstrLabel = BC.MIB->getInstLabel(Instr)) {
EHSymbol = InstrLabel;
} else {
std::unique_lock<llvm::sys::RWMutex> Lock(BC.CtxMutex);
- EHSymbol = BC.Ctx->createNamedTempSymbol("EH");
- BC.MIB->setLabel(Instr, EHSymbol);
+ EHSymbol = BC.MIB->getOrCreateInstLabel(Instr, "EH", BC.Ctx.get());
}
// At this point we could be in one of the following states:
diff --git a/bolt/lib/Core/MCPlusBuilder.cpp b/bolt/lib/Core/MCPlusBuilder.cpp
index 44e5f88d8950..bd9bd0c45922 100644
--- a/bolt/lib/Core/MCPlusBuilder.cpp
+++ b/bolt/lib/Core/MCPlusBuilder.cpp
@@ -12,6 +12,7 @@
#include "bolt/Core/MCPlusBuilder.h"
#include "bolt/Core/MCPlus.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrDesc.h"
@@ -266,17 +267,29 @@ bool MCPlusBuilder::clearOffset(MCInst &Inst) const {
return true;
}
-MCSymbol *MCPlusBuilder::getLabel(const MCInst &Inst) const {
+MCSymbol *MCPlusBuilder::getInstLabel(const MCInst &Inst) const {
if (std::optional<int64_t> Label =
getAnnotationOpValue(Inst, MCAnnotation::kLabel))
return reinterpret_cast<MCSymbol *>(*Label);
return nullptr;
}
-bool MCPlusBuilder::setLabel(MCInst &Inst, MCSymbol *Label) const {
+MCSymbol *MCPlusBuilder::getOrCreateInstLabel(MCInst &Inst, const Twine &Name,
+ MCContext *Ctx) const {
+ MCSymbol *Label = getInstLabel(Inst);
+ if (Label)
+ return Label;
+
+ Label = Ctx->createNamedTempSymbol(Name);
+ setAnnotationOpValue(Inst, MCAnnotation::kLabel,
+ reinterpret_cast<int64_t>(Label));
+ return Label;
+}
+
+void MCPlusBuilder::setInstLabel(MCInst &Inst, MCSymbol *Label) const {
+ assert(!getInstLabel(Inst) && "Instruction already has assigned label.");
setAnnotationOpValue(Inst, MCAnnotation::kLabel,
reinterpret_cast<int64_t>(Label));
- return true;
}
std::optional<uint32_t> MCPlusBuilder::getSize(const MCInst &Inst) const {
diff --git a/bolt/lib/Rewrite/LinuxKernelRewriter.cpp b/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
index 6377c1197253..0d7dc1070ce7 100644
--- a/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
+++ b/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
@@ -770,11 +770,8 @@ Error LinuxKernelRewriter::rewriteORCTables() {
continue;
// Issue label for the instruction.
- MCSymbol *Label = BC.MIB->getLabel(Inst);
- if (!Label) {
- Label = BC.Ctx->createTempSymbol("__ORC_");
- BC.MIB->setLabel(Inst, Label);
- }
+ MCSymbol *Label =
+ BC.MIB->getOrCreateInstLabel(Inst, "__ORC_", BC.Ctx.get());
if (Error E = emitORCEntry(0, *ErrorOrState, Label))
return E;
@@ -908,11 +905,8 @@ Error LinuxKernelRewriter::readStaticCalls() {
BC.MIB->addAnnotation(*Inst, "StaticCall", EntryID);
- MCSymbol *Label = BC.MIB->getLabel(*Inst);
- if (!Label) {
- Label = BC.Ctx->createTempSymbol("__SC_");
- BC.MIB->setLabel(*Inst, Label);
- }
+ MCSymbol *Label =
+ BC.MIB->getOrCreateInstLabel(*Inst, "__SC_", BC.Ctx.get());
StaticCallEntries.push_back({EntryID, BF, Label});
}
diff --git a/bolt/test/runtime/instrument-wrong-target.s b/bolt/test/runtime/X86/instrument-wrong-target.s
index b25c924ffbcc..343d93a89ed1 100644
--- a/bolt/test/runtime/instrument-wrong-target.s
+++ b/bolt/test/runtime/X86/instrument-wrong-target.s
@@ -1,8 +1,8 @@
# Test that BOLT errs when trying to instrument a binary with a different
# architecture than the one BOLT is built for.
-# REQUIRES: x86_64-linux,bolt-runtime
-# REQUIRES: target-x86_64 && aarch64-registered-target
+# REQUIRES: system-linux,bolt-runtime
+# REQUIRES: aarch64-registered-target
# RUN: llvm-mc -triple aarch64 -filetype=obj %s -o %t.o
# RUN: ld.lld -q -pie -o %t.exe %t.o
diff --git a/clang-tools-extra/clang-tidy/modernize/CMakeLists.txt b/clang-tools-extra/clang-tidy/modernize/CMakeLists.txt
index 28ca52f46943..6852db6c2ee3 100644
--- a/clang-tools-extra/clang-tidy/modernize/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/modernize/CMakeLists.txt
@@ -31,6 +31,7 @@ add_clang_library(clangTidyModernizeModule
UseBoolLiteralsCheck.cpp
UseConstraintsCheck.cpp
UseDefaultMemberInitCheck.cpp
+ UseDesignatedInitializersCheck.cpp
UseEmplaceCheck.cpp
UseEqualsDefaultCheck.cpp
UseEqualsDeleteCheck.cpp
diff --git a/clang-tools-extra/clang-tidy/modernize/ModernizeTidyModule.cpp b/clang-tools-extra/clang-tidy/modernize/ModernizeTidyModule.cpp
index 654f4bd0c6ba..e96cf274f58c 100644
--- a/clang-tools-extra/clang-tidy/modernize/ModernizeTidyModule.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/ModernizeTidyModule.cpp
@@ -32,6 +32,7 @@
#include "UseBoolLiteralsCheck.h"
#include "UseConstraintsCheck.h"
#include "UseDefaultMemberInitCheck.h"
+#include "UseDesignatedInitializersCheck.h"
#include "UseEmplaceCheck.h"
#include "UseEqualsDefaultCheck.h"
#include "UseEqualsDeleteCheck.h"
@@ -68,6 +69,8 @@ public:
CheckFactories.registerCheck<MakeSharedCheck>("modernize-make-shared");
CheckFactories.registerCheck<MakeUniqueCheck>("modernize-make-unique");
CheckFactories.registerCheck<PassByValueCheck>("modernize-pass-by-value");
+ CheckFactories.registerCheck<UseDesignatedInitializersCheck>(
+ "modernize-use-designated-initializers");
CheckFactories.registerCheck<UseStartsEndsWithCheck>(
"modernize-use-starts-ends-with");
CheckFactories.registerCheck<UseStdNumbersCheck>(
diff --git a/clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.cpp
new file mode 100644
index 000000000000..ebc5338d0a7b
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.cpp
@@ -0,0 +1,184 @@
+//===--- UseDesignatedInitializersCheck.cpp - clang-tidy ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UseDesignatedInitializersCheck.h"
+#include "../utils/DesignatedInitializers.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Stmt.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersMacros.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Lexer.h"
+
+using namespace clang::ast_matchers;
+
+namespace clang::tidy::modernize {
+
+static constexpr char IgnoreSingleElementAggregatesName[] =
+ "IgnoreSingleElementAggregates";
+static constexpr bool IgnoreSingleElementAggregatesDefault = true;
+
+static constexpr char RestrictToPODTypesName[] = "RestrictToPODTypes";
+static constexpr bool RestrictToPODTypesDefault = false;
+
+static constexpr char IgnoreMacrosName[] = "IgnoreMacros";
+static constexpr bool IgnoreMacrosDefault = true;
+
+namespace {
+
+struct Designators {
+
+ Designators(const InitListExpr *InitList) : InitList(InitList) {
+ assert(InitList->isSyntacticForm());
+ };
+
+ unsigned size() { return getCached().size(); }
+
+ std::optional<llvm::StringRef> operator[](const SourceLocation &Location) {
+ const auto &Designators = getCached();
+ const auto Result = Designators.find(Location);
+ if (Result == Designators.end())
+ return {};
+ const llvm::StringRef Designator = Result->getSecond();
+ return (Designator.front() == '.' ? Designator.substr(1) : Designator)
+ .trim("\0"); // Trim NULL characters appearing on Windows in the
+ // name.
+ }
+
+private:
+ using LocationToNameMap = llvm::DenseMap<clang::SourceLocation, std::string>;
+
+ std::optional<LocationToNameMap> CachedDesignators;
+ const InitListExpr *InitList;
+
+ LocationToNameMap &getCached() {
+ return CachedDesignators ? *CachedDesignators
+ : CachedDesignators.emplace(
+ utils::getUnwrittenDesignators(InitList));
+ }
+};
+
+unsigned getNumberOfDesignated(const InitListExpr *SyntacticInitList) {
+ return llvm::count_if(*SyntacticInitList, [](auto *InitExpr) {
+ return isa<DesignatedInitExpr>(InitExpr);
+ });
+}
+
+AST_MATCHER(CXXRecordDecl, isAggregate) { return Node.isAggregate(); }
+
+AST_MATCHER(CXXRecordDecl, isPOD) { return Node.isPOD(); }
+
+AST_MATCHER(InitListExpr, isFullyDesignated) {
+ if (const InitListExpr *SyntacticForm =
+ Node.isSyntacticForm() ? &Node : Node.getSyntacticForm())
+ return getNumberOfDesignated(SyntacticForm) == SyntacticForm->getNumInits();
+ return true;
+}
+
+AST_MATCHER(InitListExpr, hasMoreThanOneElement) {
+ return Node.getNumInits() > 1;
+}
+
+} // namespace
+
+UseDesignatedInitializersCheck::UseDesignatedInitializersCheck(
+ StringRef Name, ClangTidyContext *Context)
+ : ClangTidyCheck(Name, Context), IgnoreSingleElementAggregates(Options.get(
+ IgnoreSingleElementAggregatesName,
+ IgnoreSingleElementAggregatesDefault)),
+ RestrictToPODTypes(
+ Options.get(RestrictToPODTypesName, RestrictToPODTypesDefault)),
+ IgnoreMacros(
+ Options.getLocalOrGlobal(IgnoreMacrosName, IgnoreMacrosDefault)) {}
+
+void UseDesignatedInitializersCheck::registerMatchers(MatchFinder *Finder) {
+ const auto HasBaseWithFields =
+ hasAnyBase(hasType(cxxRecordDecl(has(fieldDecl()))));
+ Finder->addMatcher(
+ initListExpr(
+ hasType(cxxRecordDecl(RestrictToPODTypes ? isPOD() : isAggregate(),
+ unless(HasBaseWithFields))
+ .bind("type")),
+ IgnoreSingleElementAggregates ? hasMoreThanOneElement() : anything(),
+ unless(isFullyDesignated()))
+ .bind("init"),
+ this);
+}
+
+void UseDesignatedInitializersCheck::check(
+ const MatchFinder::MatchResult &Result) {
+ const auto *InitList = Result.Nodes.getNodeAs<InitListExpr>("init");
+ const auto *Type = Result.Nodes.getNodeAs<CXXRecordDecl>("type");
+ if (!Type || !InitList)
+ return;
+ const auto *SyntacticInitList = InitList->getSyntacticForm();
+ if (!SyntacticInitList)
+ return;
+ Designators Designators{SyntacticInitList};
+ const unsigned NumberOfDesignated = getNumberOfDesignated(SyntacticInitList);
+ if (SyntacticInitList->getNumInits() - NumberOfDesignated >
+ Designators.size())
+ return;
+
+ // If the whole initializer list is un-designated, issue only one warning and
+ // a single fix-it for the whole expression.
+ if (0 == NumberOfDesignated) {
+ if (IgnoreMacros && InitList->getBeginLoc().isMacroID())
+ return;
+ {
+ DiagnosticBuilder Diag =
+ diag(InitList->getLBraceLoc(),
+ "use designated initializer list to initialize %0");
+ Diag << Type << InitList->getSourceRange();
+ for (const Stmt *InitExpr : *SyntacticInitList) {
+ const auto Designator = Designators[InitExpr->getBeginLoc()];
+ if (Designator && !Designator->empty())
+ Diag << FixItHint::CreateInsertion(InitExpr->getBeginLoc(),
+ ("." + *Designator + "=").str());
+ }
+ }
+ diag(Type->getBeginLoc(), "aggregate type is defined here",
+ DiagnosticIDs::Note);
+ return;
+ }
+
+ // In case that only a few elements are un-designated (not all as before), the
+ // check offers dedicated issues and fix-its for each of them.
+ for (const auto *InitExpr : *SyntacticInitList) {
+ if (isa<DesignatedInitExpr>(InitExpr))
+ continue;
+ if (IgnoreMacros && InitExpr->getBeginLoc().isMacroID())
+ continue;
+ const auto Designator = Designators[InitExpr->getBeginLoc()];
+ if (!Designator || Designator->empty()) {
+ // There should always be a designator. If there's unexpectedly none, we
+ // at least report a generic diagnostic.
+ diag(InitExpr->getBeginLoc(), "use designated init expression")
+ << InitExpr->getSourceRange();
+ } else {
+ diag(InitExpr->getBeginLoc(),
+ "use designated init expression to initialize field '%0'")
+ << InitExpr->getSourceRange() << *Designator
+ << FixItHint::CreateInsertion(InitExpr->getBeginLoc(),
+ ("." + *Designator + "=").str());
+ }
+ }
+}
+
+void UseDesignatedInitializersCheck::storeOptions(
+ ClangTidyOptions::OptionMap &Opts) {
+ Options.store(Opts, IgnoreSingleElementAggregatesName,
+ IgnoreSingleElementAggregates);
+ Options.store(Opts, RestrictToPODTypesName, RestrictToPODTypes);
+ Options.store(Opts, IgnoreMacrosName, IgnoreMacros);
+}
+
+} // namespace clang::tidy::modernize
diff --git a/clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.h b/clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.h
new file mode 100644
index 000000000000..0a496f51b957
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/modernize/UseDesignatedInitializersCheck.h
@@ -0,0 +1,40 @@
+//===--- UseDesignatedInitializersCheck.h - clang-tidy ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_USEDESIGNATEDINITIALIZERSCHECK_H
+#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_USEDESIGNATEDINITIALIZERSCHECK_H
+
+#include "../ClangTidyCheck.h"
+
+namespace clang::tidy::modernize {
+
+/// Finds initializer lists for aggregate types that could be
+/// written as designated initializers instead.
+///
+/// For the user-facing documentation see:
+/// http://clang.llvm.org/extra/clang-tidy/checks/modernize/use-designated-initializers.html
+class UseDesignatedInitializersCheck : public ClangTidyCheck {
+public:
+ UseDesignatedInitializersCheck(StringRef Name, ClangTidyContext *Context);
+ void registerMatchers(ast_matchers::MatchFinder *Finder) override;
+ void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
+ void storeOptions(ClangTidyOptions::OptionMap &Opts) override;
+
+ std::optional<TraversalKind> getCheckTraversalKind() const override {
+ return TK_IgnoreUnlessSpelledInSource;
+ }
+
+private:
+ bool IgnoreSingleElementAggregates;
+ bool RestrictToPODTypes;
+ bool IgnoreMacros;
+};
+
+} // namespace clang::tidy::modernize
+
+#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_USEDESIGNATEDINITIALIZERSCHECK_H
diff --git a/clang-tools-extra/clang-tidy/utils/CMakeLists.txt b/clang-tools-extra/clang-tidy/utils/CMakeLists.txt
index 88638d4acd55..f0160fa9df74 100644
--- a/clang-tools-extra/clang-tidy/utils/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/utils/CMakeLists.txt
@@ -7,6 +7,7 @@ add_clang_library(clangTidyUtils
Aliasing.cpp
ASTUtils.cpp
DeclRefExprUtils.cpp
+ DesignatedInitializers.cpp
ExceptionAnalyzer.cpp
ExceptionSpecAnalyzer.cpp
ExprSequence.cpp
diff --git a/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp b/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp
new file mode 100644
index 000000000000..6faeb7a0b76e
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp
@@ -0,0 +1,195 @@
+//===--- DesignatedInitializers.cpp - clang-tidy --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides utilities for designated initializers.
+///
+//===----------------------------------------------------------------------===//
+
+#include "DesignatedInitializers.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/ScopeExit.h"
+
+namespace clang::tidy::utils {
+
+namespace {
+
+/// Returns true if Name is reserved, like _Foo or __Vector_base.
+static inline bool isReservedName(llvm::StringRef Name) {
+ // This doesn't catch all cases, but the most common.
+ return Name.size() >= 2 && Name[0] == '_' &&
+ (isUppercase(Name[1]) || Name[1] == '_');
+}
+
+// Helper class to iterate over the designator names of an aggregate type.
+//
+// For an array type, yields [0], [1], [2]...
+// For aggregate classes, yields null for each base, then .field1, .field2,
+// ...
+class AggregateDesignatorNames {
+public:
+ AggregateDesignatorNames(QualType T) {
+ if (!T.isNull()) {
+ T = T.getCanonicalType();
+ if (T->isArrayType()) {
+ IsArray = true;
+ Valid = true;
+ return;
+ }
+ if (const RecordDecl *RD = T->getAsRecordDecl()) {
+ Valid = true;
+ FieldsIt = RD->field_begin();
+ FieldsEnd = RD->field_end();
+ if (const auto *CRD = llvm::dyn_cast<CXXRecordDecl>(RD)) {
+ BasesIt = CRD->bases_begin();
+ BasesEnd = CRD->bases_end();
+ Valid = CRD->isAggregate();
+ }
+ OneField = Valid && BasesIt == BasesEnd && FieldsIt != FieldsEnd &&
+ std::next(FieldsIt) == FieldsEnd;
+ }
+ }
+ }
+ // Returns false if the type was not an aggregate.
+ operator bool() { return Valid; }
+ // Advance to the next element in the aggregate.
+ void next() {
+ if (IsArray)
+ ++Index;
+ else if (BasesIt != BasesEnd)
+ ++BasesIt;
+ else if (FieldsIt != FieldsEnd)
+ ++FieldsIt;
+ }
+ // Print the designator to Out.
+ // Returns false if we could not produce a designator for this element.
+ bool append(std::string &Out, bool ForSubobject) {
+ if (IsArray) {
+ Out.push_back('[');
+ Out.append(std::to_string(Index));
+ Out.push_back(']');
+ return true;
+ }
+ if (BasesIt != BasesEnd)
+ return false; // Bases can't be designated. Should we make one up?
+ if (FieldsIt != FieldsEnd) {
+ llvm::StringRef FieldName;
+ if (const IdentifierInfo *II = FieldsIt->getIdentifier())
+ FieldName = II->getName();
+
+ // For certain objects, their subobjects may be named directly.
+ if (ForSubobject &&
+ (FieldsIt->isAnonymousStructOrUnion() ||
+ // std::array<int,3> x = {1,2,3}. Designators not strictly valid!
+ (OneField && isReservedName(FieldName))))
+ return true;
+
+ if (!FieldName.empty() && !isReservedName(FieldName)) {
+ Out.push_back('.');
+ Out.append(FieldName.begin(), FieldName.end());
+ return true;
+ }
+ return false;
+ }
+ return false;
+ }
+
+private:
+ bool Valid = false;
+ bool IsArray = false;
+ bool OneField = false; // e.g. std::array { T __elements[N]; }
+ unsigned Index = 0;
+ CXXRecordDecl::base_class_const_iterator BasesIt;
+ CXXRecordDecl::base_class_const_iterator BasesEnd;
+ RecordDecl::field_iterator FieldsIt;
+ RecordDecl::field_iterator FieldsEnd;
+};
+
+// Collect designator labels describing the elements of an init list.
+//
+// This function contributes the designators of some (sub)object, which is
+// represented by the semantic InitListExpr Sem.
+// This includes any nested subobjects, but *only* if they are part of the
+// same original syntactic init list (due to brace elision). In other words,
+// it may descend into subobjects but not written init-lists.
+//
+// For example: struct Outer { Inner a,b; }; struct Inner { int x, y; }
+// Outer o{{1, 2}, 3};
+// This function will be called with Sem = { {1, 2}, {3, ImplicitValue} }
+// It should generate designators '.a:' and '.b.x:'.
+// '.a:' is produced directly without recursing into the written sublist.
+// (The written sublist will have a separate collectDesignators() call later).
+// Recursion with Prefix='.b' and Sem = {3, ImplicitValue} produces '.b.x:'.
+void collectDesignators(const InitListExpr *Sem,
+ llvm::DenseMap<SourceLocation, std::string> &Out,
+ const llvm::DenseSet<SourceLocation> &NestedBraces,
+ std::string &Prefix) {
+ if (!Sem || Sem->isTransparent())
+ return;
+ assert(Sem->isSemanticForm());
+
+ // The elements of the semantic form all correspond to direct subobjects of
+ // the aggregate type. `Fields` iterates over these subobject names.
+ AggregateDesignatorNames Fields(Sem->getType());
+ if (!Fields)
+ return;
+ for (const Expr *Init : Sem->inits()) {
+ auto Next = llvm::make_scope_exit([&, Size(Prefix.size())] {
+ Fields.next(); // Always advance to the next subobject name.
+ Prefix.resize(Size); // Erase any designator we appended.
+ });
+ // Skip for a broken initializer or if it is a "hole" in a subobject that
+ // was not explicitly initialized.
+ if (!Init || llvm::isa<ImplicitValueInitExpr>(Init))
+ continue;
+
+ const auto *BraceElidedSubobject = llvm::dyn_cast<InitListExpr>(Init);
+ if (BraceElidedSubobject &&
+ NestedBraces.contains(BraceElidedSubobject->getLBraceLoc()))
+ BraceElidedSubobject = nullptr; // there were braces!
+
+ if (!Fields.append(Prefix, BraceElidedSubobject != nullptr))
+ continue; // no designator available for this subobject
+ if (BraceElidedSubobject) {
+ // If the braces were elided, this aggregate subobject is initialized
+ // inline in the same syntactic list.
+ // Descend into the semantic list describing the subobject.
+ // (NestedBraces are still correct, they're from the same syntactic
+ // list).
+ collectDesignators(BraceElidedSubobject, Out, NestedBraces, Prefix);
+ continue;
+ }
+ Out.try_emplace(Init->getBeginLoc(), Prefix);
+ }
+}
+
+} // namespace
+
+llvm::DenseMap<SourceLocation, std::string>
+getUnwrittenDesignators(const InitListExpr *Syn) {
+ assert(Syn->isSyntacticForm());
+
+ // collectDesignators needs to know which InitListExprs in the semantic tree
+ // were actually written, but InitListExpr::isExplicit() lies.
+ // Instead, record where braces of sub-init-lists occur in the syntactic form.
+ llvm::DenseSet<SourceLocation> NestedBraces;
+ for (const Expr *Init : Syn->inits())
+ if (auto *Nested = llvm::dyn_cast<InitListExpr>(Init))
+ NestedBraces.insert(Nested->getLBraceLoc());
+
+ // Traverse the semantic form to find the designators.
+ // We use their SourceLocation to correlate with the syntactic form later.
+ llvm::DenseMap<SourceLocation, std::string> Designators;
+ std::string EmptyPrefix;
+ collectDesignators(Syn->isSemanticForm() ? Syn : Syn->getSemanticForm(),
+ Designators, NestedBraces, EmptyPrefix);
+ return Designators;
+}
+
+} // namespace clang::tidy::utils
diff --git a/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.h b/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.h
new file mode 100644
index 000000000000..a6cb2963faf7
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/utils/DesignatedInitializers.h
@@ -0,0 +1,47 @@
+//===--- DesignatedInitializers.h - clang-tidy ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides utilities for designated initializers.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_DESIGNATEDINITIALIZERS_H
+#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_DESIGNATEDINITIALIZERS_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang::tidy::utils {
+
+/// Get designators describing the elements of a (syntactic) init list.
+///
+/// Given for example the type
+/// \code
+/// struct S { int i, j; };
+/// \endcode
+/// and the definition
+/// \code
+/// S s{1, 2};
+/// \endcode
+/// calling `getUnwrittenDesignators` for the initializer list expression
+/// `{1, 2}` would produce the map `{loc(1): ".i", loc(2): ".j"}`.
+///
+/// It does not produce designators for any explicitly-written nested lists,
+/// e.g. `{1, .j=2}` would only return `{loc(1): ".i"}`.
+///
+/// It also considers structs with fields of record types like
+/// `struct T { S s; };`. In this case, there would be designators of the
+/// form `.s.i` and `.s.j` in the returned map.
+llvm::DenseMap<clang::SourceLocation, std::string>
+getUnwrittenDesignators(const clang::InitListExpr *Syn);
+
+} // namespace clang::tidy::utils
+
+#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_DESIGNATEDINITIALIZERS_H
diff --git a/clang-tools-extra/clangd/CMakeLists.txt b/clang-tools-extra/clangd/CMakeLists.txt
index 3911fb6c6c74..f49704157880 100644
--- a/clang-tools-extra/clangd/CMakeLists.txt
+++ b/clang-tools-extra/clangd/CMakeLists.txt
@@ -182,6 +182,7 @@ target_link_libraries(clangDaemon
clangIncludeCleaner
clangPseudo
clangTidy
+ clangTidyUtils
clangdSupport
)
diff --git a/clang-tools-extra/clangd/InlayHints.cpp b/clang-tools-extra/clangd/InlayHints.cpp
index 671a9c30ffa9..a0ebc631ef82 100644
--- a/clang-tools-extra/clangd/InlayHints.cpp
+++ b/clang-tools-extra/clangd/InlayHints.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
#include "InlayHints.h"
+#include "../clang-tidy/utils/DesignatedInitializers.h"
#include "AST.h"
#include "Config.h"
#include "HeuristicResolver.h"
@@ -24,7 +25,6 @@
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
@@ -42,169 +42,6 @@ namespace {
// For now, inlay hints are always anchored at the left or right of their range.
enum class HintSide { Left, Right };
-// Helper class to iterate over the designator names of an aggregate type.
-//
-// For an array type, yields [0], [1], [2]...
-// For aggregate classes, yields null for each base, then .field1, .field2, ...
-class AggregateDesignatorNames {
-public:
- AggregateDesignatorNames(QualType T) {
- if (!T.isNull()) {
- T = T.getCanonicalType();
- if (T->isArrayType()) {
- IsArray = true;
- Valid = true;
- return;
- }
- if (const RecordDecl *RD = T->getAsRecordDecl()) {
- Valid = true;
- FieldsIt = RD->field_begin();
- FieldsEnd = RD->field_end();
- if (const auto *CRD = llvm::dyn_cast<CXXRecordDecl>(RD)) {
- BasesIt = CRD->bases_begin();
- BasesEnd = CRD->bases_end();
- Valid = CRD->isAggregate();
- }
- OneField = Valid && BasesIt == BasesEnd && FieldsIt != FieldsEnd &&
- std::next(FieldsIt) == FieldsEnd;
- }
- }
- }
- // Returns false if the type was not an aggregate.
- operator bool() { return Valid; }
- // Advance to the next element in the aggregate.
- void next() {
- if (IsArray)
- ++Index;
- else if (BasesIt != BasesEnd)
- ++BasesIt;
- else if (FieldsIt != FieldsEnd)
- ++FieldsIt;
- }
- // Print the designator to Out.
- // Returns false if we could not produce a designator for this element.
- bool append(std::string &Out, bool ForSubobject) {
- if (IsArray) {
- Out.push_back('[');
- Out.append(std::to_string(Index));
- Out.push_back(']');
- return true;
- }
- if (BasesIt != BasesEnd)
- return false; // Bases can't be designated. Should we make one up?
- if (FieldsIt != FieldsEnd) {
- llvm::StringRef FieldName;
- if (const IdentifierInfo *II = FieldsIt->getIdentifier())
- FieldName = II->getName();
-
- // For certain objects, their subobjects may be named directly.
- if (ForSubobject &&
- (FieldsIt->isAnonymousStructOrUnion() ||
- // std::array<int,3> x = {1,2,3}. Designators not strictly valid!
- (OneField && isReservedName(FieldName))))
- return true;
-
- if (!FieldName.empty() && !isReservedName(FieldName)) {
- Out.push_back('.');
- Out.append(FieldName.begin(), FieldName.end());
- return true;
- }
- return false;
- }
- return false;
- }
-
-private:
- bool Valid = false;
- bool IsArray = false;
- bool OneField = false; // e.g. std::array { T __elements[N]; }
- unsigned Index = 0;
- CXXRecordDecl::base_class_const_iterator BasesIt;
- CXXRecordDecl::base_class_const_iterator BasesEnd;
- RecordDecl::field_iterator FieldsIt;
- RecordDecl::field_iterator FieldsEnd;
-};
-
-// Collect designator labels describing the elements of an init list.
-//
-// This function contributes the designators of some (sub)object, which is
-// represented by the semantic InitListExpr Sem.
-// This includes any nested subobjects, but *only* if they are part of the same
-// original syntactic init list (due to brace elision).
-// In other words, it may descend into subobjects but not written init-lists.
-//
-// For example: struct Outer { Inner a,b; }; struct Inner { int x, y; }
-// Outer o{{1, 2}, 3};
-// This function will be called with Sem = { {1, 2}, {3, ImplicitValue} }
-// It should generate designators '.a:' and '.b.x:'.
-// '.a:' is produced directly without recursing into the written sublist.
-// (The written sublist will have a separate collectDesignators() call later).
-// Recursion with Prefix='.b' and Sem = {3, ImplicitValue} produces '.b.x:'.
-void collectDesignators(const InitListExpr *Sem,
- llvm::DenseMap<SourceLocation, std::string> &Out,
- const llvm::DenseSet<SourceLocation> &NestedBraces,
- std::string &Prefix) {
- if (!Sem || Sem->isTransparent())
- return;
- assert(Sem->isSemanticForm());
-
- // The elements of the semantic form all correspond to direct subobjects of
- // the aggregate type. `Fields` iterates over these subobject names.
- AggregateDesignatorNames Fields(Sem->getType());
- if (!Fields)
- return;
- for (const Expr *Init : Sem->inits()) {
- auto Next = llvm::make_scope_exit([&, Size(Prefix.size())] {
- Fields.next(); // Always advance to the next subobject name.
- Prefix.resize(Size); // Erase any designator we appended.
- });
- // Skip for a broken initializer or if it is a "hole" in a subobject that
- // was not explicitly initialized.
- if (!Init || llvm::isa<ImplicitValueInitExpr>(Init))
- continue;
-
- const auto *BraceElidedSubobject = llvm::dyn_cast<InitListExpr>(Init);
- if (BraceElidedSubobject &&
- NestedBraces.contains(BraceElidedSubobject->getLBraceLoc()))
- BraceElidedSubobject = nullptr; // there were braces!
-
- if (!Fields.append(Prefix, BraceElidedSubobject != nullptr))
- continue; // no designator available for this subobject
- if (BraceElidedSubobject) {
- // If the braces were elided, this aggregate subobject is initialized
- // inline in the same syntactic list.
- // Descend into the semantic list describing the subobject.
- // (NestedBraces are still correct, they're from the same syntactic list).
- collectDesignators(BraceElidedSubobject, Out, NestedBraces, Prefix);
- continue;
- }
- Out.try_emplace(Init->getBeginLoc(), Prefix);
- }
-}
-
-// Get designators describing the elements of a (syntactic) init list.
-// This does not produce designators for any explicitly-written nested lists.
-llvm::DenseMap<SourceLocation, std::string>
-getDesignators(const InitListExpr *Syn) {
- assert(Syn->isSyntacticForm());
-
- // collectDesignators needs to know which InitListExprs in the semantic tree
- // were actually written, but InitListExpr::isExplicit() lies.
- // Instead, record where braces of sub-init-lists occur in the syntactic form.
- llvm::DenseSet<SourceLocation> NestedBraces;
- for (const Expr *Init : Syn->inits())
- if (auto *Nested = llvm::dyn_cast<InitListExpr>(Init))
- NestedBraces.insert(Nested->getLBraceLoc());
-
- // Traverse the semantic form to find the designators.
- // We use their SourceLocation to correlate with the syntactic form later.
- llvm::DenseMap<SourceLocation, std::string> Designators;
- std::string EmptyPrefix;
- collectDesignators(Syn->isSemanticForm() ? Syn : Syn->getSemanticForm(),
- Designators, NestedBraces, EmptyPrefix);
- return Designators;
-}
-
void stripLeadingUnderscores(StringRef &Name) { Name = Name.ltrim('_'); }
// getDeclForType() returns the decl responsible for Type's spelling.
@@ -847,14 +684,15 @@ public:
// This is the one we will ultimately attach designators to.
// It may have subobject initializers inlined without braces. The *semantic*
// form of the init-list has nested init-lists for these.
- // getDesignators will look at the semantic form to determine the labels.
+ // getUnwrittenDesignators will look at the semantic form to determine the
+ // labels.
assert(Syn->isSyntacticForm() && "RAV should not visit implicit code!");
if (!Cfg.InlayHints.Designators)
return true;
if (Syn->isIdiomaticZeroInitializer(AST.getLangOpts()))
return true;
llvm::DenseMap<SourceLocation, std::string> Designators =
- getDesignators(Syn);
+ tidy::utils::getUnwrittenDesignators(Syn);
for (const Expr *Init : Syn->inits()) {
if (llvm::isa<DesignatedInitExpr>(Init))
continue;
diff --git a/clang-tools-extra/clangd/tool/CMakeLists.txt b/clang-tools-extra/clangd/tool/CMakeLists.txt
index 6c21175d7687..4012b6401c00 100644
--- a/clang-tools-extra/clangd/tool/CMakeLists.txt
+++ b/clang-tools-extra/clangd/tool/CMakeLists.txt
@@ -33,6 +33,7 @@ clang_target_link_libraries(clangdMain
target_link_libraries(clangdMain
PRIVATE
clangTidy
+ clangTidyUtils
clangDaemon
clangdRemoteIndex
diff --git a/clang-tools-extra/clangd/unittests/CMakeLists.txt b/clang-tools-extra/clangd/unittests/CMakeLists.txt
index 9cd195eaf164..e432db8d0912 100644
--- a/clang-tools-extra/clangd/unittests/CMakeLists.txt
+++ b/clang-tools-extra/clangd/unittests/CMakeLists.txt
@@ -175,6 +175,7 @@ target_link_libraries(ClangdTests
clangIncludeCleaner
clangTesting
clangTidy
+ clangTidyUtils
clangdSupport
)
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index 3f90e7d63d6b..5bae530e9423 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -104,6 +104,12 @@ Improvements to clang-tidy
New checks
^^^^^^^^^^
+- New :doc:`modernize-use-designated-initializers
+ <clang-tidy/checks/modernize/use-designated-initializers>` check.
+
+ Finds initializer lists for aggregate types that could be
+ written as designated initializers instead.
+
- New :doc:`readability-use-std-min-max
<clang-tidy/checks/readability/use-std-min-max>` check.
diff --git a/clang-tools-extra/docs/clang-tidy/checks/list.rst b/clang-tools-extra/docs/clang-tidy/checks/list.rst
index 59ef69f390ee..5e57bc0ee483 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/list.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/list.rst
@@ -287,6 +287,7 @@ Clang-Tidy Checks
:doc:`modernize-use-bool-literals <modernize/use-bool-literals>`, "Yes"
:doc:`modernize-use-constraints <modernize/use-constraints>`, "Yes"
:doc:`modernize-use-default-member-init <modernize/use-default-member-init>`, "Yes"
+ :doc:`modernize-use-designated-initializers <modernize/use-designated-initializers>`, "Yes"
:doc:`modernize-use-emplace <modernize/use-emplace>`, "Yes"
:doc:`modernize-use-equals-default <modernize/use-equals-default>`, "Yes"
:doc:`modernize-use-equals-delete <modernize/use-equals-delete>`, "Yes"
diff --git a/clang-tools-extra/docs/clang-tidy/checks/modernize/use-designated-initializers.rst b/clang-tools-extra/docs/clang-tidy/checks/modernize/use-designated-initializers.rst
new file mode 100644
index 000000000000..22f50980baad
--- /dev/null
+++ b/clang-tools-extra/docs/clang-tidy/checks/modernize/use-designated-initializers.rst
@@ -0,0 +1,62 @@
+.. title:: clang-tidy - modernize-use-designated-initializers
+
+modernize-use-designated-initializers
+=====================================
+
+Finds initializer lists for aggregate types which could be written as designated
+initializers instead.
+
+With plain initializer lists, it is very easy to introduce bugs when adding new
+fields in the middle of a struct or class type. The same confusion might arise
+when changing the order of fields.
+
+C++20 supports the designated initializer syntax for aggregate types. By
+applying it, we can always be sure that aggregates are constructed correctly,
+because every variable being initialized is referenced by its name.
+
+Example:
+
+.. code-block::
+
+ struct S { int i, j; };
+
+is an aggregate type that should be initialized as
+
+.. code-block::
+
+ S s{.i = 1, .j = 2};
+
+instead of
+
+.. code-block::
+
+ S s{1, 2};
+
+which could easily become an issue when ``i`` and ``j`` are swapped in the
+declaration of ``S``.
+
+Even when compiling in a language version older than C++20, depending on your
+compiler, designated initializers are potentially supported. Therefore, the
+check is not restricted to C++20 and newer versions. Check out the options
+``-Wc99-designator`` to get support for mixed designators in initializer lists in
+C and ``-Wc++20-designator`` for support of designated initializers in older C++
+language modes.
+
+Options
+-------
+
+.. option:: IgnoreMacros
+
+ The value `true` specifies that components of initializer lists expanded from
+ macros are not checked. The default value is `true`.
+
+.. option:: IgnoreSingleElementAggregates
+
+ The value `false` specifies that even initializers for aggregate types with
+ only a single element should be checked. The default value is `true`.
+
+.. option:: RestrictToPODTypes
+
+ The value `true` specifies that only Plain Old Data (POD) types shall be
+ checked. This makes the check applicable to even older C++ standards. The
+ default value is `false`.
diff --git a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
index 277e6ec5b089..878067aca017 100644
--- a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
+++ b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
@@ -228,6 +228,11 @@ public:
// Mark declaration from definition as it needs type-checking.
if (FD->isThisDeclarationADefinition())
report(FD->getLocation(), FD);
+ // Explicit specialization/instantiation of a function template requires the
+ // primary template.
+ if (clang::isTemplateExplicitInstantiationOrSpecialization(
+ FD->getTemplateSpecializationKind()))
+ report(FD->getLocation(), FD->getPrimaryTemplate());
return true;
}
bool VisitVarDecl(VarDecl *VD) {
diff --git a/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp b/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
index e238dc3d902b..5dc88157e13a 100644
--- a/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
+++ b/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
@@ -229,13 +229,9 @@ TEST(WalkAST, FunctionTemplates) {
EXPECT_THAT(testWalk("template<typename T> void foo(T) {}",
"template void ^foo<int>(int);"),
ElementsAre());
- // FIXME: Report specialized template as used from explicit specializations.
- EXPECT_THAT(testWalk("template<typename T> void foo(T);",
+ EXPECT_THAT(testWalk("template<typename T> void $explicit^foo(T);",
"template<> void ^foo<int>(int);"),
- ElementsAre());
- EXPECT_THAT(testWalk("template<typename T> void foo(T) {}",
- "template<typename T> void ^foo(T*) {}"),
- ElementsAre());
+ ElementsAre(Decl::FunctionTemplate));
// Implicit instantiations references most relevant template.
EXPECT_THAT(testWalk(R"cpp(
@@ -510,6 +506,8 @@ TEST(WalkAST, Functions) {
// Definition uses declaration, not the other way around.
testWalk("void $explicit^foo();", "void ^foo() {}");
testWalk("void foo() {}", "void ^foo();");
+ testWalk("template <typename> void $explicit^foo();",
+ "template <typename> void ^foo() {}");
// Unresolved calls marks all the overloads.
testWalk("void $ambiguous^foo(int); void $ambiguous^foo(char);",
diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-designated-initializers.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-designated-initializers.cpp
new file mode 100644
index 000000000000..7e5c26e3f440
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-designated-initializers.cpp
@@ -0,0 +1,203 @@
+// RUN: %check_clang_tidy -std=c++17 %s modernize-use-designated-initializers %t \
+// RUN: -- \
+// RUN: -- -fno-delayed-template-parsing
+// RUN: %check_clang_tidy -check-suffixes=,SINGLE-ELEMENT -std=c++17 %s modernize-use-designated-initializers %t \
+// RUN: -- -config="{CheckOptions: {modernize-use-designated-initializers.IgnoreSingleElementAggregates: false}}" \
+// RUN: -- -fno-delayed-template-parsing
+// RUN: %check_clang_tidy -check-suffixes=POD -std=c++17 %s modernize-use-designated-initializers %t \
+// RUN: -- -config="{CheckOptions: {modernize-use-designated-initializers.RestrictToPODTypes: true}}" \
+// RUN: -- -fno-delayed-template-parsing
+// RUN: %check_clang_tidy -check-suffixes=,MACROS -std=c++17 %s modernize-use-designated-initializers %t \
+// RUN: -- -config="{CheckOptions: {modernize-use-designated-initializers.IgnoreMacros: false}}" \
+// RUN: -- -fno-delayed-template-parsing
+
+struct S1 {};
+
+S1 s11{};
+S1 s12 = {};
+S1 s13();
+S1 s14;
+
+struct S2 { int i, j; };
+
+S2 s21{.i=1, .j =2};
+
+S2 s22 = {1, 2};
+// CHECK-MESSAGES: :[[@LINE-1]]:10: warning: use designated initializer list to initialize 'S2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-6]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES-POD: :[[@LINE-3]]:10: warning: use designated initializer list to initialize 'S2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-8]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S2 s22 = {.i=1, .j=2};
+
+S2 s23{1};
+// CHECK-MESSAGES: :[[@LINE-1]]:7: warning: use designated initializer list to initialize 'S2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-13]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES-POD: :[[@LINE-3]]:7: warning: use designated initializer list to initialize 'S2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-15]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S2 s23{.i=1};
+
+S2 s24{.i = 1};
+
+S2 s25 = {.i=1, 2};
+// CHECK-MESSAGES: :[[@LINE-1]]:17: warning: use designated init expression to initialize field 'j' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-2]]:17: warning: use designated init expression to initialize field 'j' [modernize-use-designated-initializers]
+// CHECK-FIXES: S2 s25 = {.i=1, .j=2};
+
+class S3 {
+ public:
+ S2 s2;
+ double d;
+};
+
+S3 s31 = {.s2 = 1, 2, 3.1};
+// CHECK-MESSAGES: :[[@LINE-1]]:20: warning: use designated init expression to initialize field 's2.j' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-2]]:23: warning: use designated init expression to initialize field 'd' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-3]]:20: warning: use designated init expression to initialize field 's2.j' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-4]]:23: warning: use designated init expression to initialize field 'd' [modernize-use-designated-initializers]
+// CHECK-FIXES: S3 s31 = {.s2 = 1, .s2.j=2, .d=3.1};
+
+S3 s32 = {{.i = 1, 2}};
+// CHECK-MESSAGES: :[[@LINE-1]]:10: warning: use designated initializer list to initialize 'S3' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-15]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES: :[[@LINE-3]]:20: warning: use designated init expression to initialize field 'j' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-4]]:10: warning: use designated initializer list to initialize 'S3' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-18]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES-POD: :[[@LINE-6]]:20: warning: use designated init expression to initialize field 'j' [modernize-use-designated-initializers]
+// CHECK-FIXES: S3 s32 = {.s2={.i = 1, .j=2}};
+
+S3 s33 = {{2}, .d=3.1};
+// CHECK-MESSAGES: :[[@LINE-1]]:11: warning: use designated init expression to initialize field 's2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-2]]:11: warning: use designated initializer list to initialize 'S2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-50]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES-POD: :[[@LINE-4]]:11: warning: use designated init expression to initialize field 's2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-5]]:11: warning: use designated initializer list to initialize 'S2' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-53]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S3 s33 = {.s2={.i=2}, .d=3.1};
+
+struct S4 {
+ double d;
+ private: static int i;
+};
+
+S4 s41 {2.2};
+// CHECK-MESSAGES-SINGLE-ELEMENT: :[[@LINE-1]]:8: warning: use designated initializer list to initialize 'S4' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-SINGLE-ELEMENT: :[[@LINE-7]]:1: note: aggregate type is defined here
+// CHECK-FIXES-SINGLE-ELEMENT: S4 s41 {.d=2.2};
+
+S4 s42 = {{}};
+// CHECK-MESSAGES-SINGLE-ELEMENT: :[[@LINE-1]]:10: warning: use designated initializer list to initialize 'S4' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-SINGLE-ELEMENT: :[[@LINE-12]]:1: note: aggregate type is defined here
+// CHECK-FIXES-SINGLE-ELEMENT: S4 s42 = {.d={}};
+
+template<typename S> S template1() { return {10, 11}; }
+
+S2 s26 = template1<S2>();
+
+template<typename S> S template2() { return {}; }
+
+S2 s27 = template2<S2>();
+
+struct S5: S2 { int x, y; };
+
+S5 s51 {1, 2, .x = 3, .y = 4};
+
+struct S6 {
+ int i;
+ struct { int j; } s;
+};
+
+S6 s61 {1, 2};
+// CHECK-MESSAGES: :[[@LINE-1]]:8: warning: use designated initializer list to initialize 'S6' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-7]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES-POD: :[[@LINE-3]]:8: warning: use designated initializer list to initialize 'S6' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-9]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S6 s61 {.i=1, .s.j=2};
+
+struct S7 {
+ union {
+ int k;
+ double d;
+ } u;
+};
+
+S7 s71 {1};
+// CHECK-MESSAGES-SINGLE-ELEMENT: :[[@LINE-1]]:8: warning: use designated initializer list to initialize 'S7' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-SINGLE-ELEMENT: :[[@LINE-9]]:1: note: aggregate type is defined here
+// CHECK-FIXES-SINGLE-ELEMENT: S7 s71 {.u.k=1};
+
+struct S8: S7 { int i; };
+
+S8 s81{1, 2};
+
+struct S9 {
+ int i, j;
+ S9 &operator=(S9);
+};
+
+S9 s91{1, 2};
+// CHECK-MESSAGES: :[[@LINE-1]]:7: warning: use designated initializer list to initialize 'S9' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-7]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S9 s91{.i=1, .j=2};
+
+struct S10 { int i = 1, j = 2; };
+
+S10 s101 {1, .j=2};
+// CHECK-MESSAGES: :[[@LINE-1]]:11: warning: use designated init expression to initialize field 'i' [modernize-use-designated-initializers]
+// CHECK-FIXES: S10 s101 {.i=1, .j=2};
+
+struct S11 { int i; S10 s10; };
+
+S11 s111 { .i = 1 };
+S11 s112 { 1 };
+// CHECK-MESSAGES: :[[@LINE-1]]:10: warning: use designated initializer list to initialize 'S11' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-5]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S11 s112 { .i=1 };
+
+S11 s113 { .i=1, {}};
+// CHECK-MESSAGES: :[[@LINE-1]]:18: warning: use designated init expression to initialize field 's10' [modernize-use-designated-initializers]
+// CHECK-FIXES: S11 s113 { .i=1, .s10={}};
+
+S11 s114 { .i=1, .s10={1, .j=2}};
+// CHECK-MESSAGES: :[[@LINE-1]]:24: warning: use designated init expression to initialize field 'i' [modernize-use-designated-initializers]
+// CHECK-FIXES: S11 s114 { .i=1, .s10={.i=1, .j=2}};
+
+struct S12 {
+ int i;
+ struct { int j; };
+};
+
+S12 s121 {1, 2};
+// CHECK-MESSAGES: :[[@LINE-1]]:10: warning: use designated initializer list to initialize 'S12' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-7]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES-POD: :[[@LINE-3]]:10: warning: use designated initializer list to initialize 'S12' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-9]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S12 s121 {.i=1, .j=2};
+
+struct S13 {
+ union {
+ int k;
+ double d;
+ };
+ int i;
+};
+
+S13 s131 {1, 2};
+// CHECK-MESSAGES: :[[@LINE-1]]:10: warning: use designated initializer list to initialize 'S13' [modernize-use-designated-initializers]
+// CHECK-MESSAGES: :[[@LINE-10]]:1: note: aggregate type is defined here
+// CHECK-MESSAGES-POD: :[[@LINE-3]]:10: warning: use designated initializer list to initialize 'S13' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-POD: :[[@LINE-12]]:1: note: aggregate type is defined here
+// CHECK-FIXES: S13 s131 {.k=1, .i=2};
+
+#define A (3+2)
+#define B .j=1
+
+S9 s92 {A, B};
+// CHECK-MESSAGES-MACROS: :[[@LINE-1]]:9: warning: use designated init expression to initialize field 'i' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-MACROS: :[[@LINE-5]]:11: note: expanded from macro 'A'
+
+#define DECLARE_S93 S9 s93 {1, 2}
+
+DECLARE_S93;
+// CHECK-MESSAGES-MACROS: :[[@LINE-1]]:1: warning: use designated initializer list to initialize 'S9' [modernize-use-designated-initializers]
+// CHECK-MESSAGES-MACROS: :[[@LINE-4]]:28: note: expanded from macro 'DECLARE_S93'
+// CHECK-MESSAGES-MACROS: :[[@LINE-71]]:1: note: aggregate type is defined here
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 2a177814c4df..bcd69198eafd 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -3477,7 +3477,7 @@ builtin, the mangler emits their usual pattern without any special treatment.
-----------------------
``__builtin_popcountg`` returns the number of 1 bits in the argument. The
-argument can be of any integer type.
+argument can be of any unsigned integer type.
**Syntax**:
@@ -3489,20 +3489,20 @@ argument can be of any integer type.
.. code-block:: c++
- int x = 1;
+ unsigned int x = 1;
int x_pop = __builtin_popcountg(x);
unsigned long y = 3;
int y_pop = __builtin_popcountg(y);
- _BitInt(128) z = 7;
+ unsigned _BitInt(128) z = 7;
int z_pop = __builtin_popcountg(z);
**Description**:
``__builtin_popcountg`` is meant to be a type-generic alternative to the
``__builtin_popcount{,l,ll}`` builtins, with support for other integer types,
-such as ``__int128`` and C23 ``_BitInt(N)``.
+such as ``unsigned __int128`` and C23 ``unsigned _BitInt(N)``.
Multiprecision Arithmetic Builtins
----------------------------------
diff --git a/clang/docs/LibASTMatchersReference.html b/clang/docs/LibASTMatchersReference.html
index c40d679e383b..8a06084955aa 100644
--- a/clang/docs/LibASTMatchersReference.html
+++ b/clang/docs/LibASTMatchersReference.html
@@ -7049,7 +7049,7 @@ binary operator or fold expression matches.
</pre></td></tr>
-<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1CXXFoldExpr.html">CXXFoldExpr</a>&gt;</td><td class="name" onclick="toggle('hasFoldInit0')"><a name="hasFoldInit0Anchor">hasFoldInit</a></td><td>ast_matchers::Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMacher</td></tr>
+<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1CXXFoldExpr.html">CXXFoldExpr</a>&gt;</td><td class="name" onclick="toggle('hasFoldInit0')"><a name="hasFoldInit0Anchor">hasFoldInit</a></td><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMacher</td></tr>
<tr><td colspan="4" class="doc" id="hasFoldInit0"><pre>Matches the operand that does not contain the parameter pack.
Example matches `(0 + ... + args)` and `(args * ... * 1)`
@@ -7089,7 +7089,7 @@ Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
</pre></td></tr>
-<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1CXXFoldExpr.html">CXXFoldExpr</a>&gt;</td><td class="name" onclick="toggle('hasPattern0')"><a name="hasPattern0Anchor">hasPattern</a></td><td>ast_matchers::Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMacher</td></tr>
+<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1CXXFoldExpr.html">CXXFoldExpr</a>&gt;</td><td class="name" onclick="toggle('hasPattern0')"><a name="hasPattern0Anchor">hasPattern</a></td><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMacher</td></tr>
<tr><td colspan="4" class="doc" id="hasPattern0"><pre>Matches the operand that contains the parameter pack.
Example matches `(0 + ... + args)`
@@ -7859,7 +7859,7 @@ int a = b ?: 1;
</pre></td></tr>
-<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1ClassTemplateSpecializationDecl.html">ClassTemplateSpecializationDecl</a>&gt;</td><td class="name" onclick="toggle('forEachTemplateArgument0')"><a name="forEachTemplateArgument0Anchor">forEachTemplateArgument</a></td><td>clang::ast_matchers::Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateArgument.html">TemplateArgument</a>&gt; InnerMatcher</td></tr>
+<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1ClassTemplateSpecializationDecl.html">ClassTemplateSpecializationDecl</a>&gt;</td><td class="name" onclick="toggle('forEachTemplateArgument0')"><a name="forEachTemplateArgument0Anchor">forEachTemplateArgument</a></td><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateArgument.html">TemplateArgument</a>&gt; InnerMatcher</td></tr>
<tr><td colspan="4" class="doc" id="forEachTemplateArgument0"><pre>Matches classTemplateSpecialization, templateSpecializationType and
functionDecl nodes where the template argument matches the inner matcher.
This matcher may produce multiple matches.
@@ -8454,7 +8454,7 @@ Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
</pre></td></tr>
-<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt;</td><td class="name" onclick="toggle('ignoringElidableConstructorCall0')"><a name="ignoringElidableConstructorCall0Anchor">ignoringElidableConstructorCall</a></td><td>ast_matchers::Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMatcher</td></tr>
+<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt;</td><td class="name" onclick="toggle('ignoringElidableConstructorCall0')"><a name="ignoringElidableConstructorCall0Anchor">ignoringElidableConstructorCall</a></td><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMatcher</td></tr>
<tr><td colspan="4" class="doc" id="ignoringElidableConstructorCall0"><pre>Matches expressions that match InnerMatcher that are possibly wrapped in an
elidable constructor and other corresponding bookkeeping nodes.
@@ -8691,7 +8691,7 @@ Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
</pre></td></tr>
-<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1FunctionDecl.html">FunctionDecl</a>&gt;</td><td class="name" onclick="toggle('forEachTemplateArgument2')"><a name="forEachTemplateArgument2Anchor">forEachTemplateArgument</a></td><td>clang::ast_matchers::Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateArgument.html">TemplateArgument</a>&gt; InnerMatcher</td></tr>
+<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1FunctionDecl.html">FunctionDecl</a>&gt;</td><td class="name" onclick="toggle('forEachTemplateArgument2')"><a name="forEachTemplateArgument2Anchor">forEachTemplateArgument</a></td><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateArgument.html">TemplateArgument</a>&gt; InnerMatcher</td></tr>
<tr><td colspan="4" class="doc" id="forEachTemplateArgument2"><pre>Matches classTemplateSpecialization, templateSpecializationType and
functionDecl nodes where the template argument matches the inner matcher.
This matcher may produce multiple matches.
@@ -8959,7 +8959,7 @@ matcher.
</pre></td></tr>
-<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1InitListExpr.html">InitListExpr</a>&gt;</td><td class="name" onclick="toggle('hasInit0')"><a name="hasInit0Anchor">hasInit</a></td><td>unsigned N, ast_matchers::Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMatcher</td></tr>
+<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1InitListExpr.html">InitListExpr</a>&gt;</td><td class="name" onclick="toggle('hasInit0')"><a name="hasInit0Anchor">hasInit</a></td><td>unsigned N, Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1Expr.html">Expr</a>&gt; InnerMatcher</td></tr>
<tr><td colspan="4" class="doc" id="hasInit0"><pre>Matches the n'th item of an initializer list expression.
Example matches y.
@@ -10026,7 +10026,7 @@ varDecl(hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(0,
</pre></td></tr>
-<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateSpecializationType.html">TemplateSpecializationType</a>&gt;</td><td class="name" onclick="toggle('forEachTemplateArgument1')"><a name="forEachTemplateArgument1Anchor">forEachTemplateArgument</a></td><td>clang::ast_matchers::Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateArgument.html">TemplateArgument</a>&gt; InnerMatcher</td></tr>
+<tr><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateSpecializationType.html">TemplateSpecializationType</a>&gt;</td><td class="name" onclick="toggle('forEachTemplateArgument1')"><a name="forEachTemplateArgument1Anchor">forEachTemplateArgument</a></td><td>Matcher&lt;<a href="https://clang.llvm.org/doxygen/classclang_1_1TemplateArgument.html">TemplateArgument</a>&gt; InnerMatcher</td></tr>
<tr><td colspan="4" class="doc" id="forEachTemplateArgument1"><pre>Matches classTemplateSpecialization, templateSpecializationType and
functionDecl nodes where the template argument matches the inner matcher.
This matcher may produce multiple matches.
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 7e16b9f0c67d..6f6ce7c68a7a 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -192,6 +192,9 @@ Improvements to Clang's diagnostics
- Clang now diagnoses declarative nested name specifiers that name alias templates.
+- Clang now diagnoses lambda function expressions being implicitly cast to boolean values, under ``-Wpointer-bool-conversion``.
+ Fixes `#82512 <https://github.com/llvm/llvm-project/issues/82512>`_.
+
Improvements to Clang's time-trace
----------------------------------
@@ -215,6 +218,10 @@ Bug Fixes in This Version
for logical operators in C23.
Fixes (`#64356 <https://github.com/llvm/llvm-project/issues/64356>`_).
+- Clang no longer produces a false-positive `-Wunused-variable` warning
+ for variables created through copy initialization having side-effects in C++17 and later.
+ Fixes (`#79518 <https://github.com/llvm/llvm-project/issues/79518>`_).
+
Bug Fixes to Compiler Builtins
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -290,6 +297,20 @@ Bug Fixes to C++ Support
lookup searches the bases of an incomplete class.
- Fix a crash when an unresolved overload set is encountered on the RHS of a ``.*`` operator.
(`#53815 <https://github.com/llvm/llvm-project/issues/53815>`_)
+- In ``__restrict``-qualified member functions, attach ``__restrict`` to the pointer type of
+ ``this`` rather than the pointee type.
+ Fixes (`#82941 <https://github.com/llvm/llvm-project/issues/82941>`_),
+ (`#42411 <https://github.com/llvm/llvm-project/issues/42411>`_), and
+ (`#18121 <https://github.com/llvm/llvm-project/issues/18121>`_).
+- Clang now properly reports supported C++11 attributes when using
+ ``__has_cpp_attribute`` and parses attributes with arguments in C++03
+ (`#82995 <https://github.com/llvm/llvm-project/issues/82995>`_)
+- Clang now properly diagnoses missing 'default' template arguments on a variety
+ of templates. Previously we were diagnosing on any non-function template
+ instead of only on class, alias, and variable templates, as last updated by
+ CWG2032.
+ Fixes (`#83461 <https://github.com/llvm/llvm-project/issues/83461>`_)
+
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -325,6 +346,11 @@ Arm and AArch64 Support
improvements for most targets. We have not changed the default behavior for
ARMv6, but may revisit that decision in the future. Users can restore the old
behavior with -m[no-]unaligned-access.
+- An alias identifier (rdma) has been added for targeting the AArch64
+ Architecture Extension which uses Rounding Doubling Multiply Accumulate
+ instructions (rdm). The identifier is available on the command line as
+ a feature modifier for -march and -mcpu as well as via target attributes
+ like ``target_version`` or ``target_clones``.
Android Support
^^^^^^^^^^^^^^^
diff --git a/clang/docs/tools/dump_ast_matchers.py b/clang/docs/tools/dump_ast_matchers.py
index cc7024d1627b..705ff0d4d409 100755
--- a/clang/docs/tools/dump_ast_matchers.py
+++ b/clang/docs/tools/dump_ast_matchers.py
@@ -116,6 +116,8 @@ def strip_doxygen(comment):
def unify_arguments(args):
"""Gets rid of anything the user doesn't care about in the argument list."""
+ args = re.sub(r"clang::ast_matchers::internal::", r"", args)
+ args = re.sub(r"ast_matchers::internal::", r"", args)
args = re.sub(r"internal::", r"", args)
args = re.sub(r"extern const\s+(.*)&", r"\1 ", args)
args = re.sub(r"&", r" ", args)
diff --git a/clang/include/clang/APINotes/APINotesWriter.h b/clang/include/clang/APINotes/APINotesWriter.h
index dad44623e16a..c5ca3e461779 100644
--- a/clang/include/clang/APINotes/APINotesWriter.h
+++ b/clang/include/clang/APINotes/APINotesWriter.h
@@ -5,7 +5,13 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-
+//
+// This file defines the \c APINotesWriter class that writes out source
+// API notes data providing additional information about source code as
+// a separate input, such as the non-nil/nilable annotations for
+// method parameters.
+//
+//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_APINOTES_WRITER_H
#define LLVM_CLANG_APINOTES_WRITER_H
@@ -20,11 +26,16 @@ namespace clang {
class FileEntry;
namespace api_notes {
+
+/// A class that writes API notes data to a binary representation that can be
+/// read by the \c APINotesReader.
class APINotesWriter {
class Implementation;
std::unique_ptr<Implementation> Implementation;
public:
+ /// Create a new API notes writer with the given module name and
+ /// (optional) source file.
APINotesWriter(llvm::StringRef ModuleName, const FileEntry *SF);
~APINotesWriter();
diff --git a/clang/include/clang/ASTMatchers/ASTMatchers.h b/clang/include/clang/ASTMatchers/ASTMatchers.h
index dc1f49525a00..ced89ff127ab 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -4580,8 +4580,7 @@ AST_POLYMORPHIC_MATCHER_P2(hasArgument,
/// return (args * ... * 1);
/// }
/// \endcode
-AST_MATCHER_P(CXXFoldExpr, hasFoldInit, ast_matchers::internal::Matcher<Expr>,
- InnerMacher) {
+AST_MATCHER_P(CXXFoldExpr, hasFoldInit, internal::Matcher<Expr>, InnerMacher) {
const auto *const Init = Node.getInit();
return Init && InnerMacher.matches(*Init, Finder, Builder);
}
@@ -4603,8 +4602,7 @@ AST_MATCHER_P(CXXFoldExpr, hasFoldInit, ast_matchers::internal::Matcher<Expr>,
/// return (args * ... * 1);
/// }
/// \endcode
-AST_MATCHER_P(CXXFoldExpr, hasPattern, ast_matchers::internal::Matcher<Expr>,
- InnerMacher) {
+AST_MATCHER_P(CXXFoldExpr, hasPattern, internal::Matcher<Expr>, InnerMacher) {
const Expr *const Pattern = Node.getPattern();
return Pattern && InnerMacher.matches(*Pattern, Finder, Builder);
}
@@ -4685,8 +4683,8 @@ AST_MATCHER(CXXFoldExpr, isBinaryFold) { return Node.getInit() != nullptr; }
/// \code
/// int x{y}.
/// \endcode
-AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
- ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
+AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, internal::Matcher<Expr>,
+ InnerMatcher) {
return N < Node.getNumInits() &&
InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
@@ -5309,7 +5307,7 @@ AST_POLYMORPHIC_MATCHER_P(
forEachTemplateArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType, FunctionDecl),
- clang::ast_matchers::internal::Matcher<TemplateArgument>, InnerMatcher) {
+ internal::Matcher<TemplateArgument>, InnerMatcher) {
ArrayRef<TemplateArgument> TemplateArgs =
clang::ast_matchers::internal::getTemplateSpecializationArgs(Node);
clang::ast_matchers::internal::BoundNodesTreeBuilder Result;
@@ -8525,8 +8523,8 @@ AST_MATCHER(FunctionDecl, hasTrailingReturn) {
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
-AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
- ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
+AST_MATCHER_P(Expr, ignoringElidableConstructorCall, internal::Matcher<Expr>,
+ InnerMatcher) {
// E tracks the node that we are examining.
const Expr *E = &Node;
// If present, remove an outer `ExprWithCleanups` corresponding to the
diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
index 7f8c70d16937..62e7af7ac219 100644
--- a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
+++ b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -723,9 +723,12 @@ RecordStorageLocation *getImplicitObjectLocation(const CXXMemberCallExpr &MCE,
RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
const Environment &Env);
-/// Returns the fields of `RD` that are initialized by an `InitListExpr`, in the
-/// order in which they appear in `InitListExpr::inits()`.
-std::vector<FieldDecl *> getFieldsForInitListExpr(const RecordDecl *RD);
+/// Returns the fields of a `RecordDecl` that are initialized by an
+/// `InitListExpr`, in the order in which they appear in
+/// `InitListExpr::inits()`.
+/// `Init->getType()` must be a record type.
+std::vector<const FieldDecl *>
+getFieldsForInitListExpr(const InitListExpr *InitList);
/// Associates a new `RecordValue` with `Loc` and returns the new value.
RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env);
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 3bc35c5bb38e..2c83dca248fb 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -690,7 +690,7 @@ def Popcount : Builtin, BitInt_Long_LongLongTemplate {
def Popcountg : Builtin {
let Spellings = ["__builtin_popcountg"];
- let Attributes = [NoThrow, Const];
+ let Attributes = [NoThrow, Const, CustomTypeChecking];
let Prototype = "int(...)";
}
@@ -4536,6 +4536,18 @@ def HLSLDotProduct : LangBuiltin<"HLSL_LANG"> {
let Prototype = "void(...)";
}
+def HLSLFrac : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_elementwise_frac"];
+ let Attributes = [NoThrow, Const];
+ let Prototype = "void(...)";
+}
+
+def HLSLLerp : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_lerp"];
+ let Attributes = [NoThrow, Const];
+ let Prototype = "void(...)";
+}
+
// Builtins for XRay.
def XRayCustomEvent : Builtin {
let Spellings = ["__xray_customevent"];
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index b13181f6e708..1bc9885849d5 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -693,7 +693,6 @@ def err_drv_cannot_mix_options : Error<"cannot specify '%1' along with '%0'">;
def err_drv_invalid_object_mode : Error<
"OBJECT_MODE setting %0 is not recognized and is not a valid setting">;
-def err_aix_unsupported_tls_model : Error<"TLS model '%0' is not yet supported on AIX">;
def err_roptr_requires_data_sections: Error<"-mxcoff-roptr is supported only with -fdata-sections">;
def err_roptr_cannot_build_shared: Error<"-mxcoff-roptr is not supported with -shared">;
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index c8141fefb8ed..91105d4231f0 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -765,7 +765,7 @@ def err_builtin_redeclare : Error<"cannot redeclare builtin function %0">;
def err_arm_invalid_specialreg : Error<"invalid special register for builtin">;
def err_arm_invalid_coproc : Error<"coprocessor %0 must be configured as "
"%select{GCP|CDE}1">;
-def err_invalid_cpu_supports : Error<"invalid cpu feature string for builtin">;
+def warn_invalid_cpu_supports : Warning<"invalid cpu feature string for builtin">;
def err_invalid_cpu_is : Error<"invalid cpu name for builtin">;
def err_invalid_cpu_specific_dispatch_value : Error<
"invalid option '%0' for %select{cpu_specific|cpu_dispatch}1">;
@@ -4127,8 +4127,8 @@ def ext_ms_impcast_fn_obj : ExtWarn<
"Microsoft extension">, InGroup<MicrosoftCast>;
def warn_impcast_pointer_to_bool : Warning<
- "address of%select{| function| array}0 '%1' will always evaluate to "
- "'true'">,
+ "address of %select{'%1'|function '%1'|array '%1'|lambda function pointer "
+ "conversion operator}0 will always evaluate to 'true'">,
InGroup<PointerBoolConversion>;
def warn_cast_nonnull_to_bool : Warning<
"nonnull %select{function call|parameter}0 '%1' will evaluate to "
@@ -10267,9 +10267,9 @@ def err_sizeless_nonlocal : Error<
"non-local variable with sizeless type %0">;
def err_vec_builtin_non_vector : Error<
- "first two arguments to %0 must be vectors">;
+ "%select{first two|all}1 arguments to %0 must be vectors">;
def err_vec_builtin_incompatible_vector : Error<
- "first two arguments to %0 must have the same type">;
+ "%select{first two|all}1 arguments to %0 must have the same type">;
def err_vsx_builtin_nonconstant_argument : Error<
"argument %0 to %1 must be a 2-bit unsigned literal (i.e. 0, 1, 2 or 3)">;
@@ -11984,7 +11984,7 @@ def err_builtin_invalid_arg_type: Error <
"signed integer or floating point type|vector type|"
"floating point type|"
"vector of integers|"
- "type of integer}1 (was %2)">;
+ "type of unsigned integer}1 (was %2)">;
def err_builtin_matrix_disabled: Error<
"matrix types extension is disabled. Pass -fenable-matrix to enable it">;
@@ -12212,6 +12212,10 @@ def err_acc_construct_appertainment
: Error<"OpenACC construct '%0' cannot be used here; it can only "
"be used in a statement context">;
def err_acc_branch_in_out_compute_construct
- : Error<"invalid %select{branch|return}0 %select{out of|into}1 OpenACC "
- "Compute Construct">;
+ : Error<"invalid %select{branch|return|throw}0 %select{out of|into}1 "
+ "OpenACC Compute Construct">;
+def note_acc_branch_into_compute_construct
+ : Note<"invalid branch into OpenACC Compute Construct">;
+def note_acc_branch_out_of_compute_construct
+ : Note<"invalid branch out of OpenACC Compute Construct">;
} // end of sema component.
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 48e9cec48275..7682f84e491c 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1369,13 +1369,35 @@ public:
}
struct BranchProtectionInfo {
- LangOptions::SignReturnAddressScopeKind SignReturnAddr =
- LangOptions::SignReturnAddressScopeKind::None;
- LangOptions::SignReturnAddressKeyKind SignKey =
- LangOptions::SignReturnAddressKeyKind::AKey;
- bool BranchTargetEnforcement = false;
- bool BranchProtectionPAuthLR = false;
- bool GuardedControlStack = false;
+ LangOptions::SignReturnAddressScopeKind SignReturnAddr;
+ LangOptions::SignReturnAddressKeyKind SignKey;
+ bool BranchTargetEnforcement;
+ bool BranchProtectionPAuthLR;
+ bool GuardedControlStack;
+
+ BranchProtectionInfo() = default;
+
+ const char *getSignReturnAddrStr() const {
+ switch (SignReturnAddr) {
+ case LangOptions::SignReturnAddressScopeKind::None:
+ return "none";
+ case LangOptions::SignReturnAddressScopeKind::NonLeaf:
+ return "non-leaf";
+ case LangOptions::SignReturnAddressScopeKind::All:
+ return "all";
+ }
+ llvm_unreachable("Unexpected SignReturnAddressScopeKind");
+ }
+
+ const char *getSignKeyStr() const {
+ switch (SignKey) {
+ case LangOptions::SignReturnAddressKeyKind::AKey:
+ return "a_key";
+ case LangOptions::SignReturnAddressKeyKind::BKey:
+ return "b_key";
+ }
+ llvm_unreachable("Unexpected SignReturnAddressKeyKind");
+ }
};
/// Determine if the Architecture in this TargetInfo supports branch
diff --git a/clang/include/clang/Driver/Driver.h b/clang/include/clang/Driver/Driver.h
index a5ca637853a6..73cf326101ff 100644
--- a/clang/include/clang/Driver/Driver.h
+++ b/clang/include/clang/Driver/Driver.h
@@ -160,7 +160,7 @@ public:
/// Target and driver mode components extracted from clang executable name.
ParsedClangName ClangNameParts;
- /// The path to the installed clang directory, if any.
+ /// TODO: Remove this in favor of Dir.
std::string InstalledDir;
/// The path to the compiler resource directory.
@@ -433,7 +433,6 @@ public:
return InstalledDir.c_str();
return Dir.c_str();
}
- void setInstalledDir(StringRef Value) { InstalledDir = std::string(Value); }
bool isSaveTempsEnabled() const { return SaveTemps != SaveTempsNone; }
bool isSaveTempsObj() const { return SaveTemps == SaveTempsObj; }
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 3a028fadb25b..bef38738fde8 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -550,6 +550,13 @@ multiclass BoolGOption<string flag_base, KeyPathAndMacro kpm,
Group<g_Group>;
}
+multiclass BoolMOption<string flag_base, KeyPathAndMacro kpm,
+ Default default, FlagDef flag1, FlagDef flag2,
+ BothFlags both = BothFlags<[]>> {
+ defm NAME : BoolOption<"m", flag_base, kpm, default, flag1, flag2, both>,
+ Group<m_Group>;
+}
+
// Works like BoolOption except without marshalling
multiclass BoolOptionWithoutMarshalling<string prefix = "", string spelling_base,
FlagDef flag1_base, FlagDef flag2_base,
@@ -4600,11 +4607,10 @@ def mretpoline : Flag<["-"], "mretpoline">, Group<m_Group>,
Visibility<[ClangOption, CLOption]>;
def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_Group>,
Visibility<[ClangOption, CLOption]>;
-defm speculative_load_hardening : BoolOption<"m", "speculative-load-hardening",
+defm speculative_load_hardening : BoolMOption<"speculative-load-hardening",
CodeGenOpts<"SpeculativeLoadHardening">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption, CC1Option]>,
- NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>,
- Group<m_Group>;
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>;
def mlvi_hardening : Flag<["-"], "mlvi-hardening">, Group<m_Group>,
Visibility<[ClangOption, CLOption]>,
HelpText<"Enable all mitigations for Load Value Injection (LVI)">;
@@ -4821,13 +4827,13 @@ def mexec_model_EQ : Joined<["-"], "mexec-model=">, Group<m_wasm_Features_Driver
"explicitly terminated.">;
} // let Flags = [TargetSpecific]
-defm amdgpu_ieee : BoolOption<"m", "amdgpu-ieee",
+defm amdgpu_ieee : BoolMOption<"amdgpu-ieee",
CodeGenOpts<"EmitIEEENaNCompliantInsts">, DefaultTrue,
PosFlag<SetTrue, [], [ClangOption], "Sets the IEEE bit in the expected default floating point "
" mode register. Floating point opcodes that support exception flag "
"gathering quiet and propagate signaling NaN inputs per IEEE 754-2008. "
"This option changes the ABI. (AMDGPU only)">,
- NegFlag<SetFalse, [], [ClangOption, CC1Option]>>, Group<m_Group>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option]>>;
def mcode_object_version_EQ : Joined<["-"], "mcode-object-version=">, Group<m_Group>,
HelpText<"Specify code object ABI version. Defaults to 5. (AMDGPU only)">,
@@ -4846,14 +4852,14 @@ defm wavefrontsize64 : SimpleMFlag<"wavefrontsize64",
"Specify wavefront size 64", "Specify wavefront size 32",
" mode (AMDGPU only)">;
-defm unsafe_fp_atomics : BoolOption<"m", "unsafe-fp-atomics",
+defm unsafe_fp_atomics : BoolMOption<"unsafe-fp-atomics",
TargetOpts<"AllowAMDGPUUnsafeFPAtomics">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption, CC1Option],
"Enable generation of unsafe floating point "
"atomic instructions. May generate more efficient code, but may not "
"respect rounding and denormal modes, and may give incorrect results "
"for certain memory destinations. (AMDGPU only)">,
- NegFlag<SetFalse>>, Group<m_Group>;
+ NegFlag<SetFalse>>;
def faltivec : Flag<["-"], "faltivec">, Group<f_Group>;
def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>;
@@ -4941,11 +4947,10 @@ def mrop_protect : Flag<["-"], "mrop-protect">,
def mprivileged : Flag<["-"], "mprivileged">,
Group<m_ppc_Features_Group>;
-defm regnames : BoolOption<"m", "regnames",
+defm regnames : BoolMOption<"regnames",
CodeGenOpts<"PPCUseFullRegisterNames">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use full register names when writing assembly output">,
- NegFlag<SetFalse, [], [ClangOption], "Use only register numbers when writing assembly output">>,
- Group<m_Group>;
+ NegFlag<SetFalse, [], [ClangOption], "Use only register numbers when writing assembly output">>;
} // let Flags = [TargetSpecific]
def maix_small_local_exec_tls : Flag<["-"], "maix-small-local-exec-tls">,
Group<m_ppc_Features_Group>,
@@ -4987,10 +4992,10 @@ def mxcoff_build_id_EQ : Joined<["-"], "mxcoff-build-id=">, Group<Link_Group>, M
def mignore_xcoff_visibility : Flag<["-"], "mignore-xcoff-visibility">, Group<m_Group>,
HelpText<"Not emit the visibility attribute for asm in AIX OS or give all symbols 'unspecified' visibility in XCOFF object file">,
Flags<[TargetSpecific]>, Visibility<[ClangOption, CC1Option]>;
-defm backchain : BoolOption<"m", "backchain",
+defm backchain : BoolMOption<"backchain",
CodeGenOpts<"Backchain">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption], "Link stack frames through backchain on System Z">,
- NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>, Group<m_Group>;
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
def mno_warn_nonportable_cfstrings : Flag<["-"], "mno-warn-nonportable-cfstrings">, Group<m_Group>;
def mno_omit_leaf_frame_pointer : Flag<["-"], "mno-omit-leaf-frame-pointer">, Group<m_Group>;
@@ -6952,7 +6957,7 @@ def msmall_data_limit : Separate<["-"], "msmall-data-limit">,
def funwind_tables_EQ : Joined<["-"], "funwind-tables=">,
HelpText<"Generate unwinding tables for all functions">,
MarshallingInfoInt<CodeGenOpts<"UnwindTables">>;
-defm constructor_aliases : BoolOption<"m", "constructor-aliases",
+defm constructor_aliases : BoolMOption<"constructor-aliases",
CodeGenOpts<"CXXCtorDtorAliases">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption], "Enable">,
NegFlag<SetFalse, [], [ClangOption], "Disable">,
diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h
index b97d0c636806..cce91862ae3d 100644
--- a/clang/include/clang/Frontend/CompilerInstance.h
+++ b/clang/include/clang/Frontend/CompilerInstance.h
@@ -225,6 +225,9 @@ public:
// of the context or else not CompilerInstance specific.
bool ExecuteAction(FrontendAction &Act);
+ /// At the end of a compilation, print the number of warnings/errors.
+ void printDiagnosticStats();
+
/// Load the list of plugins requested in the \c FrontendOptions.
void LoadRequestedPlugins();
diff --git a/clang/include/clang/Frontend/CompilerInvocation.h b/clang/include/clang/Frontend/CompilerInvocation.h
index 8fc51e6ec03b..1a2a39411e58 100644
--- a/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/clang/include/clang/Frontend/CompilerInvocation.h
@@ -201,6 +201,8 @@ private:
/// @}
};
+class CowCompilerInvocation;
+
/// Helper class for holding the data necessary to invoke the compiler.
///
/// This class is designed to represent an abstract "invocation" of the
@@ -220,6 +222,9 @@ public:
}
~CompilerInvocation() = default;
+ explicit CompilerInvocation(const CowCompilerInvocation &X);
+ CompilerInvocation &operator=(const CowCompilerInvocation &X);
+
/// Const getters.
/// @{
// Note: These need to be pulled in manually. Otherwise, they get hidden by
diff --git a/clang/include/clang/InstallAPI/Context.h b/clang/include/clang/InstallAPI/Context.h
index 7d105920734f..4e9e90e5d2db 100644
--- a/clang/include/clang/InstallAPI/Context.h
+++ b/clang/include/clang/InstallAPI/Context.h
@@ -9,12 +9,15 @@
#ifndef LLVM_CLANG_INSTALLAPI_CONTEXT_H
#define LLVM_CLANG_INSTALLAPI_CONTEXT_H
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/InstallAPI/HeaderFile.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/TextAPI/InterfaceFile.h"
-#include "llvm/TextAPI/RecordVisitor.h"
-#include "llvm/TextAPI/RecordsSlice.h"
namespace clang {
namespace installapi {
+class FrontendRecordsSlice;
/// Struct used for generating validating InstallAPI.
/// The attributes captured represent all necessary information
@@ -24,14 +27,53 @@ struct InstallAPIContext {
/// Library attributes that are typically passed as linker inputs.
llvm::MachO::RecordsSlice::BinaryAttrs BA;
- /// Active target triple to parse.
- llvm::Triple TargetTriple{};
+ /// All headers that represent a library.
+ HeaderSeq InputHeaders;
+
+ /// Active language mode to parse in.
+ Language LangMode = Language::ObjC;
+
+ /// Active header access type.
+ HeaderType Type = HeaderType::Unknown;
+
+ /// Active TargetSlice for symbol record collection.
+ std::shared_ptr<FrontendRecordsSlice> Slice;
+
+ /// FileManager for all I/O operations.
+ FileManager *FM = nullptr;
+
+ /// DiagnosticsEngine for all error reporting.
+ DiagnosticsEngine *Diags = nullptr;
/// File Path of output location.
llvm::StringRef OutputLoc{};
/// What encoding to write output as.
llvm::MachO::FileType FT = llvm::MachO::FileType::TBD_V5;
+
+ /// Populate entries of headers that should be included for TextAPI
+ /// generation.
+ void addKnownHeader(const HeaderFile &H);
+
+ /// Record visited files during frontend actions to determine whether to
+ /// include their declarations for TextAPI generation.
+ ///
+ /// \param FE Header that is being parsed.
+ /// \param PP Preprocesser used for querying how header was imported.
+ /// \return Access level of header if it should be included for TextAPI
+ /// generation.
+ std::optional<HeaderType> findAndRecordFile(const FileEntry *FE,
+ const Preprocessor &PP);
+
+private:
+ using HeaderMap = llvm::DenseMap<const FileEntry *, HeaderType>;
+
+ // Collection of parsed header files and their access level. If set to
+ // HeaderType::Unknown, they are not used for TextAPI generation.
+ HeaderMap KnownFiles;
+
+ // Collection of expected header includes and the access level for them.
+ llvm::DenseMap<StringRef, HeaderType> KnownIncludes;
};
} // namespace installapi
diff --git a/clang/include/clang/InstallAPI/Frontend.h b/clang/include/clang/InstallAPI/Frontend.h
new file mode 100644
index 000000000000..d72b4680fde4
--- /dev/null
+++ b/clang/include/clang/InstallAPI/Frontend.h
@@ -0,0 +1,104 @@
+//===- InstallAPI/Frontend.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Top level wrappers for InstallAPI frontend operations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INSTALLAPI_FRONTEND_H
+#define LLVM_CLANG_INSTALLAPI_FRONTEND_H
+
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Availability.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/InstallAPI/Context.h"
+#include "clang/InstallAPI/Visitor.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace clang {
+namespace installapi {
+
+using SymbolFlags = llvm::MachO::SymbolFlags;
+using RecordLinkage = llvm::MachO::RecordLinkage;
+using GlobalRecord = llvm::MachO::GlobalRecord;
+using ObjCInterfaceRecord = llvm::MachO::ObjCInterfaceRecord;
+
+// Represents a collection of frontend records for a library that are tied to a
+// darwin target triple.
+class FrontendRecordsSlice : public llvm::MachO::RecordsSlice {
+public:
+ FrontendRecordsSlice(const llvm::Triple &T)
+ : llvm::MachO::RecordsSlice({T}) {}
+
+ /// Add non-ObjC global record with attributes from AST.
+ ///
+ /// \param Name The name of symbol.
+ /// \param Linkage The linkage of symbol.
+ /// \param GV The kind of global.
+ /// \param Avail The availability information tied to the active target
+ /// triple.
+ /// \param D The pointer to the declaration from traversing AST.
+ /// \param Access The intended access level of symbol.
+ /// \param Flags The flags that describe attributes of the symbol.
+ /// \return The non-owning pointer to added record in slice.
+ GlobalRecord *addGlobal(StringRef Name, RecordLinkage Linkage,
+ GlobalRecord::Kind GV,
+ const clang::AvailabilityInfo Avail, const Decl *D,
+ const HeaderType Access,
+ SymbolFlags Flags = SymbolFlags::None);
+
+ /// Add ObjC Class record with attributes from AST.
+ ///
+ /// \param Name The name of class, not symbol.
+ /// \param Linkage The linkage of symbol.
+ /// \param Avail The availability information tied to the active target
+ /// triple.
+ /// \param D The pointer to the declaration from traversing AST.
+ /// \param Access The intended access level of symbol.
+ /// \param IsEHType Whether declaration has an exception attribute.
+ /// \return The non-owning pointer to added record in slice.
+ ObjCInterfaceRecord *addObjCInterface(StringRef Name, RecordLinkage Linkage,
+ const clang::AvailabilityInfo Avail,
+ const Decl *D, HeaderType Access,
+ bool IsEHType);
+
+private:
+ /// Frontend information captured about records.
+ struct FrontendAttrs {
+ const AvailabilityInfo Avail;
+ const Decl *D;
+ const HeaderType Access;
+ };
+
+ /// Mapping of records stored in slice to their frontend attributes.
+ llvm::DenseMap<llvm::MachO::Record *, FrontendAttrs> FrontendRecords;
+};
+
+/// Create a buffer that contains all headers to scan
+/// for global symbols with.
+std::unique_ptr<llvm::MemoryBuffer> createInputBuffer(InstallAPIContext &Ctx);
+
+class InstallAPIAction : public ASTFrontendAction {
+public:
+ explicit InstallAPIAction(InstallAPIContext &Ctx) : Ctx(Ctx) {}
+
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override {
+ return std::make_unique<InstallAPIVisitor>(
+ CI.getASTContext(), Ctx, CI.getSourceManager(), CI.getPreprocessor());
+ }
+
+private:
+ InstallAPIContext &Ctx;
+};
+} // namespace installapi
+} // namespace clang
+
+#endif // LLVM_CLANG_INSTALLAPI_FRONTEND_H
diff --git a/clang/include/clang/InstallAPI/HeaderFile.h b/clang/include/clang/InstallAPI/HeaderFile.h
index fc64a43b3def..70e83bbb3e76 100644
--- a/clang/include/clang/InstallAPI/HeaderFile.h
+++ b/clang/include/clang/InstallAPI/HeaderFile.h
@@ -15,6 +15,7 @@
#include "clang/Basic/LangStandard.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <optional>
#include <string>
@@ -32,6 +33,20 @@ enum class HeaderType {
Project,
};
+inline StringRef getName(const HeaderType T) {
+ switch (T) {
+ case HeaderType::Public:
+ return "Public";
+ case HeaderType::Private:
+ return "Private";
+ case HeaderType::Project:
+ return "Project";
+ case HeaderType::Unknown:
+ return "Unknown";
+ }
+ llvm_unreachable("unexpected header type");
+}
+
class HeaderFile {
/// Full input path to header.
std::string FullPath;
@@ -52,6 +67,14 @@ public:
static llvm::Regex getFrameworkIncludeRule();
+ HeaderType getType() const { return Type; }
+ StringRef getIncludeName() const { return IncludeName; }
+ StringRef getPath() const { return FullPath; }
+
+ bool useIncludeName() const {
+ return Type != HeaderType::Project && !IncludeName.empty();
+ }
+
bool operator==(const HeaderFile &Other) const {
return std::tie(Type, FullPath, IncludeName, Language) ==
std::tie(Other.Type, Other.FullPath, Other.IncludeName,
diff --git a/clang/include/clang/InstallAPI/Visitor.h b/clang/include/clang/InstallAPI/Visitor.h
new file mode 100644
index 000000000000..60a05005df84
--- /dev/null
+++ b/clang/include/clang/InstallAPI/Visitor.h
@@ -0,0 +1,60 @@
+//===- InstallAPI/Visitor.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// ASTVisitor Interface for InstallAPI frontend operations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INSTALLAPI_VISITOR_H
+#define LLVM_CLANG_INSTALLAPI_VISITOR_H
+
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/InstallAPI/Context.h"
+#include "llvm/ADT/Twine.h"
+
+namespace clang {
+namespace installapi {
+
+/// ASTVisitor for collecting declarations that represent global symbols.
+class InstallAPIVisitor final : public ASTConsumer,
+ public RecursiveASTVisitor<InstallAPIVisitor> {
+public:
+ InstallAPIVisitor(ASTContext &ASTCtx, InstallAPIContext &Ctx,
+ SourceManager &SrcMgr, Preprocessor &PP)
+ : Ctx(Ctx), SrcMgr(SrcMgr), PP(PP),
+ MC(ItaniumMangleContext::create(ASTCtx, ASTCtx.getDiagnostics())),
+ Layout(ASTCtx.getTargetInfo().getDataLayoutString()) {}
+ void HandleTranslationUnit(ASTContext &ASTCtx) override;
+
+ /// Collect global variables.
+ bool VisitVarDecl(const VarDecl *D);
+
+ /// Collect Objective-C Interface declarations.
+ /// Every Objective-C class has an interface declaration that lists all the
+ /// ivars, properties, and methods of the class.
+ bool VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D);
+
+private:
+ std::string getMangledName(const NamedDecl *D) const;
+ std::string getBackendMangledName(llvm::Twine Name) const;
+ std::optional<HeaderType> getAccessForDecl(const NamedDecl *D) const;
+
+ InstallAPIContext &Ctx;
+ SourceManager &SrcMgr;
+ Preprocessor &PP;
+ std::unique_ptr<clang::ItaniumMangleContext> MC;
+ StringRef Layout;
+};
+
+} // namespace installapi
+} // namespace clang
+
+#endif // LLVM_CLANG_INSTALLAPI_VISITOR_H
diff --git a/clang/include/clang/Sema/Scope.h b/clang/include/clang/Sema/Scope.h
index b6b5a1f3479a..536c12cb9d64 100644
--- a/clang/include/clang/Sema/Scope.h
+++ b/clang/include/clang/Sema/Scope.h
@@ -43,6 +43,9 @@ public:
/// ScopeFlags - These are bitfields that are or'd together when creating a
/// scope, which defines the sorts of things the scope contains.
enum ScopeFlags {
+ // A bitfield value representing no scopes.
+ NoScope = 0,
+
/// This indicates that the scope corresponds to a function, which
/// means that labels are set here.
FnScope = 0x01,
@@ -521,10 +524,17 @@ public:
return getFlags() & Scope::OpenACCComputeConstructScope;
}
- bool isInOpenACCComputeConstructScope() const {
+ /// Determine if this scope (or its parents) are a compute construct. If the
+ /// argument is provided, the search will stop at any of the specified scopes.
+ /// Otherwise, it will stop only at the normal 'no longer search' scopes.
+ bool isInOpenACCComputeConstructScope(ScopeFlags Flags = NoScope) const {
for (const Scope *S = this; S; S = S->getParent()) {
- if (S->getFlags() & Scope::OpenACCComputeConstructScope)
+ if (S->isOpenACCComputeConstructScope())
return true;
+
+ if (S->getFlags() & Flags)
+ return false;
+
else if (S->getFlags() &
(Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
Scope::TemplateParamScope | Scope::FunctionPrototypeScope |
diff --git a/clang/lib/APINotes/APINotesReader.cpp b/clang/lib/APINotes/APINotesReader.cpp
index 55ea4bae81e6..fbbe9c32ce12 100644
--- a/clang/lib/APINotes/APINotesReader.cpp
+++ b/clang/lib/APINotes/APINotesReader.cpp
@@ -5,7 +5,13 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-
+//
+// This file implements the \c APINotesReader class that reads source
+// API notes data providing additional information about source code as
+// a separate input, such as the non-nil/nilable annotations for
+// method parameters.
+//
+//===----------------------------------------------------------------------===//
#include "clang/APINotes/APINotesReader.h"
#include "APINotesFormat.h"
#include "llvm/ADT/Hashing.h"
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
index 117e802dae2d..b4f2327d9c56 100644
--- a/clang/lib/AST/DeclCXX.cpp
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -2543,8 +2543,19 @@ QualType CXXMethodDecl::getThisType(const FunctionProtoType *FPT,
const CXXRecordDecl *Decl) {
ASTContext &C = Decl->getASTContext();
QualType ObjectTy = ::getThisObjectType(C, FPT, Decl);
- return C.getLangOpts().HLSL ? C.getLValueReferenceType(ObjectTy)
- : C.getPointerType(ObjectTy);
+
+ // Unlike 'const' and 'volatile', a '__restrict' qualifier must be
+ // attached to the pointer type, not the pointee.
+ bool Restrict = FPT->getMethodQuals().hasRestrict();
+ if (Restrict)
+ ObjectTy.removeLocalRestrict();
+
+ ObjectTy = C.getLangOpts().HLSL ? C.getLValueReferenceType(ObjectTy)
+ : C.getPointerType(ObjectTy);
+
+ if (Restrict)
+ ObjectTy.addRestrict();
+ return ObjectTy;
}
QualType CXXMethodDecl::getThisType() const {
diff --git a/clang/lib/AST/Interp/ByteCodeEmitter.cpp b/clang/lib/AST/Interp/ByteCodeEmitter.cpp
index 60d8afecb2b3..e1b954fcc684 100644
--- a/clang/lib/AST/Interp/ByteCodeEmitter.cpp
+++ b/clang/lib/AST/Interp/ByteCodeEmitter.cpp
@@ -108,8 +108,12 @@ Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
this->LambdaCaptures[Cap.first] = {
Offset, Cap.second->getType()->isReferenceType()};
}
- if (LTC)
- this->LambdaThisCapture = R->getField(LTC)->Offset;
+ if (LTC) {
+ QualType CaptureType = R->getField(LTC)->Decl->getType();
+ this->LambdaThisCapture = {R->getField(LTC)->Offset,
+ CaptureType->isReferenceType() ||
+ CaptureType->isPointerType()};
+ }
}
}
diff --git a/clang/lib/AST/Interp/ByteCodeEmitter.h b/clang/lib/AST/Interp/ByteCodeEmitter.h
index 03de286582c9..548769329b7f 100644
--- a/clang/lib/AST/Interp/ByteCodeEmitter.h
+++ b/clang/lib/AST/Interp/ByteCodeEmitter.h
@@ -62,7 +62,7 @@ protected:
/// Lambda captures.
llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures;
/// Offset of the This parameter in a lambda record.
- unsigned LambdaThisCapture = 0;
+ ParamOffset LambdaThisCapture{0, false};
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index f193f959d3a6..63ab80f59dac 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -1220,14 +1220,18 @@ bool ByteCodeExprGen<Emitter>::VisitArrayInitLoopExpr(
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ const Expr *SourceExpr = E->getSourceExpr();
+ if (!SourceExpr)
+ return false;
+
if (Initializing)
- return this->visitInitializer(E->getSourceExpr());
+ return this->visitInitializer(SourceExpr);
- PrimType SubExprT = classify(E->getSourceExpr()).value_or(PT_Ptr);
+ PrimType SubExprT = classify(SourceExpr).value_or(PT_Ptr);
if (auto It = OpaqueExprs.find(E); It != OpaqueExprs.end())
return this->emitGetLocal(SubExprT, It->second, E);
- if (!this->visit(E->getSourceExpr()))
+ if (!this->visit(SourceExpr))
return false;
// At this point we either have the evaluated source expression or a pointer
@@ -2587,7 +2591,10 @@ bool ByteCodeExprGen<Emitter>::visitExpr(const Expr *E) {
if (!this->emitFinishInit(E))
return false;
- return this->emitRetValue(E);
+ // We are destroying the locals AFTER the Ret op.
+ // The Ret op needs to copy the (alive) values, but the
+ // destructors may still turn the entire expression invalid.
+ return this->emitRetValue(E) && RootScope.destroyLocals();
}
return false;
@@ -2835,7 +2842,8 @@ bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
return false;
} else if (Func->isVariadic()) {
uint32_t VarArgSize = 0;
- unsigned NumParams = Func->getNumWrittenParams();
+ unsigned NumParams =
+ Func->getNumWrittenParams() + isa<CXXOperatorCallExpr>(E);
for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I)
VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
if (!this->emitCallVar(Func, VarArgSize, E))
@@ -2926,8 +2934,11 @@ bool ByteCodeExprGen<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
if (DiscardResult)
return true;
- if (this->LambdaThisCapture > 0)
- return this->emitGetThisFieldPtr(this->LambdaThisCapture, E);
+ if (this->LambdaThisCapture.Offset > 0) {
+ if (this->LambdaThisCapture.IsPtr)
+ return this->emitGetThisFieldPtr(this->LambdaThisCapture.Offset, E);
+ return this->emitGetPtrThisField(this->LambdaThisCapture.Offset, E);
+ }
return this->emitThis(E);
}
@@ -3210,12 +3221,6 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
// we haven't seen yet.
if (Ctx.getLangOpts().CPlusPlus) {
if (const auto *VD = dyn_cast<VarDecl>(D)) {
- // Dummy for static locals
- if (VD->isStaticLocal()) {
- if (std::optional<unsigned> I = P.getOrCreateDummy(D))
- return this->emitGetPtrGlobal(*I, E);
- return false;
- }
// Visit local const variables like normal.
if (VD->isLocalVarDecl() && VD->getType().isConstQualified()) {
if (!this->visitVarDecl(VD))
@@ -3223,6 +3228,9 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
// Retry.
return this->VisitDeclRefExpr(E);
}
+
+ if (VD->hasExternalStorage())
+ return this->emitInvalidDeclRef(E, E);
}
} else {
if (const auto *VD = dyn_cast<VarDecl>(D);
@@ -3232,11 +3240,11 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
// Retry.
return this->VisitDeclRefExpr(E);
}
-
- if (std::optional<unsigned> I = P.getOrCreateDummy(D))
- return this->emitGetPtrGlobal(*I, E);
}
+ if (std::optional<unsigned> I = P.getOrCreateDummy(D))
+ return this->emitGetPtrGlobal(*I, E);
+
return this->emitInvalidDeclRef(E, E);
}
@@ -3414,14 +3422,15 @@ bool ByteCodeExprGen<Emitter>::emitRecordDestruction(const Record *R) {
// Now emit the destructor and recurse into base classes.
if (const CXXDestructorDecl *Dtor = R->getDestructor();
Dtor && !Dtor->isTrivial()) {
- if (const Function *DtorFunc = getFunction(Dtor)) {
- assert(DtorFunc->hasThisPointer());
- assert(DtorFunc->getNumParams() == 1);
- if (!this->emitDupPtr(SourceInfo{}))
- return false;
- if (!this->emitCall(DtorFunc, 0, SourceInfo{}))
- return false;
- }
+ const Function *DtorFunc = getFunction(Dtor);
+ if (!DtorFunc)
+ return false;
+ assert(DtorFunc->hasThisPointer());
+ assert(DtorFunc->getNumParams() == 1);
+ if (!this->emitDupPtr(SourceInfo{}))
+ return false;
+ if (!this->emitCall(DtorFunc, 0, SourceInfo{}))
+ return false;
}
for (const Record::Base &Base : llvm::reverse(R->bases())) {
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.h b/clang/lib/AST/Interp/ByteCodeExprGen.h
index 5b3b533dba38..acbbcc3dc961 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -332,7 +332,7 @@ public:
}
virtual void emitDestruction() {}
- virtual void emitDestructors() {}
+ virtual bool emitDestructors() { return true; }
VariableScope *getParent() const { return Parent; }
protected:
@@ -356,13 +356,18 @@ public:
}
/// Overriden to support explicit destruction.
- void emitDestruction() override {
+ void emitDestruction() override { destroyLocals(); }
+
+ /// Explicit destruction of local variables.
+ bool destroyLocals() {
if (!Idx)
- return;
- this->emitDestructors();
+ return true;
+
+ bool Success = this->emitDestructors();
this->Ctx->emitDestroy(*Idx, SourceInfo{});
removeStoredOpaqueValues();
this->Idx = std::nullopt;
+ return Success;
}
void addLocal(const Scope::Local &Local) override {
@@ -374,19 +379,25 @@ public:
this->Ctx->Descriptors[*Idx].emplace_back(Local);
}
- void emitDestructors() override {
+ bool emitDestructors() override {
if (!Idx)
- return;
+ return true;
// Emit destructor calls for local variables of record
// type with a destructor.
for (Scope::Local &Local : this->Ctx->Descriptors[*Idx]) {
if (!Local.Desc->isPrimitive() && !Local.Desc->isPrimitiveArray()) {
- this->Ctx->emitGetPtrLocal(Local.Offset, SourceInfo{});
- this->Ctx->emitDestruction(Local.Desc);
- this->Ctx->emitPopPtr(SourceInfo{});
+ if (!this->Ctx->emitGetPtrLocal(Local.Offset, SourceInfo{}))
+ return false;
+
+ if (!this->Ctx->emitDestruction(Local.Desc))
+ return false;
+
+ if (!this->Ctx->emitPopPtr(SourceInfo{}))
+ return false;
removeIfStoredOpaqueValue(Local);
}
}
+ return true;
}
void removeStoredOpaqueValues() {
diff --git a/clang/lib/AST/Interp/Context.cpp b/clang/lib/AST/Interp/Context.cpp
index b09019f3e65b..017095352dc2 100644
--- a/clang/lib/AST/Interp/Context.cpp
+++ b/clang/lib/AST/Interp/Context.cpp
@@ -41,8 +41,8 @@ bool Context::isPotentialConstantExpr(State &Parent, const FunctionDecl *FD) {
}
bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) {
- assert(Stk.empty());
- ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
+ bool Recursing = !Stk.empty();
+ ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk);
auto Res = C.interpretExpr(E, /*ConvertResultToRValue=*/E->isGLValue());
@@ -51,12 +51,14 @@ bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) {
return false;
}
- assert(Stk.empty());
+ if (!Recursing) {
+ assert(Stk.empty());
#ifndef NDEBUG
- // Make sure we don't rely on some value being still alive in
- // InterpStack memory.
- Stk.clear();
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
#endif
+ }
Result = Res.toAPValue();
@@ -64,8 +66,8 @@ bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) {
}
bool Context::evaluate(State &Parent, const Expr *E, APValue &Result) {
- assert(Stk.empty());
- ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
+ bool Recursing = !Stk.empty();
+ ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk);
auto Res = C.interpretExpr(E);
if (Res.isInvalid()) {
@@ -73,20 +75,23 @@ bool Context::evaluate(State &Parent, const Expr *E, APValue &Result) {
return false;
}
- assert(Stk.empty());
+ if (!Recursing) {
+ assert(Stk.empty());
#ifndef NDEBUG
- // Make sure we don't rely on some value being still alive in
- // InterpStack memory.
- Stk.clear();
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
#endif
+ }
+
Result = Res.toAPValue();
return true;
}
bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD,
APValue &Result) {
- assert(Stk.empty());
- ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
+ bool Recursing = !Stk.empty();
+ ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk);
bool CheckGlobalInitialized =
shouldBeGloballyIndexed(VD) &&
@@ -97,12 +102,14 @@ bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD,
return false;
}
- assert(Stk.empty());
+ if (!Recursing) {
+ assert(Stk.empty());
#ifndef NDEBUG
- // Make sure we don't rely on some value being still alive in
- // InterpStack memory.
- Stk.clear();
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
#endif
+ }
Result = Res.toAPValue();
return true;
@@ -217,6 +224,9 @@ const CXXMethodDecl *
Context::getOverridingFunction(const CXXRecordDecl *DynamicDecl,
const CXXRecordDecl *StaticDecl,
const CXXMethodDecl *InitialFunction) const {
+ assert(DynamicDecl);
+ assert(StaticDecl);
+ assert(InitialFunction);
const CXXRecordDecl *CurRecord = DynamicDecl;
const CXXMethodDecl *FoundFunction = InitialFunction;
diff --git a/clang/lib/AST/Interp/Context.h b/clang/lib/AST/Interp/Context.h
index c7620921e467..dbb63e369181 100644
--- a/clang/lib/AST/Interp/Context.h
+++ b/clang/lib/AST/Interp/Context.h
@@ -75,6 +75,7 @@ public:
/// Classifies an expression.
std::optional<PrimType> classify(const Expr *E) const {
+ assert(E);
if (E->isGLValue()) {
if (E->getType()->isFunctionType())
return PT_FnPtr;
diff --git a/clang/lib/AST/Interp/EvalEmitter.cpp b/clang/lib/AST/Interp/EvalEmitter.cpp
index 9cae25f5c4d6..bfd502d21b4c 100644
--- a/clang/lib/AST/Interp/EvalEmitter.cpp
+++ b/clang/lib/AST/Interp/EvalEmitter.cpp
@@ -18,7 +18,7 @@ using namespace clang;
using namespace clang::interp;
EvalEmitter::EvalEmitter(Context &Ctx, Program &P, State &Parent,
- InterpStack &Stk, APValue &Result)
+ InterpStack &Stk)
: Ctx(Ctx), P(P), S(Parent, P, Stk, Ctx, this), EvalResult(&Ctx) {
// Create a dummy frame for the interpreter which does not have locals.
S.Current =
@@ -38,8 +38,11 @@ EvaluationResult EvalEmitter::interpretExpr(const Expr *E,
this->ConvertResultToRValue = ConvertResultToRValue;
EvalResult.setSource(E);
- if (!this->visitExpr(E) && EvalResult.empty())
+ if (!this->visitExpr(E)) {
+ // EvalResult may already have a result set, but something failed
+ // after that (e.g. evaluating destructors).
EvalResult.setInvalid();
+ }
return std::move(this->EvalResult);
}
diff --git a/clang/lib/AST/Interp/EvalEmitter.h b/clang/lib/AST/Interp/EvalEmitter.h
index 032c8860ee67..116f1d6fc134 100644
--- a/clang/lib/AST/Interp/EvalEmitter.h
+++ b/clang/lib/AST/Interp/EvalEmitter.h
@@ -41,8 +41,7 @@ public:
InterpState &getState() { return S; }
protected:
- EvalEmitter(Context &Ctx, Program &P, State &Parent, InterpStack &Stk,
- APValue &Result);
+ EvalEmitter(Context &Ctx, Program &P, State &Parent, InterpStack &Stk);
virtual ~EvalEmitter();
@@ -74,7 +73,7 @@ protected:
/// Lambda captures.
llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures;
/// Offset of the This parameter in a lambda record.
- unsigned LambdaThisCapture = 0;
+ ParamOffset LambdaThisCapture{0, false};
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
diff --git a/clang/lib/AST/Interp/EvaluationResult.h b/clang/lib/AST/Interp/EvaluationResult.h
index 28e1ae6ba3e7..ecf2250074cc 100644
--- a/clang/lib/AST/Interp/EvaluationResult.h
+++ b/clang/lib/AST/Interp/EvaluationResult.h
@@ -72,7 +72,8 @@ private:
Kind = LValue;
}
void setInvalid() {
- assert(empty());
+ // We are NOT asserting empty() here, since setting it to invalid
+ // is allowed even if there is already a result.
Kind = Invalid;
}
void setValid() {
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 5670888c245e..4f3cd6cd21a1 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -285,10 +285,6 @@ static bool CheckConstant(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return CheckConstant(S, OpPC, Ptr.getDeclDesc());
}
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- return !Ptr.isDummy();
-}
-
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK) {
if (!Ptr.isZero())
@@ -595,10 +591,8 @@ bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
return true;
}
-/// We aleady know the given DeclRefExpr is invalid for some reason,
-/// now figure out why and print appropriate diagnostics.
-bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
- const ValueDecl *D = DR->getDecl();
+static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC,
+ const ValueDecl *D) {
const SourceInfo &E = S.Current->getSource(OpPC);
if (isa<ParmVarDecl>(D)) {
@@ -621,10 +615,28 @@ bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
return false;
}
}
-
return false;
}
+/// We already know the given DeclRefExpr is invalid for some reason,
+/// now figure out why and print appropriate diagnostics.
+bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
+ const ValueDecl *D = DR->getDecl();
+ return diagnoseUnknownDecl(S, OpPC, D);
+}
+
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ if (!Ptr.isDummy())
+ return true;
+
+ const Descriptor *Desc = Ptr.getDeclDesc();
+ const ValueDecl *D = Desc->asValueDecl();
+ if (!D)
+ return false;
+
+ return diagnoseUnknownDecl(S, OpPC, D);
+}
+
bool CheckNonNullArgs(InterpState &S, CodePtr OpPC, const Function *F,
const CallExpr *CE, unsigned ArgSize) {
auto Args = llvm::ArrayRef(CE->getArgs(), CE->getNumArgs());
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index db52f6649c18..84f65a33bef3 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -199,6 +199,8 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
llvm::ArrayRef<int64_t> ArrayIndices, int64_t &Result);
+inline bool Invalid(InterpState &S, CodePtr OpPC);
+
enum class ArithOp { Add, Sub };
//===----------------------------------------------------------------------===//
@@ -522,6 +524,11 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (Ptr.isDummy())
return false;
+ if constexpr (std::is_same_v<T, Boolean>) {
+ if (!S.getLangOpts().CPlusPlus14)
+ return Invalid(S, OpPC);
+ }
+
const T &Value = Ptr.deref<T>();
T Result;
@@ -572,7 +579,8 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Inc(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -585,7 +593,8 @@ bool Inc(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool IncPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -599,7 +608,8 @@ bool IncPop(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Dec(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -612,7 +622,8 @@ bool Dec(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool DecPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -641,7 +652,8 @@ bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -650,7 +662,8 @@ inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -660,6 +673,9 @@ inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (Ptr.isDummy())
+ return false;
+
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -669,6 +685,8 @@ inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool DecfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -774,9 +792,9 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
// element in the same array are NOT equal. They have the same Base value,
// but a different Offset. This is a pretty rare case, so we fix this here
// by comparing pointers to the first elements.
- if (LHS.isArrayRoot())
+ if (!LHS.isDummy() && LHS.isArrayRoot())
VL = LHS.atIndex(0).getByteOffset();
- if (RHS.isArrayRoot())
+ if (!RHS.isDummy() && RHS.isArrayRoot())
VR = RHS.atIndex(0).getByteOffset();
S.Stk.push<BoolT>(BoolT::from(Fn(Compare(VL, VR))));
@@ -1680,7 +1698,7 @@ bool CastFloatingIntegral(InterpState &S, CodePtr OpPC) {
auto Status = F.convertToInteger(Result);
// Float-to-Integral overflow check.
- if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) {
+ if ((Status & APFloat::opStatus::opInvalidOp)) {
const Expr *E = S.Current->getExpr(OpPC);
QualType Type = E->getType();
@@ -1895,7 +1913,7 @@ inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr))
+ if (Ptr.isDummy())
return true;
if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
@@ -1909,7 +1927,7 @@ inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr)) {
+ if (Ptr.isDummy()) {
S.Stk.push<Pointer>(Ptr);
return true;
}
@@ -1933,7 +1951,7 @@ inline bool ArrayElemPop(InterpState &S, CodePtr OpPC, uint32_t Index) {
inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (Ptr.isDummy()) {
+ if (Ptr.isZero() || Ptr.isDummy()) {
S.Stk.push<Pointer>(Ptr);
return true;
}
@@ -2056,8 +2074,12 @@ inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
- const CXXRecordDecl *DynamicDecl =
- ThisPtr.getDeclDesc()->getType()->getAsCXXRecordDecl();
+ QualType DynamicType = ThisPtr.getDeclDesc()->getType();
+ const CXXRecordDecl *DynamicDecl;
+ if (DynamicType->isPointerType() || DynamicType->isReferenceType())
+ DynamicDecl = DynamicType->getPointeeCXXRecordDecl();
+ else
+ DynamicDecl = ThisPtr.getDeclDesc()->getType()->getAsCXXRecordDecl();
const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
const auto *InitialFunction = cast<CXXMethodDecl>(Func->getDecl());
const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction(
diff --git a/clang/lib/AST/Interp/Opcodes.td b/clang/lib/AST/Interp/Opcodes.td
index 3e3ba1b163e3..ffc54646f027 100644
--- a/clang/lib/AST/Interp/Opcodes.td
+++ b/clang/lib/AST/Interp/Opcodes.td
@@ -563,10 +563,10 @@ def Inv: Opcode {
}
// Increment and decrement.
-def Inc: IntegerOpcode;
-def IncPop : IntegerOpcode;
-def Dec: IntegerOpcode;
-def DecPop: IntegerOpcode;
+def Inc: AluOpcode;
+def IncPop : AluOpcode;
+def Dec: AluOpcode;
+def DecPop: AluOpcode;
// Float increment and decrement.
def Incf: FloatOpcode;
diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h
index fa2e03d71190..34ecdb967960 100644
--- a/clang/lib/AST/Interp/Pointer.h
+++ b/clang/lib/AST/Interp/Pointer.h
@@ -215,7 +215,6 @@ public:
assert(Offset == PastEndMark && "cannot get base of a block");
return Pointer(Pointee, Base, 0);
}
- assert(Offset == Base && "not an inner field");
unsigned NewBase = Base - getInlineDesc()->Offset;
return Pointer(Pointee, NewBase, NewBase);
}
diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index d487944ce921..fd7b06efcc78 100644
--- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -361,8 +361,8 @@ getFieldsGlobalsAndFuncs(const Stmt &S, FieldSet &Fields,
if (const auto *FD = dyn_cast<FieldDecl>(VD))
Fields.insert(FD);
} else if (auto *InitList = dyn_cast<InitListExpr>(&S)) {
- if (RecordDecl *RD = InitList->getType()->getAsRecordDecl())
- for (const auto *FD : getFieldsForInitListExpr(RD))
+ if (InitList->getType()->isRecordType())
+ for (const auto *FD : getFieldsForInitListExpr(InitList))
Fields.insert(FD);
}
}
@@ -983,7 +983,7 @@ StorageLocation &Environment::createObjectInternal(const ValueDecl *D,
}
Value *Val = nullptr;
- if (InitExpr)
+ if (InitExpr) {
// In the (few) cases where an expression is intentionally
// "uninterpreted", `InitExpr` is not associated with a value. There are
// two ways to handle this situation: propagate the status, so that
@@ -998,6 +998,11 @@ StorageLocation &Environment::createObjectInternal(const ValueDecl *D,
// default value (assuming we don't update the environment API to return
// references).
Val = getValue(*InitExpr);
+
+ if (!Val && isa<ImplicitValueInitExpr>(InitExpr) &&
+ InitExpr->getType()->isPointerType())
+ Val = &getOrCreateNullPointerValue(InitExpr->getType()->getPointeeType());
+ }
if (!Val)
Val = createValue(Ty);
@@ -1104,12 +1109,22 @@ RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
return Env.get<RecordStorageLocation>(*Base);
}
-std::vector<FieldDecl *> getFieldsForInitListExpr(const RecordDecl *RD) {
+std::vector<const FieldDecl *>
+getFieldsForInitListExpr(const InitListExpr *InitList) {
+ const RecordDecl *RD = InitList->getType()->getAsRecordDecl();
+ assert(RD != nullptr);
+
+ std::vector<const FieldDecl *> Fields;
+
+ if (InitList->getType()->isUnionType()) {
+ Fields.push_back(InitList->getInitializedFieldInUnion());
+ return Fields;
+ }
+
// Unnamed bitfields are only used for padding and do not appear in
// `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s
// field list, and we thus need to remove them before mapping inits to
// fields to avoid mapping inits to the wrongs fields.
- std::vector<FieldDecl *> Fields;
llvm::copy_if(
RD->fields(), std::back_inserter(Fields),
[](const FieldDecl *Field) { return !Field->isUnnamedBitfield(); });
diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 089854264f48..04aa2831df05 100644
--- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -663,14 +663,7 @@ public:
void VisitInitListExpr(const InitListExpr *S) {
QualType Type = S->getType();
- if (Type->isUnionType()) {
- // FIXME: Initialize unions properly.
- if (auto *Val = Env.createValue(Type))
- Env.setValue(*S, *Val);
- return;
- }
-
- if (!Type->isStructureOrClassType()) {
+ if (!Type->isRecordType()) {
// Until array initialization is implemented, we skip arrays and don't
// need to care about cases where `getNumInits() > 1`.
if (!Type->isArrayType() && S->getNumInits() == 1)
@@ -688,14 +681,26 @@ public:
llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
// This only contains the direct fields for the given type.
- std::vector<FieldDecl *> FieldsForInit =
- getFieldsForInitListExpr(Type->getAsRecordDecl());
+ std::vector<const FieldDecl *> FieldsForInit = getFieldsForInitListExpr(S);
- // `S->inits()` contains all the initializer epressions, including the
+ // `S->inits()` contains all the initializer expressions, including the
// ones for direct base classes.
- auto Inits = S->inits();
+ ArrayRef<Expr *> Inits = S->inits();
size_t InitIdx = 0;
+ // Unions initialized with an empty initializer list need special treatment.
+ // For structs/classes initialized with an empty initializer list, Clang
+ // puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions,
+ // it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves.
+ std::optional<ImplicitValueInitExpr> ImplicitValueInitForUnion;
+ SmallVector<Expr *> InitsForUnion;
+ if (S->getType()->isUnionType() && Inits.empty()) {
+ assert(FieldsForInit.size() == 1);
+ ImplicitValueInitForUnion.emplace(FieldsForInit.front()->getType());
+ InitsForUnion.push_back(&*ImplicitValueInitForUnion);
+ Inits = InitsForUnion;
+ }
+
// Initialize base classes.
if (auto* R = S->getType()->getAsCXXRecordDecl()) {
assert(FieldsForInit.size() + R->getNumBases() == Inits.size());
@@ -731,6 +736,17 @@ public:
FieldLocs.insert({Field, &Loc});
}
+ // In the case of a union, we don't in general have initializers for all
+ // of the fields. Create storage locations for the remaining fields (but
+ // don't associate them with values).
+ if (Type->isUnionType()) {
+ for (const FieldDecl *Field :
+ Env.getDataflowAnalysisContext().getModeledFields(Type)) {
+ if (auto [it, inserted] = FieldLocs.insert({Field, nullptr}); inserted)
+ it->second = &Env.createStorageLocation(Field->getType());
+ }
+ }
+
// Check that we satisfy the invariant that a `RecordStorageLoation`
// contains exactly the set of modeled fields for that type.
// `ModeledFields` includes fields from all the bases, but only the
diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp
index 701f1ac852c2..e1ff0d92f6b2 100644
--- a/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -130,42 +130,42 @@ public:
bool TraverseGenericSelectionExpr(GenericSelectionExpr *Node) {
// These are unevaluated, except the result expression.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return TraverseStmt(Node->getResultExpr());
return VisitorBase::TraverseGenericSelectionExpr(Node);
}
bool TraverseUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseUnaryExprOrTypeTraitExpr(Node);
}
bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseTypeOfExprTypeLoc(Node);
}
bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseDecltypeTypeLoc(Node);
}
bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseCXXNoexceptExpr(Node);
}
bool TraverseCXXTypeidExpr(CXXTypeidExpr *Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseCXXTypeidExpr(Node);
}
@@ -213,24 +213,26 @@ private:
// Because we're dealing with raw pointers, let's define what we mean by that.
static auto hasPointerType() {
- return hasType(hasCanonicalType(pointerType()));
+ return hasType(hasCanonicalType(pointerType()));
}
-static auto hasArrayType() {
- return hasType(hasCanonicalType(arrayType()));
-}
+static auto hasArrayType() { return hasType(hasCanonicalType(arrayType())); }
-AST_MATCHER_P(Stmt, forEachDescendantEvaluatedStmt, internal::Matcher<Stmt>, innerMatcher) {
+AST_MATCHER_P(Stmt, forEachDescendantEvaluatedStmt, internal::Matcher<Stmt>,
+ innerMatcher) {
const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
- MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, true);
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All,
+ true);
return Visitor.findMatch(DynTypedNode::create(Node));
}
-AST_MATCHER_P(Stmt, forEachDescendantStmt, internal::Matcher<Stmt>, innerMatcher) {
+AST_MATCHER_P(Stmt, forEachDescendantStmt, internal::Matcher<Stmt>,
+ innerMatcher) {
const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
- MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, false);
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All,
+ false);
return Visitor.findMatch(DynTypedNode::create(Node));
}
@@ -268,10 +270,9 @@ static auto isInUnspecifiedLvalueContext(internal::Matcher<Expr> innerMatcher) {
hasLHS(innerMatcher)
)
));
-// clang-format on
+ // clang-format on
}
-
// Returns a matcher that matches any expression `e` such that `InnerMatcher`
// matches `e` and `e` is in an Unspecified Pointer Context (UPC).
static internal::Matcher<Stmt>
@@ -315,7 +316,7 @@ isInUnspecifiedPointerContext(internal::Matcher<Stmt> InnerMatcher) {
// clang-format on
return stmt(anyOf(CallArgMatcher, CastOperandMatcher, CompOperandMatcher,
- PtrSubtractionMatcher));
+ PtrSubtractionMatcher));
// FIXME: any more cases? (UPC excludes the RHS of an assignment. For now we
// don't have to check that.)
}
@@ -481,7 +482,9 @@ public:
#ifndef NDEBUG
StringRef getDebugName() const {
switch (K) {
-#define GADGET(x) case Kind::x: return #x;
+#define GADGET(x) \
+ case Kind::x: \
+ return #x;
#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
}
llvm_unreachable("Unhandled Gadget::Kind enum");
@@ -502,7 +505,6 @@ private:
Kind K;
};
-
/// Warning gadgets correspond to unsafe code patterns that warrants
/// an immediate warning.
class WarningGadget : public Gadget {
@@ -513,10 +515,10 @@ public:
bool isWarningGadget() const final { return true; }
};
-/// Fixable gadgets correspond to code patterns that aren't always unsafe but need to be
-/// properly recognized in order to emit fixes. For example, if a raw pointer-type
-/// variable is replaced by a safe C++ container, every use of such variable must be
-/// carefully considered and possibly updated.
+/// Fixable gadgets correspond to code patterns that aren't always unsafe but
+/// need to be properly recognized in order to emit fixes. For example, if a raw
+/// pointer-type variable is replaced by a safe C++ container, every use of such
+/// variable must be carefully considered and possibly updated.
class FixableGadget : public Gadget {
public:
FixableGadget(Kind K) : Gadget(K) {}
@@ -531,20 +533,19 @@ public:
return std::nullopt;
}
- /// Returns a list of two elements where the first element is the LHS of a pointer assignment
- /// statement and the second element is the RHS. This two-element list represents the fact that
- /// the LHS buffer gets its bounds information from the RHS buffer. This information will be used
- /// later to group all those variables whose types must be modified together to prevent type
- /// mismatches.
+ /// Returns a list of two elements where the first element is the LHS of a
+ /// pointer assignment statement and the second element is the RHS. This
+ /// two-element list represents the fact that the LHS buffer gets its bounds
+ /// information from the RHS buffer. This information will be used later to
+ /// group all those variables whose types must be modified together to prevent
+ /// type mismatches.
virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
getStrategyImplications() const {
return std::nullopt;
}
};
-static auto toSupportedVariable() {
- return to(varDecl());
-}
+static auto toSupportedVariable() { return to(varDecl()); }
using FixableGadgetList = std::vector<std::unique_ptr<FixableGadget>>;
using WarningGadgetList = std::vector<std::unique_ptr<WarningGadget>>;
@@ -565,10 +566,10 @@ public:
}
static Matcher matcher() {
- return stmt(unaryOperator(
- hasOperatorName("++"),
- hasUnaryOperand(ignoringParenImpCasts(hasPointerType()))
- ).bind(OpTag));
+ return stmt(
+ unaryOperator(hasOperatorName("++"),
+ hasUnaryOperand(ignoringParenImpCasts(hasPointerType())))
+ .bind(OpTag));
}
const UnaryOperator *getBaseStmt() const override { return Op; }
@@ -600,10 +601,10 @@ public:
}
static Matcher matcher() {
- return stmt(unaryOperator(
- hasOperatorName("--"),
- hasUnaryOperand(ignoringParenImpCasts(hasPointerType()))
- ).bind(OpTag));
+ return stmt(
+ unaryOperator(hasOperatorName("--"),
+ hasUnaryOperand(ignoringParenImpCasts(hasPointerType())))
+ .bind(OpTag));
}
const UnaryOperator *getBaseStmt() const override { return Op; }
@@ -754,26 +755,25 @@ class PointerInitGadget : public FixableGadget {
private:
static constexpr const char *const PointerInitLHSTag = "ptrInitLHS";
static constexpr const char *const PointerInitRHSTag = "ptrInitRHS";
- const VarDecl * PtrInitLHS; // the LHS pointer expression in `PI`
- const DeclRefExpr * PtrInitRHS; // the RHS pointer expression in `PI`
+ const VarDecl *PtrInitLHS; // the LHS pointer expression in `PI`
+ const DeclRefExpr *PtrInitRHS; // the RHS pointer expression in `PI`
public:
PointerInitGadget(const MatchFinder::MatchResult &Result)
: FixableGadget(Kind::PointerInit),
- PtrInitLHS(Result.Nodes.getNodeAs<VarDecl>(PointerInitLHSTag)),
- PtrInitRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerInitRHSTag)) {}
+ PtrInitLHS(Result.Nodes.getNodeAs<VarDecl>(PointerInitLHSTag)),
+ PtrInitRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerInitRHSTag)) {}
static bool classof(const Gadget *G) {
return G->getKind() == Kind::PointerInit;
}
static Matcher matcher() {
- auto PtrInitStmt = declStmt(hasSingleDecl(varDecl(
- hasInitializer(ignoringImpCasts(declRefExpr(
- hasPointerType(),
- toSupportedVariable()).
- bind(PointerInitRHSTag)))).
- bind(PointerInitLHSTag)));
+ auto PtrInitStmt = declStmt(hasSingleDecl(
+ varDecl(hasInitializer(ignoringImpCasts(
+ declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerInitRHSTag))))
+ .bind(PointerInitLHSTag)));
return stmt(PtrInitStmt);
}
@@ -793,8 +793,7 @@ public:
virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
getStrategyImplications() const override {
- return std::make_pair(PtrInitLHS,
- cast<VarDecl>(PtrInitRHS->getDecl()));
+ return std::make_pair(PtrInitLHS, cast<VarDecl>(PtrInitRHS->getDecl()));
}
};
@@ -807,8 +806,8 @@ class PtrToPtrAssignmentGadget : public FixableGadget {
private:
static constexpr const char *const PointerAssignLHSTag = "ptrLHS";
static constexpr const char *const PointerAssignRHSTag = "ptrRHS";
- const DeclRefExpr * PtrLHS; // the LHS pointer expression in `PA`
- const DeclRefExpr * PtrRHS; // the RHS pointer expression in `PA`
+ const DeclRefExpr *PtrLHS; // the LHS pointer expression in `PA`
+ const DeclRefExpr *PtrRHS; // the RHS pointer expression in `PA`
public:
PtrToPtrAssignmentGadget(const MatchFinder::MatchResult &Result)
@@ -821,13 +820,13 @@ public:
}
static Matcher matcher() {
- auto PtrAssignExpr = binaryOperator(allOf(hasOperatorName("="),
- hasRHS(ignoringParenImpCasts(declRefExpr(hasPointerType(),
- toSupportedVariable()).
- bind(PointerAssignRHSTag))),
- hasLHS(declRefExpr(hasPointerType(),
- toSupportedVariable()).
- bind(PointerAssignLHSTag))));
+ auto PtrAssignExpr = binaryOperator(
+ allOf(hasOperatorName("="),
+ hasRHS(ignoringParenImpCasts(
+ declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerAssignRHSTag))),
+ hasLHS(declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerAssignLHSTag))));
return stmt(isInUnspecifiedUntypedContext(PtrAssignExpr));
}
@@ -981,9 +980,8 @@ public:
static Matcher matcher() {
auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
- auto BaseIsArrayOrPtrDRE =
- hasBase(ignoringParenImpCasts(declRefExpr(ArrayOrPtr,
- toSupportedVariable())));
+ auto BaseIsArrayOrPtrDRE = hasBase(
+ ignoringParenImpCasts(declRefExpr(ArrayOrPtr, toSupportedVariable())));
auto Target =
arraySubscriptExpr(BaseIsArrayOrPtrDRE).bind(ULCArraySubscriptTag);
@@ -1025,9 +1023,9 @@ public:
static Matcher matcher() {
auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
- auto target = expr(
- ignoringParenImpCasts(declRefExpr(allOf(ArrayOrPtr,
- toSupportedVariable())).bind(DeclRefExprTag)));
+ auto target = expr(ignoringParenImpCasts(
+ declRefExpr(allOf(ArrayOrPtr, toSupportedVariable()))
+ .bind(DeclRefExprTag)));
return stmt(isInUnspecifiedPointerContext(target));
}
@@ -1036,9 +1034,7 @@ public:
virtual const Stmt *getBaseStmt() const override { return Node; }
- virtual DeclUseList getClaimedVarUseSites() const override {
- return {Node};
- }
+ virtual DeclUseList getClaimedVarUseSites() const override { return {Node}; }
};
class PointerDereferenceGadget : public FixableGadget {
@@ -1103,10 +1099,10 @@ public:
static Matcher matcher() {
return expr(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
- unaryOperator(hasOperatorName("&"),
- hasUnaryOperand(arraySubscriptExpr(
- hasBase(ignoringParenImpCasts(declRefExpr(
- toSupportedVariable()))))))
+ unaryOperator(
+ hasOperatorName("&"),
+ hasUnaryOperand(arraySubscriptExpr(hasBase(
+ ignoringParenImpCasts(declRefExpr(toSupportedVariable()))))))
.bind(UPCAddressofArraySubscriptTag)))));
}
@@ -1195,13 +1191,13 @@ public:
class UPCPreIncrementGadget : public FixableGadget {
private:
static constexpr const char *const UPCPreIncrementTag =
- "PointerPreIncrementUnderUPC";
+ "PointerPreIncrementUnderUPC";
const UnaryOperator *Node; // the `++Ptr` node
public:
UPCPreIncrementGadget(const MatchFinder::MatchResult &Result)
- : FixableGadget(Kind::UPCPreIncrement),
- Node(Result.Nodes.getNodeAs<UnaryOperator>(UPCPreIncrementTag)) {
+ : FixableGadget(Kind::UPCPreIncrement),
+ Node(Result.Nodes.getNodeAs<UnaryOperator>(UPCPreIncrementTag)) {
assert(Node != nullptr && "Expecting a non-null matching result");
}
@@ -1215,10 +1211,9 @@ public:
// can have the matcher be general, so long as `getClaimedVarUseSites` does
// things right.
return stmt(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
- unaryOperator(isPreInc(),
- hasUnaryOperand(declRefExpr(
- toSupportedVariable()))
- ).bind(UPCPreIncrementTag)))));
+ unaryOperator(isPreInc(),
+ hasUnaryOperand(declRefExpr(toSupportedVariable())))
+ .bind(UPCPreIncrementTag)))));
}
virtual std::optional<FixItList>
@@ -1782,9 +1777,9 @@ static SourceRange getSourceRangeToTokenEnd(const Decl *D,
const LangOptions &LangOpts) {
SourceLocation Begin = D->getBeginLoc();
SourceLocation
- End = // `D->getEndLoc` should always return the starting location of the
- // last token, so we should get the end of the token
- Lexer::getLocForEndOfToken(D->getEndLoc(), 0, SM, LangOpts);
+ End = // `D->getEndLoc` should always return the starting location of the
+ // last token, so we should get the end of the token
+ Lexer::getLocForEndOfToken(D->getEndLoc(), 0, SM, LangOpts);
return SourceRange(Begin, End);
}
@@ -1976,7 +1971,7 @@ PointerDereferenceGadget::getFixits(const FixitStrategy &S) const {
if (auto LocPastOperand =
getPastLoc(BaseDeclRefExpr, SM, Ctx.getLangOpts())) {
return FixItList{{FixItHint::CreateRemoval(derefRange),
- FixItHint::CreateInsertion(*LocPastOperand, "[0]")}};
+ FixItHint::CreateInsertion(*LocPastOperand, "[0]")}};
}
break;
}
@@ -2162,7 +2157,8 @@ FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
// NULL pointer, we use the default constructor to initialize the span
// object, i.e., a `std:span` variable declaration with no initializer.
// So the fix-it is just to remove the initializer.
- if (Init->isNullPointerConstant(Ctx,
+ if (Init->isNullPointerConstant(
+ Ctx,
// FIXME: Why does this function not ask for `const ASTContext
// &`? It should. Maybe worth an NFC patch later.
Expr::NullPointerConstantValueDependence::
@@ -2230,8 +2226,10 @@ FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
}
#ifndef NDEBUG
-#define DEBUG_NOTE_DECL_FAIL(D, Msg) \
-Handler.addDebugNoteForVar((D), (D)->getBeginLoc(), "failed to produce fixit for declaration '" + (D)->getNameAsString() + "'" + (Msg))
+#define DEBUG_NOTE_DECL_FAIL(D, Msg) \
+ Handler.addDebugNoteForVar((D), (D)->getBeginLoc(), \
+ "failed to produce fixit for declaration '" + \
+ (D)->getNameAsString() + "'" + (Msg))
#else
#define DEBUG_NOTE_DECL_FAIL(D, Msg)
#endif
@@ -2239,8 +2237,8 @@ Handler.addDebugNoteForVar((D), (D)->getBeginLoc(), "failed to produce fixit for
// For the given variable declaration with a pointer-to-T type, returns the text
// `std::span<T>`. If it is unable to generate the text, returns
// `std::nullopt`.
-static std::optional<std::string> createSpanTypeForVarDecl(const VarDecl *VD,
- const ASTContext &Ctx) {
+static std::optional<std::string>
+createSpanTypeForVarDecl(const VarDecl *VD, const ASTContext &Ctx) {
assert(VD->getType()->isPointerType());
std::optional<Qualifiers> PteTyQualifiers = std::nullopt;
@@ -2277,8 +2275,8 @@ static std::optional<std::string> createSpanTypeForVarDecl(const VarDecl *VD,
// the non-empty fix-it list, if fix-its are successfuly generated; empty
// list otherwise.
static FixItList fixLocalVarDeclWithSpan(const VarDecl *D, ASTContext &Ctx,
- const StringRef UserFillPlaceHolder,
- UnsafeBufferUsageHandler &Handler) {
+ const StringRef UserFillPlaceHolder,
+ UnsafeBufferUsageHandler &Handler) {
if (hasUnsupportedSpecifiers(D, Ctx.getSourceManager()))
return {};
@@ -2431,9 +2429,9 @@ createOverloadsForFixedParams(const FixitStrategy &S, const FunctionDecl *FD,
// print parameter name if provided:
if (IdentifierInfo *II = Parm->getIdentifier())
SS << ' ' << II->getName().str();
- } else if (auto ParmTypeText = getRangeText(
- getSourceRangeToTokenEnd(Parm, SM, LangOpts),
- SM, LangOpts)) {
+ } else if (auto ParmTypeText =
+ getRangeText(getSourceRangeToTokenEnd(Parm, SM, LangOpts),
+ SM, LangOpts)) {
// print the whole `Parm` without modification:
SS << ParmTypeText->str();
} else
@@ -2577,7 +2575,8 @@ static FixItList fixVariableWithSpan(const VarDecl *VD,
UnsafeBufferUsageHandler &Handler) {
const DeclStmt *DS = Tracker.lookupDecl(VD);
if (!DS) {
- DEBUG_NOTE_DECL_FAIL(VD, " : variables declared this way not implemented yet");
+ DEBUG_NOTE_DECL_FAIL(VD,
+ " : variables declared this way not implemented yet");
return {};
}
if (!DS->isSingleDecl()) {
@@ -2979,8 +2978,8 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
#endif
assert(D && D->getBody());
- // We do not want to visit a Lambda expression defined inside a method independently.
- // Instead, it should be visited along with the outer method.
+ // We do not want to visit a Lambda expression defined inside a method
+ // independently. Instead, it should be visited along with the outer method.
// FIXME: do we want to do the same thing for `BlockDecl`s?
if (const auto *fd = dyn_cast<CXXMethodDecl>(D)) {
if (fd->getParent()->isLambda() && fd->getParent()->isLocalClass())
@@ -2990,7 +2989,7 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
// Do not emit fixit suggestions for functions declared in an
// extern "C" block.
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- for (FunctionDecl *FReDecl : FD->redecls()) {
+ for (FunctionDecl *FReDecl : FD->redecls()) {
if (FReDecl->isExternC()) {
EmitSuggestions = false;
break;
@@ -3002,7 +3001,7 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
FixableGadgetSets FixablesForAllVars;
auto [FixableGadgets, WarningGadgets, Tracker] =
- findGadgets(D, Handler, EmitSuggestions);
+ findGadgets(D, Handler, EmitSuggestions);
if (!EmitSuggestions) {
// Our job is very easy without suggestions. Just warn about
@@ -3055,36 +3054,36 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
// Filter out non-local vars and vars with unclaimed DeclRefExpr-s.
for (auto it = FixablesForAllVars.byVar.cbegin();
it != FixablesForAllVars.byVar.cend();) {
- // FIXME: need to deal with global variables later
- if ((!it->first->isLocalVarDecl() && !isa<ParmVarDecl>(it->first))) {
+ // FIXME: need to deal with global variables later
+ if ((!it->first->isLocalVarDecl() && !isa<ParmVarDecl>(it->first))) {
#ifndef NDEBUG
- Handler.addDebugNoteForVar(
- it->first, it->first->getBeginLoc(),
- ("failed to produce fixit for '" + it->first->getNameAsString() +
- "' : neither local nor a parameter"));
+ Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" +
+ it->first->getNameAsString() +
+ "' : neither local nor a parameter"));
#endif
- it = FixablesForAllVars.byVar.erase(it);
- } else if (it->first->getType().getCanonicalType()->isReferenceType()) {
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (it->first->getType().getCanonicalType()->isReferenceType()) {
#ifndef NDEBUG
- Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
- ("failed to produce fixit for '" +
- it->first->getNameAsString() +
- "' : has a reference type"));
+ Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" +
+ it->first->getNameAsString() +
+ "' : has a reference type"));
#endif
- it = FixablesForAllVars.byVar.erase(it);
- } else if (Tracker.hasUnclaimedUses(it->first)) {
- it = FixablesForAllVars.byVar.erase(it);
- } else if (it->first->isInitCapture()) {
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (Tracker.hasUnclaimedUses(it->first)) {
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (it->first->isInitCapture()) {
#ifndef NDEBUG
- Handler.addDebugNoteForVar(
- it->first, it->first->getBeginLoc(),
- ("failed to produce fixit for '" + it->first->getNameAsString() +
- "' : init capture"));
+ Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" +
+ it->first->getNameAsString() +
+ "' : init capture"));
#endif
- it = FixablesForAllVars.byVar.erase(it);
- } else {
- ++it;
- }
+ it = FixablesForAllVars.byVar.erase(it);
+ } else {
+ ++it;
+ }
}
#ifndef NDEBUG
@@ -3115,7 +3114,7 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
for (auto it : FixablesForAllVars.byVar) {
for (const FixableGadget *fixable : it.second) {
std::optional<std::pair<const VarDecl *, const VarDecl *>> ImplPair =
- fixable->getStrategyImplications();
+ fixable->getStrategyImplications();
if (ImplPair) {
std::pair<const VarDecl *, const VarDecl *> Impl = std::move(*ImplPair);
PtrAssignmentGraph[Impl.first].insert(Impl.second);
@@ -3144,10 +3143,10 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
for (const auto &[Var, ignore] : UnsafeOps.byVar) {
if (VisitedVarsDirected.find(Var) == VisitedVarsDirected.end()) {
- std::queue<const VarDecl*> QueueDirected{};
+ std::queue<const VarDecl *> QueueDirected{};
QueueDirected.push(Var);
- while(!QueueDirected.empty()) {
- const VarDecl* CurrentVar = QueueDirected.front();
+ while (!QueueDirected.empty()) {
+ const VarDecl *CurrentVar = QueueDirected.front();
QueueDirected.pop();
VisitedVarsDirected.insert(CurrentVar);
auto AdjacentNodes = PtrAssignmentGraph[CurrentVar];
@@ -3178,11 +3177,11 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
for (const auto &[Var, ignore] : UnsafeOps.byVar) {
if (VisitedVars.find(Var) == VisitedVars.end()) {
VarGrpTy &VarGroup = Groups.emplace_back();
- std::queue<const VarDecl*> Queue{};
+ std::queue<const VarDecl *> Queue{};
Queue.push(Var);
- while(!Queue.empty()) {
- const VarDecl* CurrentVar = Queue.front();
+ while (!Queue.empty()) {
+ const VarDecl *CurrentVar = Queue.front();
Queue.pop();
VisitedVars.insert(CurrentVar);
VarGroup.push_back(CurrentVar);
diff --git a/clang/lib/CodeGen/ABIInfo.cpp b/clang/lib/CodeGen/ABIInfo.cpp
index 1b56cf7c596d..efcff958ce54 100644
--- a/clang/lib/CodeGen/ABIInfo.cpp
+++ b/clang/lib/CodeGen/ABIInfo.cpp
@@ -184,6 +184,58 @@ ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
/*ByVal*/ false, Realign);
}
+void ABIInfo::appendAttributeMangling(TargetAttr *Attr,
+ raw_ostream &Out) const {
+ if (Attr->isDefaultVersion())
+ return;
+ appendAttributeMangling(Attr->getFeaturesStr(), Out);
+}
+
+void ABIInfo::appendAttributeMangling(TargetVersionAttr *Attr,
+ raw_ostream &Out) const {
+ appendAttributeMangling(Attr->getNamesStr(), Out);
+}
+
+void ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
+ raw_ostream &Out) const {
+ appendAttributeMangling(Attr->getFeatureStr(Index), Out);
+ Out << '.' << Attr->getMangledIndex(Index);
+}
+
+void ABIInfo::appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const {
+ if (AttrStr == "default") {
+ Out << ".default";
+ return;
+ }
+
+ Out << '.';
+ const TargetInfo &TI = CGT.getTarget();
+ ParsedTargetAttr Info = TI.parseTargetAttr(AttrStr);
+
+ llvm::sort(Info.Features, [&TI](StringRef LHS, StringRef RHS) {
+ // Multiversioning doesn't allow "no-${feature}", so we can
+ // only have "+" prefixes here.
+ assert(LHS.starts_with("+") && RHS.starts_with("+") &&
+ "Features should always have a prefix.");
+ return TI.multiVersionSortPriority(LHS.substr(1)) >
+ TI.multiVersionSortPriority(RHS.substr(1));
+ });
+
+ bool IsFirst = true;
+ if (!Info.CPU.empty()) {
+ IsFirst = false;
+ Out << "arch_" << Info.CPU;
+ }
+
+ for (StringRef Feat : Info.Features) {
+ if (!IsFirst)
+ Out << '_';
+ IsFirst = false;
+ Out << Feat.substr(1);
+ }
+}
+
// Pin the vtable to this file.
SwiftABIInfo::~SwiftABIInfo() = default;
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
index b9a5ef6e4366..ff4ae44a42c3 100644
--- a/clang/lib/CodeGen/ABIInfo.h
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
+#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/IR/CallingConv.h"
@@ -111,6 +112,15 @@ public:
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty,
bool Realign = false) const;
+
+ virtual void appendAttributeMangling(TargetAttr *Attr,
+ raw_ostream &Out) const;
+ virtual void appendAttributeMangling(TargetVersionAttr *Attr,
+ raw_ostream &Out) const;
+ virtual void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
+ raw_ostream &Out) const;
+ virtual void appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const;
};
/// Target specific hooks for defining how a type should be passed or returned
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index a31082524023..056f790d4185 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -186,6 +186,14 @@ class EmitAssemblyHelper {
TargetTriple.getVendor() != llvm::Triple::Apple;
}
+ /// Check whether we should emit a flag for UnifiedLTO.
+ /// The UnifiedLTO module flag should be set when UnifiedLTO is enabled for
+ /// ThinLTO or Full LTO with module summaries.
+ bool shouldEmitUnifiedLTOModueFlag() const {
+ return CodeGenOpts.UnifiedLTO &&
+ (CodeGenOpts.PrepareForThinLTO || shouldEmitRegularLTOSummary());
+ }
+
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
const HeaderSearchOptions &HeaderSearchOpts,
@@ -1036,7 +1044,8 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!actionRequiresCodeGen(Action) && CodeGenOpts.VerifyModule)
MPM.addPass(VerifierPass());
- if (Action == Backend_EmitBC || Action == Backend_EmitLL) {
+ if (Action == Backend_EmitBC || Action == Backend_EmitLL ||
+ CodeGenOpts.FatLTO) {
if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
@@ -1047,11 +1056,9 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!ThinLinkOS)
return;
}
- if (CodeGenOpts.UnifiedLTO)
- TheModule->addModuleFlag(llvm::Module::Error, "UnifiedLTO", uint32_t(1));
MPM.addPass(ThinLTOBitcodeWriterPass(
*OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
- } else {
+ } else if (Action == Backend_EmitLL) {
MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists,
/*EmitLTOSummary=*/true));
}
@@ -1065,24 +1072,17 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
uint32_t(1));
- if (CodeGenOpts.UnifiedLTO)
- TheModule->addModuleFlag(llvm::Module::Error, "UnifiedLTO", uint32_t(1));
}
- if (Action == Backend_EmitBC)
+ if (Action == Backend_EmitBC) {
MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
EmitLTOSummary));
- else
+ } else if (Action == Backend_EmitLL) {
MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists,
EmitLTOSummary));
+ }
}
- }
- if (CodeGenOpts.FatLTO) {
- // Set the EnableSplitLTOUnit and UnifiedLTO module flags, since FatLTO
- // uses a different action than Backend_EmitBC or Backend_EmitLL.
- if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
- TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
- uint32_t(CodeGenOpts.EnableSplitLTOUnit));
- if (CodeGenOpts.UnifiedLTO && !TheModule->getModuleFlag("UnifiedLTO"))
+
+ if (shouldEmitUnifiedLTOModueFlag())
TheModule->addModuleFlag(llvm::Module::Error, "UnifiedLTO", uint32_t(1));
}
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 2d16e7cdc060..e90014261217 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -13952,6 +13952,8 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
+ if (!getContext().getTargetInfo().validateCpuSupports(FeatureStr))
+ return Builder.getFalse();
return EmitX86CpuSupports(FeatureStr);
}
@@ -14041,6 +14043,8 @@ Value *CodeGenFunction::EmitAArch64CpuSupports(const CallExpr *E) {
ArgStr.split(Features, "+");
for (auto &Feature : Features) {
Feature = Feature.trim();
+ if (!llvm::AArch64::parseArchExtension(Feature))
+ return Builder.getFalse();
if (Feature != "default")
Features.push_back(Feature);
}
@@ -16639,7 +16643,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
.Case(Name, {FA_WORD, Bitmask})
#include "llvm/TargetParser/PPCTargetParser.def"
.Default({0, 0});
- assert(BitMask && "Invalid target feature string. Missed by SemaChecking?");
+ if (!BitMask)
+ return Builder.getFalse();
Value *Op0 = llvm::ConstantInt::get(Int32Ty, FeatureWord);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_fixed_addr_ld);
Value *TheCall = Builder.CreateCall(F, {Op0}, "cpu_supports");
@@ -18007,6 +18012,51 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
/*ReturnType*/ T0->getScalarType(), Intrinsic::dx_dot,
ArrayRef<Value *>{Op0, Op1}, nullptr, "dx.dot");
} break;
+ case Builtin::BI__builtin_hlsl_lerp: {
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Y = EmitScalarExpr(E->getArg(1));
+ Value *S = EmitScalarExpr(E->getArg(2));
+ llvm::Type *Xty = X->getType();
+ llvm::Type *Yty = Y->getType();
+ llvm::Type *Sty = S->getType();
+ if (!Xty->isVectorTy() && !Yty->isVectorTy() && !Sty->isVectorTy()) {
+ if (Xty->isFloatingPointTy()) {
+ auto V = Builder.CreateFSub(Y, X);
+ V = Builder.CreateFMul(S, V);
+ return Builder.CreateFAdd(X, V, "dx.lerp");
+ }
+ llvm_unreachable("Scalar Lerp is only supported on floats.");
+ }
+ // A VectorSplat should have happened
+ assert(Xty->isVectorTy() && Yty->isVectorTy() && Sty->isVectorTy() &&
+ "Lerp of vector and scalar is not supported.");
+
+ [[maybe_unused]] auto *XVecTy =
+ E->getArg(0)->getType()->getAs<VectorType>();
+ [[maybe_unused]] auto *YVecTy =
+ E->getArg(1)->getType()->getAs<VectorType>();
+ [[maybe_unused]] auto *SVecTy =
+ E->getArg(2)->getType()->getAs<VectorType>();
+ // A HLSLVectorTruncation should have happend
+ assert(XVecTy->getNumElements() == YVecTy->getNumElements() &&
+ XVecTy->getNumElements() == SVecTy->getNumElements() &&
+ "Lerp requires vectors to be of the same size.");
+ assert(XVecTy->getElementType()->isRealFloatingType() &&
+ XVecTy->getElementType() == YVecTy->getElementType() &&
+ XVecTy->getElementType() == SVecTy->getElementType() &&
+ "Lerp requires float vectors to be of the same type.");
+ return Builder.CreateIntrinsic(
+ /*ReturnType*/ Xty, Intrinsic::dx_lerp, ArrayRef<Value *>{X, Y, S},
+ nullptr, "dx.lerp");
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_frac: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ if (!E->getArg(0)->getType()->hasFloatingRepresentation())
+ llvm_unreachable("frac operand must have a float representation");
+ return Builder.CreateIntrinsic(
+ /*ReturnType*/ Op0->getType(), Intrinsic::dx_frac,
+ ArrayRef<Value *>{Op0}, nullptr, "dx.frac");
+ }
}
return nullptr;
}
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index d05cf1c6e181..13f68237b464 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -3221,12 +3221,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
- llvm::TypeSize StructSize;
- llvm::TypeSize PtrElementSize;
if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
- StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
- PtrElementSize =
+ [[maybe_unused]] llvm::TypeSize StructSize =
+ CGM.getDataLayout().getTypeAllocSize(STy);
+ [[maybe_unused]] llvm::TypeSize PtrElementSize =
CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(Ty));
if (STy->containsHomogeneousScalableVectorTypes()) {
assert(StructSize == PtrElementSize &&
@@ -5310,12 +5309,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
- llvm::Type *SrcTy = ConvertTypeForMem(I->Ty);
- llvm::TypeSize SrcTypeSize;
- llvm::TypeSize DstTypeSize;
if (STy && ArgInfo.isDirect() && !ArgInfo.getCanBeFlattened()) {
- SrcTypeSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
- DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ llvm::Type *SrcTy = ConvertTypeForMem(I->Ty);
+ [[maybe_unused]] llvm::TypeSize SrcTypeSize =
+ CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ [[maybe_unused]] llvm::TypeSize DstTypeSize =
+ CGM.getDataLayout().getTypeAllocSize(STy);
if (STy->containsHomogeneousScalableVectorTypes()) {
assert(SrcTypeSize == DstTypeSize &&
"Only allow non-fractional movement of structure with "
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 8fd74697de3c..3fbd2e03eb61 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -7023,31 +7023,47 @@ void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
S.getSingleClause<OMPUseClause>())) &&
"OMPNowaitClause clause is used separately in OMPInteropDirective.");
- if (const auto *C = S.getSingleClause<OMPInitClause>()) {
- llvm::Value *InteropvarPtr =
- EmitLValue(C->getInteropVar()).getPointer(*this);
- llvm::omp::OMPInteropType InteropType = llvm::omp::OMPInteropType::Unknown;
- if (C->getIsTarget()) {
- InteropType = llvm::omp::OMPInteropType::Target;
- } else {
- assert(C->getIsTargetSync() && "Expected interop-type target/targetsync");
- InteropType = llvm::omp::OMPInteropType::TargetSync;
+ auto ItOMPInitClause = S.getClausesOfKind<OMPInitClause>();
+ if (!ItOMPInitClause.empty()) {
+ // Look at the multiple init clauses
+ for (const OMPInitClause *C : ItOMPInitClause) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ llvm::omp::OMPInteropType InteropType =
+ llvm::omp::OMPInteropType::Unknown;
+ if (C->getIsTarget()) {
+ InteropType = llvm::omp::OMPInteropType::Target;
+ } else {
+ assert(C->getIsTargetSync() &&
+ "Expected interop-type target/targetsync");
+ InteropType = llvm::omp::OMPInteropType::TargetSync;
+ }
+ OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType,
+ Device, NumDependences, DependenceList,
+ Data.HasNowaitClause);
+ }
+ }
+ auto ItOMPDestroyClause = S.getClausesOfKind<OMPDestroyClause>();
+ if (!ItOMPDestroyClause.empty()) {
+ // Look at the multiple destroy clauses
+ for (const OMPDestroyClause *C : ItOMPDestroyClause) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
+ }
+ }
+ auto ItOMPUseClause = S.getClausesOfKind<OMPUseClause>();
+ if (!ItOMPUseClause.empty()) {
+ // Look at the multiple use clauses
+ for (const OMPUseClause *C : ItOMPUseClause) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
}
- OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device,
- NumDependences, DependenceList,
- Data.HasNowaitClause);
- } else if (const auto *C = S.getSingleClause<OMPDestroyClause>()) {
- llvm::Value *InteropvarPtr =
- EmitLValue(C->getInteropVar()).getPointer(*this);
- OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
- NumDependences, DependenceList,
- Data.HasNowaitClause);
- } else if (const auto *C = S.getSingleClause<OMPUseClause>()) {
- llvm::Value *InteropvarPtr =
- EmitLValue(C->getInteropVar()).getPointer(*this);
- OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
- NumDependences, DependenceList,
- Data.HasNowaitClause);
}
}
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index d16d12fac8b0..82a97ecfaa00 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -1727,59 +1727,6 @@ static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
Out << ".resolver";
}
-static void AppendTargetVersionMangling(const CodeGenModule &CGM,
- const TargetVersionAttr *Attr,
- raw_ostream &Out) {
- if (Attr->isDefaultVersion()) {
- Out << ".default";
- return;
- }
- Out << "._";
- const TargetInfo &TI = CGM.getTarget();
- llvm::SmallVector<StringRef, 8> Feats;
- Attr->getFeatures(Feats);
- llvm::stable_sort(Feats, [&TI](const StringRef FeatL, const StringRef FeatR) {
- return TI.multiVersionSortPriority(FeatL) <
- TI.multiVersionSortPriority(FeatR);
- });
- for (const auto &Feat : Feats) {
- Out << 'M';
- Out << Feat;
- }
-}
-
-static void AppendTargetMangling(const CodeGenModule &CGM,
- const TargetAttr *Attr, raw_ostream &Out) {
- if (Attr->isDefaultVersion())
- return;
-
- Out << '.';
- const TargetInfo &Target = CGM.getTarget();
- ParsedTargetAttr Info = Target.parseTargetAttr(Attr->getFeaturesStr());
- llvm::sort(Info.Features, [&Target](StringRef LHS, StringRef RHS) {
- // Multiversioning doesn't allow "no-${feature}", so we can
- // only have "+" prefixes here.
- assert(LHS.starts_with("+") && RHS.starts_with("+") &&
- "Features should always have a prefix.");
- return Target.multiVersionSortPriority(LHS.substr(1)) >
- Target.multiVersionSortPriority(RHS.substr(1));
- });
-
- bool IsFirst = true;
-
- if (!Info.CPU.empty()) {
- IsFirst = false;
- Out << "arch_" << Info.CPU;
- }
-
- for (StringRef Feat : Info.Features) {
- if (!IsFirst)
- Out << '_';
- IsFirst = false;
- Out << Feat.substr(1);
- }
-}
-
// Returns true if GD is a function decl with internal linkage and
// needs a unique suffix after the mangled name.
static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
@@ -1789,41 +1736,6 @@ static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
(CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
}
-static void AppendTargetClonesMangling(const CodeGenModule &CGM,
- const TargetClonesAttr *Attr,
- unsigned VersionIndex,
- raw_ostream &Out) {
- const TargetInfo &TI = CGM.getTarget();
- if (TI.getTriple().isAArch64()) {
- StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
- if (FeatureStr == "default") {
- Out << ".default";
- return;
- }
- Out << "._";
- SmallVector<StringRef, 8> Features;
- FeatureStr.split(Features, "+");
- llvm::stable_sort(Features,
- [&TI](const StringRef FeatL, const StringRef FeatR) {
- return TI.multiVersionSortPriority(FeatL) <
- TI.multiVersionSortPriority(FeatR);
- });
- for (auto &Feat : Features) {
- Out << 'M';
- Out << Feat;
- }
- } else {
- Out << '.';
- StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
- if (FeatureStr.starts_with("arch="))
- Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
- else
- Out << FeatureStr;
-
- Out << '.' << Attr->getMangledIndex(VersionIndex);
- }
-}
-
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
const NamedDecl *ND,
bool OmitMultiVersionMangling = false) {
@@ -1877,16 +1789,25 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
FD->getAttr<CPUSpecificAttr>(),
GD.getMultiVersionIndex(), Out);
break;
- case MultiVersionKind::Target:
- AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
+ case MultiVersionKind::Target: {
+ auto *Attr = FD->getAttr<TargetAttr>();
+ const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
+ Info.appendAttributeMangling(Attr, Out);
break;
- case MultiVersionKind::TargetVersion:
- AppendTargetVersionMangling(CGM, FD->getAttr<TargetVersionAttr>(), Out);
+ }
+ case MultiVersionKind::TargetVersion: {
+ auto *Attr = FD->getAttr<TargetVersionAttr>();
+ const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
+ Info.appendAttributeMangling(Attr, Out);
break;
- case MultiVersionKind::TargetClones:
- AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
- GD.getMultiVersionIndex(), Out);
+ }
+ case MultiVersionKind::TargetClones: {
+ auto *Attr = FD->getAttr<TargetClonesAttr>();
+ unsigned Index = GD.getMultiVersionIndex();
+ const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
+ Info.appendAttributeMangling(Attr, Index, Out);
break;
+ }
case MultiVersionKind::None:
llvm_unreachable("None multiversion type isn't valid here");
}
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index 94f8e7be2ee6..725e8a70fddf 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -9,6 +9,7 @@
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "clang/Basic/DiagnosticFrontend.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
using namespace clang;
using namespace clang::CodeGen;
@@ -75,6 +76,12 @@ private:
bool allowBFloatArgsAndRet() const override {
return getTarget().hasBFloat16Type();
}
+
+ using ABIInfo::appendAttributeMangling;
+ void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
+ raw_ostream &Out) const override;
+ void appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const override;
};
class AArch64SwiftABIInfo : public SwiftABIInfo {
@@ -125,8 +132,7 @@ public:
assert(Error.empty());
auto *Fn = cast<llvm::Function>(GV);
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
+ Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
Fn->addFnAttr("sign-return-address-key",
@@ -857,6 +863,34 @@ void AArch64TargetCodeGenInfo::checkFunctionCallABI(
<< Callee->getDeclName();
}
+void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
+ unsigned Index,
+ raw_ostream &Out) const {
+ appendAttributeMangling(Attr->getFeatureStr(Index), Out);
+}
+
+void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const {
+ if (AttrStr == "default") {
+ Out << ".default";
+ return;
+ }
+
+ Out << "._";
+ SmallVector<StringRef, 8> Features;
+ AttrStr.split(Features, "+");
+ for (auto &Feat : Features)
+ Feat = Feat.trim();
+
+ llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
+ return LHS.compare(RHS) < 0;
+ });
+
+ for (auto &Feat : Features)
+ if (auto Ext = llvm::AArch64::parseArchExtension(Feat))
+ Out << 'M' << Ext->Name;
+}
+
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
AArch64ABIKind Kind) {
diff --git a/clang/lib/CodeGen/Targets/ARM.cpp b/clang/lib/CodeGen/Targets/ARM.cpp
index d7d175ff1724..5d42e6286e52 100644
--- a/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/clang/lib/CodeGen/Targets/ARM.cpp
@@ -152,13 +152,7 @@ public:
diag::warn_target_unsupported_branch_protection_attribute)
<< Arch;
} else {
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
- "Unexpected SignReturnAddressScopeKind");
- Fn->addFnAttr(
- "sign-return-address",
- SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
-
+ Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
Fn->addFnAttr("branch-target-enforcement",
BPI.BranchTargetEnforcement ? "true" : "false");
}
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 0cf96bb5c9cb..aa3b80cb16e5 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -165,11 +165,14 @@ getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
// Handle CPU name is 'native'.
if (MtuneLowerCase == "native")
MtuneLowerCase = std::string(llvm::sys::getHostCPUName());
+
+ // 'cyclone' and later have zero-cycle register moves and zeroing.
if (MtuneLowerCase == "cyclone" ||
StringRef(MtuneLowerCase).starts_with("apple")) {
Features.push_back("+zcm");
Features.push_back("+zcz");
}
+
return true;
}
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index dbfc729bba24..66c3a237c121 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -4479,10 +4479,9 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
options::OPT_gpubnames, options::OPT_gno_pubnames);
if (DwarfFission != DwarfFissionKind::None ||
(PubnamesArg && checkDebugInfoOption(PubnamesArg, Args, D, TC)))
- if (DebuggerTuning != llvm::DebuggerKind::LLDB &&
- (!PubnamesArg ||
- (!PubnamesArg->getOption().matches(options::OPT_gno_gnu_pubnames) &&
- !PubnamesArg->getOption().matches(options::OPT_gno_pubnames))))
+ if (!PubnamesArg ||
+ (!PubnamesArg->getOption().matches(options::OPT_gno_gnu_pubnames) &&
+ !PubnamesArg->getOption().matches(options::OPT_gno_pubnames)))
CmdArgs.push_back(PubnamesArg && PubnamesArg->getOption().matches(
options::OPT_gpubnames)
? "-gpubnames"
@@ -5959,7 +5958,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fbasic_block_address_map,
options::OPT_fno_basic_block_address_map)) {
- if (Triple.isX86() && Triple.isOSBinFormatELF()) {
+ if ((Triple.isX86() || Triple.isAArch64()) && Triple.isOSBinFormatELF()) {
if (A->getOption().matches(options::OPT_fbasic_block_address_map))
A->render(Args, CmdArgs);
} else {
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index faceee85a2f8..7f0f78b41e79 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -2763,14 +2763,10 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
StringRef BitcodeSuffix,
- const llvm::Triple &Triple) {
+ const llvm::Triple &Triple,
+ const ToolChain &HostTC) {
SmallVector<StringRef, 8> LibraryPaths;
- // Add path to clang lib / lib64 folder.
- SmallString<256> DefaultLibPath = llvm::sys::path::parent_path(D.Dir);
- llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
- LibraryPaths.emplace_back(DefaultLibPath.c_str());
-
// Add user defined library paths from LIBRARY_PATH.
std::optional<std::string> LibPath =
llvm::sys::Process::GetEnv("LIBRARY_PATH");
@@ -2782,6 +2778,10 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
LibraryPaths.emplace_back(Path.trim());
}
+ // Check all of the standard library search paths used by the compiler.
+ for (const auto &LibPath : HostTC.getFilePaths())
+ LibraryPaths.emplace_back(LibPath);
+
OptSpecifier LibomptargetBCPathOpt =
Triple.isAMDGCN() ? options::OPT_libomptarget_amdgpu_bc_path_EQ
: options::OPT_libomptarget_nvptx_bc_path_EQ;
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h
index 2db0f889ca82..b8f649aab4bd 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -214,7 +214,8 @@ void addMachineOutlinerArgs(const Driver &D, const llvm::opt::ArgList &Args,
void addOpenMPDeviceRTL(const Driver &D, const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
- StringRef BitcodeSuffix, const llvm::Triple &Triple);
+ StringRef BitcodeSuffix, const llvm::Triple &Triple,
+ const ToolChain &HostTC);
void addOutlineAtomicsArgs(const Driver &D, const ToolChain &TC,
const llvm::opt::ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index ff3687ca7dae..177fd6310e7e 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -903,7 +903,7 @@ void CudaToolChain::addClangTargetOptions(
return;
addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
- getTriple());
+ getTriple(), HostTC);
}
}
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index 4300a2bdff17..dc09b13351f4 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -237,11 +237,18 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("relro");
}
- // Android ARM/AArch64 use max-page-size=4096 to reduce VMA usage. Note, lld
- // from 11 onwards default max-page-size to 65536 for both ARM and AArch64.
- if ((Triple.isARM() || Triple.isAArch64()) && Triple.isAndroid()) {
- ExtraOpts.push_back("-z");
- ExtraOpts.push_back("max-page-size=4096");
+ // Note, lld from 11 onwards default max-page-size to 65536 for both ARM and
+ // AArch64.
+ if (Triple.isAndroid()) {
+ if (Triple.isARM()) {
+ // Android ARM uses max-page-size=4096 to reduce VMA usage.
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("max-page-size=4096");
+ } else if (Triple.isAArch64()) {
+ // Android AArch64 uses max-page-size=16384 to support 4k/16k page sizes.
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("max-page-size=16384");
+ }
}
if (GCCInstallation.getParentLibPath().contains("opt/rh/"))
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index a25aa88bd85e..444ffff30737 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -1061,30 +1061,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
}
}
- if (getDiagnosticOpts().ShowCarets) {
- // We can have multiple diagnostics sharing one diagnostic client.
- // Get the total number of warnings/errors from the client.
- unsigned NumWarnings = getDiagnostics().getClient()->getNumWarnings();
- unsigned NumErrors = getDiagnostics().getClient()->getNumErrors();
-
- if (NumWarnings)
- OS << NumWarnings << " warning" << (NumWarnings == 1 ? "" : "s");
- if (NumWarnings && NumErrors)
- OS << " and ";
- if (NumErrors)
- OS << NumErrors << " error" << (NumErrors == 1 ? "" : "s");
- if (NumWarnings || NumErrors) {
- OS << " generated";
- if (getLangOpts().CUDA) {
- if (!getLangOpts().CUDAIsDevice) {
- OS << " when compiling for host";
- } else {
- OS << " when compiling for " << getTargetOpts().CPU;
- }
- }
- OS << ".\n";
- }
- }
+ printDiagnosticStats();
if (getFrontendOpts().ShowStats) {
if (hasFileManager()) {
@@ -1112,6 +1089,36 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
return !getDiagnostics().getClient()->getNumErrors();
}
+void CompilerInstance::printDiagnosticStats() {
+ if (!getDiagnosticOpts().ShowCarets)
+ return;
+
+ raw_ostream &OS = getVerboseOutputStream();
+
+ // We can have multiple diagnostics sharing one diagnostic client.
+ // Get the total number of warnings/errors from the client.
+ unsigned NumWarnings = getDiagnostics().getClient()->getNumWarnings();
+ unsigned NumErrors = getDiagnostics().getClient()->getNumErrors();
+
+ if (NumWarnings)
+ OS << NumWarnings << " warning" << (NumWarnings == 1 ? "" : "s");
+ if (NumWarnings && NumErrors)
+ OS << " and ";
+ if (NumErrors)
+ OS << NumErrors << " error" << (NumErrors == 1 ? "" : "s");
+ if (NumWarnings || NumErrors) {
+ OS << " generated";
+ if (getLangOpts().CUDA) {
+ if (!getLangOpts().CUDAIsDevice) {
+ OS << " when compiling for host";
+ } else {
+ OS << " when compiling for " << getTargetOpts().CPU;
+ }
+ }
+ OS << ".\n";
+ }
+}
+
void CompilerInstance::LoadRequestedPlugins() {
// Load any requested plugins.
for (const std::string &Path : getFrontendOpts().Plugins) {
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 8d7b75b56d61..691f3b989b81 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -191,6 +191,17 @@ CompilerInvocationBase::shallow_copy_assign(const CompilerInvocationBase &X) {
return *this;
}
+CompilerInvocation::CompilerInvocation(const CowCompilerInvocation &X)
+ : CompilerInvocationBase(EmptyConstructor{}) {
+ CompilerInvocationBase::deep_copy_assign(X);
+}
+
+CompilerInvocation &
+CompilerInvocation::operator=(const CowCompilerInvocation &X) {
+ CompilerInvocationBase::deep_copy_assign(X);
+ return *this;
+}
+
namespace {
template <typename T>
T &ensureOwned(std::shared_ptr<T> &Storage) {
@@ -1975,14 +1986,6 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.LinkBitcodeFiles.push_back(F);
}
- if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
- if (T.isOSAIX()) {
- StringRef Name = A->getValue();
- if (Name == "local-dynamic")
- Diags.Report(diag::err_aix_unsupported_tls_model) << Name;
- }
- }
-
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
diff --git a/clang/lib/Headers/__clang_hip_math.h b/clang/lib/Headers/__clang_hip_math.h
index 34d1de0a0600..11e1e7d03258 100644
--- a/clang/lib/Headers/__clang_hip_math.h
+++ b/clang/lib/Headers/__clang_hip_math.h
@@ -1306,75 +1306,15 @@ float min(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
double min(double __x, double __y) { return __builtin_fmin(__x, __y); }
-// Define host min/max functions.
-#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__) && \
- !defined(__HIP_NO_HOST_MIN_MAX_IN_GLOBAL_NAMESPACE__)
-
-#pragma push_macro("DEFINE_MIN_MAX_FUNCTIONS")
-#pragma push_macro("DEFINE_MIN_MAX_FUNCTIONS")
-#define DEFINE_MIN_MAX_FUNCTIONS(ret_type, type1, type2) \
- inline ret_type min(const type1 __a, const type2 __b) { \
- return (__a < __b) ? __a : __b; \
- } \
- inline ret_type max(const type1 __a, const type2 __b) { \
- return (__a > __b) ? __a : __b; \
- }
-
-// Define min and max functions for same type comparisons
-DEFINE_MIN_MAX_FUNCTIONS(int, int, int)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned int, unsigned int, unsigned int)
-DEFINE_MIN_MAX_FUNCTIONS(long, long, long)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned long, unsigned long, unsigned long)
-DEFINE_MIN_MAX_FUNCTIONS(long long, long long, long long)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned long long, unsigned long long,
- unsigned long long)
-
-// The host min/max functions below accept mixed signed/unsigned integer
-// parameters and perform unsigned comparisons, which may produce unexpected
-// results if a signed integer was passed unintentionally. To avoid this
-// happening silently, these overloaded functions are not defined by default.
-// However, for compatibility with CUDA, they will be defined if users define
-// __HIP_DEFINE_MIXED_HOST_MIN_MAX__.
-#ifdef __HIP_DEFINE_MIXED_HOST_MIN_MAX__
-DEFINE_MIN_MAX_FUNCTIONS(unsigned int, int, unsigned int)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned int, unsigned int, int)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned long, long, unsigned long)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned long, unsigned long, long)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned long long, long long, unsigned long long)
-DEFINE_MIN_MAX_FUNCTIONS(unsigned long long, unsigned long long, long long)
-#endif // ifdef __HIP_DEFINE_MIXED_HOST_MIN_MAX__
-
-// Floating-point comparisons using built-in functions
-inline float min(float const __a, float const __b) {
- return __builtin_fminf(__a, __b);
-}
-inline double min(double const __a, double const __b) {
- return __builtin_fmin(__a, __b);
-}
-inline double min(float const __a, double const __b) {
- return __builtin_fmin(__a, __b);
-}
-inline double min(double const __a, float const __b) {
- return __builtin_fmin(__a, __b);
+#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
+__host__ inline static int min(int __arg1, int __arg2) {
+ return __arg1 < __arg2 ? __arg1 : __arg2;
}
-inline float max(float const __a, float const __b) {
- return __builtin_fmaxf(__a, __b);
-}
-inline double max(double const __a, double const __b) {
- return __builtin_fmax(__a, __b);
-}
-inline double max(float const __a, double const __b) {
- return __builtin_fmax(__a, __b);
+__host__ inline static int max(int __arg1, int __arg2) {
+ return __arg1 > __arg2 ? __arg1 : __arg2;
}
-inline double max(double const __a, float const __b) {
- return __builtin_fmax(__a, __b);
-}
-
-#pragma pop_macro("DEFINE_MIN_MAX_FUNCTIONS")
-
-#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__) &&
- // !defined(__HIP_NO_HOST_MIN_MAX_IN_GLOBAL_NAMESPACE__)
+#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
#endif
#pragma pop_macro("__DEVICE__")
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 96e3ebdecbdf..1d451b5f5b25 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -2099,9 +2099,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a,
}
/// Adds, with saturation, the corresponding elements of two 128-bit
-/// signed [16 x i8] vectors, saving each sum in the corresponding element of
-/// a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are
-/// saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80.
+/// signed [16 x i8] vectors, saving each sum in the corresponding element
+/// of a 128-bit result vector of [16 x i8].
+///
+/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
@@ -2119,10 +2121,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
}
/// Adds, with saturation, the corresponding elements of two 128-bit
-/// signed [8 x i16] vectors, saving each sum in the corresponding element of
-/// a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF
-/// are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-/// 0x8000.
+/// signed [8 x i16] vectors, saving each sum in the corresponding element
+/// of a 128-bit result vector of [8 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -2141,8 +2144,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
/// Adds, with saturation, the corresponding elements of two 128-bit
/// unsigned [16 x i8] vectors, saving each sum in the corresponding element
-/// of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF
-/// are saturated to 0xFF. Negative sums are saturated to 0x00.
+/// of a 128-bit result vector of [16 x i8].
+///
+/// Positive sums greater than 0xFF are saturated to 0xFF. Negative sums are
+/// saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
@@ -2161,8 +2166,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
/// Adds, with saturation, the corresponding elements of two 128-bit
/// unsigned [8 x i16] vectors, saving each sum in the corresponding element
-/// of a 128-bit result vector of [8 x i16]. Positive sums greater than
-/// 0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000.
+/// of a 128-bit result vector of [8 x i16].
+///
+/// Positive sums greater than 0xFFFF are saturated to 0xFFFF. Negative sums
+/// are saturated to 0x0000.
///
/// \headerfile <x86intrin.h>
///
@@ -2518,10 +2525,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a,
return (__m128i)((__v2du)__a - (__v2du)__b);
}
-/// Subtracts corresponding 8-bit signed integer values in the input and
-/// returns the differences in the corresponding bytes in the destination.
-/// Differences greater than 0x7F are saturated to 0x7F, and differences less
-/// than 0x80 are saturated to 0x80.
+/// Subtracts, with saturation, corresponding 8-bit signed integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
+/// Differences greater than 0x7F are saturated to 0x7F, and differences
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
@@ -2538,8 +2547,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
}
-/// Subtracts corresponding 16-bit signed integer values in the input and
-/// returns the differences in the corresponding bytes in the destination.
+/// Subtracts, with saturation, corresponding 16-bit signed integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
/// Differences greater than 0x7FFF are saturated to 0x7FFF, and values less
/// than 0x8000 are saturated to 0x8000.
///
@@ -2558,9 +2569,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
}
-/// Subtracts corresponding 8-bit unsigned integer values in the input
-/// and returns the differences in the corresponding bytes in the
-/// destination. Differences less than 0x00 are saturated to 0x00.
+/// Subtracts, with saturation, corresponding 8-bit unsigned integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
+/// Differences less than 0x00 are saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
@@ -2577,9 +2590,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
}
-/// Subtracts corresponding 16-bit unsigned integer values in the input
-/// and returns the differences in the corresponding bytes in the
-/// destination. Differences less than 0x0000 are saturated to 0x0000.
+/// Subtracts, with saturation, corresponding 16-bit unsigned integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
+/// Differences less than 0x0000 are saturated to 0x0000.
///
/// \headerfile <x86intrin.h>
///
@@ -4050,26 +4065,22 @@ void _mm_mfence(void);
} // extern "C"
#endif
-/// Converts 16-bit signed integers from both 128-bit integer vector
-/// operands into 8-bit signed integers, and packs the results into the
-/// destination. Positive values greater than 0x7F are saturated to 0x7F.
-/// Negative values less than 0x80 are saturated to 0x80.
+/// Converts, with saturation, 16-bit signed integers from both 128-bit integer
+/// vector operands into 8-bit signed integers, and packs the results into
+/// the destination.
+///
+/// Positive values greater than 0x7F are saturated to 0x7F. Negative values
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPACKSSWB / PACKSSWB </c> instruction.
///
/// \param __a
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to a 8-bit signed integer with
-/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the lower 64 bits of the result.
/// \param __b
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to a 8-bit signed integer with
-/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
@@ -4077,26 +4088,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
}
-/// Converts 32-bit signed integers from both 128-bit integer vector
-/// operands into 16-bit signed integers, and packs the results into the
-/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-/// Negative values less than 0x8000 are saturated to 0x8000.
+/// Converts, with saturation, 32-bit signed integers from both 128-bit integer
+/// vector operands into 16-bit signed integers, and packs the results into
+/// the destination.
+///
+/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative
+/// values less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPACKSSDW / PACKSSDW </c> instruction.
///
/// \param __a
-/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-/// a signed integer and is converted to a 16-bit signed integer with
-/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
+/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values
/// are written to the lower 64 bits of the result.
/// \param __b
-/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-/// a signed integer and is converted to a 16-bit signed integer with
-/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
+/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
@@ -4104,26 +4111,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
}
-/// Converts 16-bit signed integers from both 128-bit integer vector
-/// operands into 8-bit unsigned integers, and packs the results into the
-/// destination. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0x00 are saturated to 0x00.
+/// Converts, with saturation, 16-bit signed integers from both 128-bit integer
+/// vector operands into 8-bit unsigned integers, and packs the results into
+/// the destination.
+///
+/// Values greater than 0xFF are saturated to 0xFF. Values less than 0x00
+/// are saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPACKUSWB / PACKUSWB </c> instruction.
///
/// \param __a
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the lower 64 bits of the result.
/// \param __b
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a,
diff --git a/clang/lib/Headers/fmaintrin.h b/clang/lib/Headers/fmaintrin.h
index ea832fac4f99..22d1a780bbfd 100644
--- a/clang/lib/Headers/fmaintrin.h
+++ b/clang/lib/Headers/fmaintrin.h
@@ -60,7 +60,8 @@ _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar multiply-add of the single-precision values in the
/// low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -88,7 +89,8 @@ _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar multiply-add of the double-precision values in the
/// low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -156,7 +158,8 @@ _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar multiply-subtract of the single-precision values in
/// the low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -184,7 +187,8 @@ _mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar multiply-subtract of the double-precision values in
/// the low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -252,7 +256,8 @@ _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar negated multiply-add of the single-precision values in
/// the low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -280,7 +285,8 @@ _mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar negated multiply-add of the double-precision values
/// in the low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -348,7 +354,8 @@ _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar negated multiply-subtract of the single-precision
/// values in the low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -376,7 +383,8 @@ _mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar negated multiply-subtract of the double-precision
/// values in the low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -404,7 +412,8 @@ _mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
@@ -430,7 +439,8 @@ _mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
/// \endcode
@@ -454,7 +464,8 @@ _mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
@@ -480,7 +491,8 @@ _mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
/// \endcode
@@ -664,7 +676,8 @@ _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
/// [8 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
@@ -694,7 +707,8 @@ _mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
/// [4 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128]
@@ -720,7 +734,8 @@ _mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
/// Computes a vector multiply with alternating add/subtract of 256-bit
/// vectors of [8 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
@@ -750,7 +765,8 @@ _mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
/// Computes a vector multiply with alternating add/subtract of 256-bit
/// vectors of [4 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128]
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 08e5d981a4a4..518053036388 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -318,6 +318,74 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
double4 floor(double4);
//===----------------------------------------------------------------------===//
+// frac builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T frac(T x)
+/// \brief Returns the fractional (or decimal) part of the \a x parameter.
+/// \param x The specified input value.
+///
+/// The return value is greater than or equal to 0 and less than 1.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half frac(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half2 frac(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half3 frac(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half4 frac(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float frac(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float2 frac(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float3 frac(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float4 frac(float4);
+
+//===----------------------------------------------------------------------===//
+// lerp builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T lerp(T x, T y, T s)
+/// \brief Returns the linear interpolation of x to y by s.
+/// \param x [in] The first floating-point value.
+/// \param y [in] The second floating-point value.
+/// \param s [in] A value that linearly interpolates between the x parameter and
+/// the y parameter.
+///
+/// Linear interpolation is based on the following formula: x*(1-s) + y*s which
+/// can equivalently be written as x + s(y-x).
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half lerp(half, half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half2 lerp(half2, half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half3 lerp(half3, half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half4 lerp(half4, half4, half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float lerp(float, float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float2 lerp(float2, float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float3 lerp(float3, float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float4 lerp(float4, float4, float4);
+
+//===----------------------------------------------------------------------===//
// log builtins
//===----------------------------------------------------------------------===//
@@ -764,6 +832,40 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint64_t4 reversebits(uint64_t4);
//===----------------------------------------------------------------------===//
+// round builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T round(T x)
+/// \brief Rounds the specified value \a x to the nearest integer.
+/// \param x The specified input value.
+///
+/// The return value is the \a x parameter, rounded to the nearest integer
+/// within a floating-point type. Halfway cases are
+/// rounded to the nearest even value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+half round(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+half2 round(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+half3 round(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+half4 round(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+float round(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+float2 round(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+float3 round(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+float4 round(float4);
+
+//===----------------------------------------------------------------------===//
// sin builtins
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h
index 08849f01071a..962d24738e7a 100644
--- a/clang/lib/Headers/mmintrin.h
+++ b/clang/lib/Headers/mmintrin.h
@@ -105,28 +105,23 @@ _mm_cvtm64_si64(__m64 __m)
return (long long)__m;
}
-/// Converts 16-bit signed integers from both 64-bit integer vector
-/// parameters of [4 x i16] into 8-bit signed integer values, and constructs
-/// a 64-bit integer vector of [8 x i8] as the result. Positive values
-/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80
-/// are saturated to 0x80.
+/// Converts, with saturation, 16-bit signed integers from both 64-bit integer
+/// vector parameters of [4 x i16] into 8-bit signed integer values, and
+/// constructs a 64-bit integer vector of [8 x i8] as the result.
+///
+/// Positive values greater than 0x7F are saturated to 0x7F. Negative values
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> PACKSSWB </c> instruction.
///
/// \param __m1
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit signed integer with
-/// saturation. Positive values greater than 0x7F are saturated to 0x7F.
-/// Negative values less than 0x80 are saturated to 0x80. The converted
-/// [4 x i8] values are written to the lower 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the lower 32 bits of the result.
/// \param __m2
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit signed integer with
-/// saturation. Positive values greater than 0x7F are saturated to 0x7F.
-/// Negative values less than 0x80 are saturated to 0x80. The converted
-/// [4 x i8] values are written to the upper 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the converted
/// values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -135,28 +130,23 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
}
-/// Converts 32-bit signed integers from both 64-bit integer vector
-/// parameters of [2 x i32] into 16-bit signed integer values, and constructs
-/// a 64-bit integer vector of [4 x i16] as the result. Positive values
-/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than
-/// 0x8000 are saturated to 0x8000.
+/// Converts, with saturation, 32-bit signed integers from both 64-bit integer
+/// vector parameters of [2 x i32] into 16-bit signed integer values, and
+/// constructs a 64-bit integer vector of [4 x i16] as the result.
+///
+/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative
+/// values less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> PACKSSDW </c> instruction.
///
/// \param __m1
-/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
-/// 32-bit signed integer and is converted to a 16-bit signed integer with
-/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-/// Negative values less than 0x8000 are saturated to 0x8000. The converted
-/// [2 x i16] values are written to the lower 32 bits of the result.
+/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are
+/// written to the lower 32 bits of the result.
/// \param __m2
-/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
-/// 32-bit signed integer and is converted to a 16-bit signed integer with
-/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-/// Negative values less than 0x8000 are saturated to 0x8000. The converted
-/// [2 x i16] values are written to the upper 32 bits of the result.
+/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are
+/// written to the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [4 x i16] containing the converted
/// values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -165,28 +155,23 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
}
-/// Converts 16-bit signed integers from both 64-bit integer vector
-/// parameters of [4 x i16] into 8-bit unsigned integer values, and
-/// constructs a 64-bit integer vector of [8 x i8] as the result. Values
-/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated
-/// to 0.
+/// Converts, with saturation, 16-bit signed integers from both 64-bit integer
+/// vector parameters of [4 x i16] into 8-bit unsigned integer values, and
+/// constructs a 64-bit integer vector of [8 x i8] as the result.
+///
+/// Values greater than 0xFF are saturated to 0xFF. Values less than 0 are
+/// saturated to 0.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> PACKUSWB </c> instruction.
///
/// \param __m1
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0 are saturated to 0. The converted [4 x i8] values are written to
-/// the lower 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the lower 32 bits of the result.
/// \param __m2
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0 are saturated to 0. The converted [4 x i8] values are written to
-/// the upper 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the converted
/// values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -400,11 +385,13 @@ _mm_add_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
}
-/// Adds each 8-bit signed integer element of the first 64-bit integer
-/// vector of [8 x i8] to the corresponding 8-bit signed integer element of
-/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than
-/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to
-/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8].
+/// Adds, with saturation, each 8-bit signed integer element of the first
+/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit signed
+/// integer element of the second 64-bit integer vector of [8 x i8].
+///
+/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums
+/// less than 0x80 are saturated to 0x80. The results are packed into a
+/// 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
///
@@ -422,12 +409,13 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
}
-/// Adds each 16-bit signed integer element of the first 64-bit integer
-/// vector of [4 x i16] to the corresponding 16-bit signed integer element of
-/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than
-/// 0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are
-/// saturated to 0x8000. The results are packed into a 64-bit integer vector
-/// of [4 x i16].
+/// Adds, with saturation, each 16-bit signed integer element of the first
+/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit signed
+/// integer element of the second 64-bit integer vector of [4 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000. The results are packed into a
+/// 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
///
@@ -445,11 +433,12 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
}
-/// Adds each 8-bit unsigned integer element of the first 64-bit integer
-/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of
-/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are
-/// saturated to 0xFF. The results are packed into a 64-bit integer vector of
-/// [8 x i8].
+/// Adds, with saturation, each 8-bit unsigned integer element of the first
+/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit unsigned
+/// integer element of the second 64-bit integer vector of [8 x i8].
+///
+/// Sums greater than 0xFF are saturated to 0xFF. The results are packed
+/// into a 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
///
@@ -467,11 +456,12 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
}
-/// Adds each 16-bit unsigned integer element of the first 64-bit integer
-/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element
-/// of the second 64-bit integer vector of [4 x i16]. Sums greater than
-/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit
-/// integer vector of [4 x i16].
+/// Adds, with saturation, each 16-bit unsigned integer element of the first
+/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit unsigned
+/// integer element of the second 64-bit integer vector of [4 x i16].
+///
+/// Sums greater than 0xFFFF are saturated to 0xFFFF. The results are packed
+/// into a 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
///
@@ -552,12 +542,13 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
}
-/// Subtracts each 8-bit signed integer element of the second 64-bit
-/// integer vector of [8 x i8] from the corresponding 8-bit signed integer
-/// element of the first 64-bit integer vector of [8 x i8]. Positive results
-/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80
-/// are saturated to 0x80. The results are packed into a 64-bit integer
-/// vector of [8 x i8].
+/// Subtracts, with saturation, each 8-bit signed integer element of the second
+/// 64-bit integer vector of [8 x i8] from the corresponding 8-bit signed
+/// integer element of the first 64-bit integer vector of [8 x i8].
+///
+/// Positive results greater than 0x7F are saturated to 0x7F. Negative
+/// results less than 0x80 are saturated to 0x80. The results are packed
+/// into a 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
///
@@ -575,12 +566,13 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
}
-/// Subtracts each 16-bit signed integer element of the second 64-bit
-/// integer vector of [4 x i16] from the corresponding 16-bit signed integer
-/// element of the first 64-bit integer vector of [4 x i16]. Positive results
-/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than
-/// 0x8000 are saturated to 0x8000. The results are packed into a 64-bit
-/// integer vector of [4 x i16].
+/// Subtracts, with saturation, each 16-bit signed integer element of the
+/// second 64-bit integer vector of [4 x i16] from the corresponding 16-bit
+/// signed integer element of the first 64-bit integer vector of [4 x i16].
+///
+/// Positive results greater than 0x7FFF are saturated to 0x7FFF. Negative
+/// results less than 0x8000 are saturated to 0x8000. The results are packed
+/// into a 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
///
diff --git a/clang/lib/Headers/prfchwintrin.h b/clang/lib/Headers/prfchwintrin.h
index d2f91aa0123e..8a13784543c5 100644
--- a/clang/lib/Headers/prfchwintrin.h
+++ b/clang/lib/Headers/prfchwintrin.h
@@ -15,9 +15,10 @@
#define __PRFCHWINTRIN_H
/// Loads a memory sequence containing the specified memory address into
-/// all data cache levels. The cache-coherency state is set to exclusive.
-/// Data can be read from and written to the cache line without additional
-/// delay.
+/// all data cache levels.
+///
+/// The cache-coherency state is set to exclusive. Data can be read from
+/// and written to the cache line without additional delay.
///
/// \headerfile <x86intrin.h>
///
@@ -32,10 +33,11 @@ _m_prefetch(void *__P)
}
/// Loads a memory sequence containing the specified memory address into
-/// the L1 data cache and sets the cache-coherency to modified. This
-/// provides a hint to the processor that the cache line will be modified.
-/// It is intended for use when the cache line will be written to shortly
-/// after the prefetch is performed.
+/// the L1 data cache and sets the cache-coherency state to modified.
+///
+/// This provides a hint to the processor that the cache line will be
+/// modified. It is intended for use when the cache line will be written to
+/// shortly after the prefetch is performed.
///
/// Note that the effect of this intrinsic is dependent on the processor
/// implementation.
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 005d7db9c3c3..c52ffb77e33d 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -1431,8 +1431,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
}
/* SSE4 Pack with Unsigned Saturation. */
-/// Converts 32-bit signed integers from both 128-bit integer vector
-/// operands into 16-bit unsigned integers, and returns the packed result.
+/// Converts, with saturation, 32-bit signed integers from both 128-bit integer
+/// vector operands into 16-bit unsigned integers, and returns the packed
+/// result.
+///
/// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than
/// 0x0000 are saturated to 0x0000.
///
@@ -1441,17 +1443,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
/// This intrinsic corresponds to the <c> VPACKUSDW / PACKUSDW </c> instruction.
///
/// \param __V1
-/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a
-/// signed integer and is converted to a 16-bit unsigned integer with
-/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values
-/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
-/// are written to the lower 64 bits of the result.
+/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are
+/// written to the lower 64 bits of the result.
/// \param __V2
-/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a
-/// signed integer and is converted to a 16-bit unsigned integer with
-/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values
-/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
-/// are written to the higher 64 bits of the result.
+/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are
+/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
__m128i __V2) {
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index 7d8dc46c57bf..bf8327b692d1 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -271,10 +271,11 @@ _mm_hadd_pi32(__m64 __a, __m64 __b)
return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
}
-/// Horizontally adds the adjacent pairs of values contained in 2 packed
-/// 128-bit vectors of [8 x i16]. Positive sums greater than 0x7FFF are
-/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-/// 0x8000.
+/// Horizontally adds, with saturation, the adjacent pairs of values contained
+/// in two packed 128-bit vectors of [8 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -296,10 +297,11 @@ _mm_hadds_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
}
-/// Horizontally adds the adjacent pairs of values contained in 2 packed
-/// 64-bit vectors of [4 x i16]. Positive sums greater than 0x7FFF are
-/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-/// 0x8000.
+/// Horizontally adds, with saturation, the adjacent pairs of values contained
+/// in two packed 64-bit vectors of [4 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -413,10 +415,11 @@ _mm_hsub_pi32(__m64 __a, __m64 __b)
return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
}
-/// Horizontally subtracts the adjacent pairs of values contained in 2
-/// packed 128-bit vectors of [8 x i16]. Positive differences greater than
-/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are
-/// saturated to 0x8000.
+/// Horizontally subtracts, with saturation, the adjacent pairs of values
+/// contained in two packed 128-bit vectors of [8 x i16].
+///
+/// Positive differences greater than 0x7FFF are saturated to 0x7FFF.
+/// Negative differences less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -438,10 +441,11 @@ _mm_hsubs_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
}
-/// Horizontally subtracts the adjacent pairs of values contained in 2
-/// packed 64-bit vectors of [4 x i16]. Positive differences greater than
-/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are
-/// saturated to 0x8000.
+/// Horizontally subtracts, with saturation, the adjacent pairs of values
+/// contained in two packed 64-bit vectors of [4 x i16].
+///
+/// Positive differences greater than 0x7FFF are saturated to 0x7FFF.
+/// Negative differences less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
diff --git a/clang/lib/Index/IndexSymbol.cpp b/clang/lib/Index/IndexSymbol.cpp
index 0f79694d1faa..419ff79a5cba 100644
--- a/clang/lib/Index/IndexSymbol.cpp
+++ b/clang/lib/Index/IndexSymbol.cpp
@@ -552,8 +552,7 @@ StringRef index::getSymbolSubKindString(SymbolSubKind K) {
case SymbolSubKind::AccessorSetter: return "acc-set";
case SymbolSubKind::UsingTypename: return "using-typename";
case SymbolSubKind::UsingValue: return "using-value";
- case SymbolSubKind::UsingEnum:
- return "using-enum";
+ case SymbolSubKind::UsingEnum: return "using-enum";
}
llvm_unreachable("invalid symbol subkind");
}
diff --git a/clang/lib/Index/IndexingAction.cpp b/clang/lib/Index/IndexingAction.cpp
index c9fcaad31128..81c46a0d08de 100644
--- a/clang/lib/Index/IndexingAction.cpp
+++ b/clang/lib/Index/IndexingAction.cpp
@@ -199,7 +199,7 @@ index::createIndexingAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
}
static bool topLevelDeclVisitor(void *context, const Decl *D) {
- IndexingContext &IndexCtx = *static_cast<IndexingContext*>(context);
+ IndexingContext &IndexCtx = *static_cast<IndexingContext *>(context);
return IndexCtx.indexTopLevelDecl(D);
}
diff --git a/clang/lib/InstallAPI/CMakeLists.txt b/clang/lib/InstallAPI/CMakeLists.txt
index fdc4f064f29e..dc90d6370de4 100644
--- a/clang/lib/InstallAPI/CMakeLists.txt
+++ b/clang/lib/InstallAPI/CMakeLists.txt
@@ -1,13 +1,17 @@
set(LLVM_LINK_COMPONENTS
Support
TextAPI
+ Core
)
add_clang_library(clangInstallAPI
FileList.cpp
+ Frontend.cpp
HeaderFile.cpp
+ Visitor.cpp
LINK_LIBS
clangAST
clangBasic
+ clangLex
)
diff --git a/clang/lib/InstallAPI/Frontend.cpp b/clang/lib/InstallAPI/Frontend.cpp
new file mode 100644
index 000000000000..caa6e7e8a405
--- /dev/null
+++ b/clang/lib/InstallAPI/Frontend.cpp
@@ -0,0 +1,129 @@
+//===- Frontend.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/Frontend.h"
+#include "clang/AST/Availability.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+
+namespace clang::installapi {
+
+GlobalRecord *FrontendRecordsSlice::addGlobal(
+ StringRef Name, RecordLinkage Linkage, GlobalRecord::Kind GV,
+ const clang::AvailabilityInfo Avail, const Decl *D, const HeaderType Access,
+ SymbolFlags Flags) {
+
+ auto *GR = llvm::MachO::RecordsSlice::addGlobal(Name, Linkage, GV, Flags);
+ FrontendRecords.insert({GR, FrontendAttrs{Avail, D, Access}});
+ return GR;
+}
+
+ObjCInterfaceRecord *FrontendRecordsSlice::addObjCInterface(
+ StringRef Name, RecordLinkage Linkage, const clang::AvailabilityInfo Avail,
+ const Decl *D, HeaderType Access, bool IsEHType) {
+ ObjCIFSymbolKind SymType =
+ ObjCIFSymbolKind::Class | ObjCIFSymbolKind::MetaClass;
+ if (IsEHType)
+ SymType |= ObjCIFSymbolKind::EHType;
+ auto *ObjCR =
+ llvm::MachO::RecordsSlice::addObjCInterface(Name, Linkage, SymType);
+ FrontendRecords.insert({ObjCR, FrontendAttrs{Avail, D, Access}});
+ return ObjCR;
+}
+
+std::optional<HeaderType>
+InstallAPIContext::findAndRecordFile(const FileEntry *FE,
+ const Preprocessor &PP) {
+ if (!FE)
+ return std::nullopt;
+
+ // Check if header has been looked up already and whether it is something
+ // installapi should use.
+ auto It = KnownFiles.find(FE);
+ if (It != KnownFiles.end()) {
+ if (It->second != HeaderType::Unknown)
+ return It->second;
+ else
+ return std::nullopt;
+ }
+
+ // If file was not found, search by how the header was
+ // included. This is primarily to resolve headers found
+ // in a different location than what passed directly as input.
+ StringRef IncludeName = PP.getHeaderSearchInfo().getIncludeNameForHeader(FE);
+ auto BackupIt = KnownIncludes.find(IncludeName.str());
+ if (BackupIt != KnownIncludes.end()) {
+ KnownFiles[FE] = BackupIt->second;
+ return BackupIt->second;
+ }
+
+ // Record that the file was found to avoid future string searches for the
+ // same file.
+ KnownFiles.insert({FE, HeaderType::Unknown});
+ return std::nullopt;
+}
+
+void InstallAPIContext::addKnownHeader(const HeaderFile &H) {
+ auto FE = FM->getFile(H.getPath());
+ if (!FE)
+ return; // File does not exist.
+ KnownFiles[*FE] = H.getType();
+
+ if (!H.useIncludeName())
+ return;
+
+ KnownIncludes[H.getIncludeName()] = H.getType();
+}
+
+static StringRef getFileExtension(clang::Language Lang) {
+ switch (Lang) {
+ default:
+ llvm_unreachable("Unexpected language option.");
+ case clang::Language::C:
+ return ".c";
+ case clang::Language::CXX:
+ return ".cpp";
+ case clang::Language::ObjC:
+ return ".m";
+ case clang::Language::ObjCXX:
+ return ".mm";
+ }
+}
+
+std::unique_ptr<MemoryBuffer> createInputBuffer(InstallAPIContext &Ctx) {
+ assert(Ctx.Type != HeaderType::Unknown &&
+ "unexpected access level for parsing");
+ SmallString<4096> Contents;
+ raw_svector_ostream OS(Contents);
+ for (const HeaderFile &H : Ctx.InputHeaders) {
+ if (H.getType() != Ctx.Type)
+ continue;
+ if (Ctx.LangMode == Language::C || Ctx.LangMode == Language::CXX)
+ OS << "#include ";
+ else
+ OS << "#import ";
+ if (H.useIncludeName())
+ OS << "<" << H.getIncludeName() << ">";
+ else
+ OS << "\"" << H.getPath() << "\"";
+
+ Ctx.addKnownHeader(H);
+ }
+ if (Contents.empty())
+ return nullptr;
+
+ SmallString<64> BufferName(
+ {"installapi-includes-", Ctx.Slice->getTriple().str(), "-",
+ getName(Ctx.Type), getFileExtension(Ctx.LangMode)});
+ return llvm::MemoryBuffer::getMemBufferCopy(Contents, BufferName);
+}
+
+} // namespace clang::installapi
diff --git a/clang/lib/InstallAPI/Visitor.cpp b/clang/lib/InstallAPI/Visitor.cpp
new file mode 100644
index 000000000000..355a092520c3
--- /dev/null
+++ b/clang/lib/InstallAPI/Visitor.cpp
@@ -0,0 +1,158 @@
+//===- Visitor.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/Visitor.h"
+#include "clang/Basic/Linkage.h"
+#include "clang/InstallAPI/Frontend.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+
+namespace clang::installapi {
+
+// Exported NamedDecl needs to have external linkage and
+// default visibility from LinkageComputer.
+static bool isExported(const NamedDecl *D) {
+ auto LV = D->getLinkageAndVisibility();
+ return isExternallyVisible(LV.getLinkage()) &&
+ (LV.getVisibility() == DefaultVisibility);
+}
+
+static SymbolFlags getFlags(bool WeakDef, bool ThreadLocal) {
+ SymbolFlags Result = SymbolFlags::None;
+ if (WeakDef)
+ Result |= SymbolFlags::WeakDefined;
+ if (ThreadLocal)
+ Result |= SymbolFlags::ThreadLocalValue;
+
+ return Result;
+}
+
+void InstallAPIVisitor::HandleTranslationUnit(ASTContext &ASTCtx) {
+ if (ASTCtx.getDiagnostics().hasErrorOccurred())
+ return;
+
+ auto *D = ASTCtx.getTranslationUnitDecl();
+ TraverseDecl(D);
+}
+
+std::string InstallAPIVisitor::getMangledName(const NamedDecl *D) const {
+ SmallString<256> Name;
+ if (MC->shouldMangleDeclName(D)) {
+ raw_svector_ostream NStream(Name);
+ MC->mangleName(D, NStream);
+ } else
+ Name += D->getNameAsString();
+
+ return getBackendMangledName(Name);
+}
+
+std::string InstallAPIVisitor::getBackendMangledName(Twine Name) const {
+ SmallString<256> FinalName;
+ Mangler::getNameWithPrefix(FinalName, Name, DataLayout(Layout));
+ return std::string(FinalName);
+}
+
+std::optional<HeaderType>
+InstallAPIVisitor::getAccessForDecl(const NamedDecl *D) const {
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ return std::nullopt;
+
+ // If the loc refers to a macro expansion, InstallAPI needs to first get the
+ // file location of the expansion.
+ auto FileLoc = SrcMgr.getFileLoc(Loc);
+ FileID ID = SrcMgr.getFileID(FileLoc);
+ if (ID.isInvalid())
+ return std::nullopt;
+
+ const FileEntry *FE = SrcMgr.getFileEntryForID(ID);
+ if (!FE)
+ return std::nullopt;
+
+ auto Header = Ctx.findAndRecordFile(FE, PP);
+ if (!Header.has_value())
+ return std::nullopt;
+
+ HeaderType Access = Header.value();
+ assert(Access != HeaderType::Unknown && "unexpected access level for global");
+ return Access;
+}
+
+/// Check if the interface itself or any of its super classes have an
+/// exception attribute. InstallAPI needs to export an additional symbol
+/// ("OBJC_EHTYPE_$CLASS_NAME") if any of the classes have the exception
+/// attribute.
+static bool hasObjCExceptionAttribute(const ObjCInterfaceDecl *D) {
+ for (; D != nullptr; D = D->getSuperClass())
+ if (D->hasAttr<ObjCExceptionAttr>())
+ return true;
+
+ return false;
+}
+
+bool InstallAPIVisitor::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
+ // Skip forward declaration for classes (@class)
+ if (!D->isThisDeclarationADefinition())
+ return true;
+
+ // Skip over declarations that access could not be collected for.
+ auto Access = getAccessForDecl(D);
+ if (!Access)
+ return true;
+
+ StringRef Name = D->getObjCRuntimeNameAsString();
+ const RecordLinkage Linkage =
+ isExported(D) ? RecordLinkage::Exported : RecordLinkage::Internal;
+ const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(D);
+ const bool IsEHType =
+ (!D->getASTContext().getLangOpts().ObjCRuntime.isFragile() &&
+ hasObjCExceptionAttribute(D));
+
+ Ctx.Slice->addObjCInterface(Name, Linkage, Avail, D, *Access, IsEHType);
+ return true;
+}
+
+bool InstallAPIVisitor::VisitVarDecl(const VarDecl *D) {
+ // Skip function parameters.
+ if (isa<ParmVarDecl>(D))
+ return true;
+
+  // Skip variables in records. They are handled separately for C++.
+ if (D->getDeclContext()->isRecord())
+ return true;
+
+ // Skip anything inside functions or methods.
+ if (!D->isDefinedOutsideFunctionOrMethod())
+ return true;
+
+ // If this is a template but not specialization or instantiation, skip.
+ if (D->getASTContext().getTemplateOrSpecializationInfo(D) &&
+ D->getTemplateSpecializationKind() == TSK_Undeclared)
+ return true;
+
+  // Skip over declarations that access could not be collected for.
+ auto Access = getAccessForDecl(D);
+ if (!Access)
+ return true;
+
+ const RecordLinkage Linkage =
+ isExported(D) ? RecordLinkage::Exported : RecordLinkage::Internal;
+ const bool WeakDef = D->hasAttr<WeakAttr>();
+ const bool ThreadLocal = D->getTLSKind() != VarDecl::TLS_None;
+ const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(D);
+ Ctx.Slice->addGlobal(getMangledName(D), Linkage, GlobalRecord::Kind::Variable,
+ Avail, D, *Access, getFlags(WeakDef, ThreadLocal));
+ return true;
+}
+
+} // namespace clang::installapi
diff --git a/clang/lib/Sema/JumpDiagnostics.cpp b/clang/lib/Sema/JumpDiagnostics.cpp
index ec3892e92f3c..6722878883be 100644
--- a/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/clang/lib/Sema/JumpDiagnostics.cpp
@@ -604,6 +604,16 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
break;
}
+ case Stmt::OpenACCComputeConstructClass: {
+ unsigned NewParentScope = Scopes.size();
+ OpenACCComputeConstruct *CC = cast<OpenACCComputeConstruct>(S);
+ Scopes.push_back(GotoScope(
+ ParentScope, diag::note_acc_branch_into_compute_construct,
+ diag::note_acc_branch_out_of_compute_construct, CC->getBeginLoc()));
+ BuildScopeInformation(CC->getStructuredBlock(), NewParentScope);
+ return;
+ }
+
default:
if (auto *ED = dyn_cast<OMPExecutableDirective>(S)) {
if (!ED->isStandaloneDirective()) {
@@ -936,11 +946,16 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
if (Scopes[I].InDiag == diag::note_protected_by_seh_finally) {
S.Diag(From->getBeginLoc(), diag::warn_jump_out_of_seh_finally);
break;
- }
- if (Scopes[I].InDiag == diag::note_omp_protected_structured_block) {
+ } else if (Scopes[I].InDiag ==
+ diag::note_omp_protected_structured_block) {
S.Diag(From->getBeginLoc(), diag::err_goto_into_protected_scope);
S.Diag(To->getBeginLoc(), diag::note_omp_exits_structured_block);
break;
+ } else if (Scopes[I].InDiag ==
+ diag::note_acc_branch_into_compute_construct) {
+ S.Diag(From->getBeginLoc(), diag::err_goto_into_protected_scope);
+ S.Diag(Scopes[I].Loc, diag::note_acc_branch_out_of_compute_construct);
+ return;
}
}
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 0de76ee119cf..0d4d57db01c9 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -2180,9 +2180,11 @@ static bool SemaBuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
// Check the contents of the string.
StringRef Feature = cast<StringLiteral>(Arg)->getString();
- if (IsCPUSupports && !TheTI->validateCpuSupports(Feature))
- return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
- << Arg->getSourceRange();
+ if (IsCPUSupports && !TheTI->validateCpuSupports(Feature)) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_invalid_cpu_supports)
+ << Arg->getSourceRange();
+ return false;
+ }
if (!IsCPUSupports && !TheTI->validateCpuIs(Feature))
return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
<< Arg->getSourceRange();
@@ -2190,17 +2192,23 @@ static bool SemaBuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
}
/// Checks that __builtin_popcountg was called with a single argument, which is
-/// an integer.
+/// an unsigned integer.
static bool SemaBuiltinPopcountg(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 1))
return true;
- Expr *Arg = TheCall->getArg(0);
+ ExprResult ArgRes = S.DefaultLvalueConversion(TheCall->getArg(0));
+ if (ArgRes.isInvalid())
+ return true;
+
+ Expr *Arg = ArgRes.get();
+ TheCall->setArg(0, Arg);
+
QualType ArgTy = Arg->getType();
- if (!ArgTy->isIntegerType()) {
+ if (!ArgTy->isUnsignedIntegerType()) {
S.Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
- << 1 << /*integer ty*/ 7 << ArgTy;
+ << 1 << /*unsigned integer ty*/ 7 << ArgTy;
return true;
}
return false;
@@ -5191,49 +5199,74 @@ bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
assert(TheCall->getNumArgs() > 1);
ExprResult A = TheCall->getArg(0);
- ExprResult B = TheCall->getArg(1);
+
QualType ArgTyA = A.get()->getType();
- QualType ArgTyB = B.get()->getType();
+
auto *VecTyA = ArgTyA->getAs<VectorType>();
- auto *VecTyB = ArgTyB->getAs<VectorType>();
SourceLocation BuiltinLoc = TheCall->getBeginLoc();
- if (VecTyA == nullptr && VecTyB == nullptr)
- return false;
- if (VecTyA && VecTyB) {
- bool retValue = false;
- if (VecTyA->getElementType() != VecTyB->getElementType()) {
- // Note: type promotion is intended to be handeled via the intrinsics
- // and not the builtin itself.
- S->Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_incompatible_vector)
- << TheCall->getDirectCallee()
- << SourceRange(A.get()->getBeginLoc(), B.get()->getEndLoc());
- retValue = true;
- }
- if (VecTyA->getNumElements() != VecTyB->getNumElements()) {
- // if we get here a HLSLVectorTruncation is needed.
- S->Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
- << TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(0)->getBeginLoc(),
- TheCall->getArg(1)->getEndLoc());
- retValue = true;
- }
+ for (unsigned i = 1; i < TheCall->getNumArgs(); ++i) {
+ ExprResult B = TheCall->getArg(i);
+ QualType ArgTyB = B.get()->getType();
+ auto *VecTyB = ArgTyB->getAs<VectorType>();
+ if (VecTyA == nullptr && VecTyB == nullptr)
+ return false;
- if (retValue)
- TheCall->setType(VecTyA->getElementType());
+ if (VecTyA && VecTyB) {
+ bool retValue = false;
+ if (VecTyA->getElementType() != VecTyB->getElementType()) {
+ // Note: type promotion is intended to be handled via the intrinsics
+ // and not the builtin itself.
+ S->Diag(TheCall->getBeginLoc(),
+ diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee() << /*useAllTerminology*/ true
+ << SourceRange(A.get()->getBeginLoc(), B.get()->getEndLoc());
+ retValue = true;
+ }
+ if (VecTyA->getNumElements() != VecTyB->getNumElements()) {
+ // You should only be hitting this case if you are calling the builtin
+ // directly. HLSL intrinsics should avoid this case via a
+ // HLSLVectorTruncation.
+ S->Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee() << /*useAllTerminology*/ true
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
+ retValue = true;
+ }
+
+ if (!retValue)
+ TheCall->setType(VecTyA->getElementType());
- return retValue;
+ return retValue;
+ }
}
// Note: if we get here one of the args is a scalar which
// requires a VectorSplat on Arg0 or Arg1
S->Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
- << TheCall->getDirectCallee()
+ << TheCall->getDirectCallee() << /*useAllTerminology*/ true
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc());
return true;
}
+bool CheckAllArgsHaveFloatRepresentation(Sema *S, CallExpr *TheCall) {
+ QualType ExpectedType = S->Context.FloatTy;
+ for (unsigned i = 0; i < TheCall->getNumArgs(); ++i) {
+ QualType PassedType = TheCall->getArg(i)->getType();
+ if (!PassedType->hasFloatingRepresentation()) {
+ if (auto *VecTyA = PassedType->getAs<VectorType>())
+ ExpectedType = S->Context.getVectorType(
+ ExpectedType, VecTyA->getNumElements(), VecTyA->getVectorKind());
+ S->Diag(TheCall->getArg(0)->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << PassedType << ExpectedType << 1 << 0 << 0;
+ return true;
+ }
+ }
+ return false;
+}
+
// Note: returning true in this case results in CheckBuiltinFunctionCall
// returning an ExprError
bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
@@ -5247,6 +5280,24 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return true;
break;
}
+ case Builtin::BI__builtin_hlsl_elementwise_frac: {
+ if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return true;
+ if (CheckAllArgsHaveFloatRepresentation(this, TheCall))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_lerp: {
+ if (checkArgCount(*this, TheCall, 3))
+ return true;
+ if (CheckVectorElementCallArgs(this, TheCall))
+ return true;
+ if (SemaBuiltinElementwiseTernaryMath(TheCall))
+ return true;
+ if (CheckAllArgsHaveFloatRepresentation(this, TheCall))
+ return true;
+ break;
+ }
}
return false;
}
@@ -9423,7 +9474,7 @@ bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
(!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
- << TheCall->getDirectCallee()
+ << TheCall->getDirectCallee() << /*isMoreThanTwoArgs*/ false
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc());
}
@@ -9431,7 +9482,7 @@ bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
// Check the first two arguments are the same type.
if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
- << TheCall->getDirectCallee()
+ << TheCall->getDirectCallee() << /*isMoreThanTwoArgs*/ false
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc());
}
@@ -9467,7 +9518,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
if (!LHSType->isVectorType() || !RHSType->isVectorType())
return ExprError(
Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
- << TheCall->getDirectCallee()
+ << TheCall->getDirectCallee() << /*isMoreThanTwoArgs*/ false
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc()));
@@ -9483,12 +9534,14 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee()
+ << /*isMoreThanTwoArgs*/ false
<< SourceRange(TheCall->getArg(1)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc()));
} else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee()
+ << /*isMoreThanTwoArgs*/ false
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc()));
} else if (numElements != numResElements) {
@@ -16538,6 +16591,20 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
}
}
+ // Complain if we are converting a lambda expression to a boolean value
+ // outside of instantiation.
+ if (!inTemplateInstantiation()) {
+ if (const auto *MCallExpr = dyn_cast<CXXMemberCallExpr>(E)) {
+ if (const auto *MRecordDecl = MCallExpr->getRecordDecl();
+ MRecordDecl && MRecordDecl->isLambda()) {
+ Diag(E->getExprLoc(), diag::warn_impcast_pointer_to_bool)
+ << /*LambdaPointerConversionOperatorType=*/3
+ << MRecordDecl->getSourceRange() << Range << IsEqual;
+ return;
+ }
+ }
+ }
+
// Expect to find a single Decl. Skip anything more complicated.
ValueDecl *D = nullptr;
if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 9fdd8eb236d1..6289cf75e174 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -2044,7 +2044,8 @@ static bool ShouldDiagnoseUnusedDecl(const LangOptions &LangOpts,
return false;
if (Init) {
- const auto *Construct = dyn_cast<CXXConstructExpr>(Init);
+ const auto *Construct =
+ dyn_cast<CXXConstructExpr>(Init->IgnoreImpCasts());
if (Construct && !Construct->isElidable()) {
const CXXConstructorDecl *CD = Construct->getConstructor();
if (!CD->isTrivial() && !RD->hasAttr<WarnUnusedAttr>() &&
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index c1c28a73fd79..397b5db0dc06 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -2053,12 +2053,6 @@ static void handleTLSModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (S.Context.getTargetInfo().getTriple().isOSAIX() &&
- Model == "local-dynamic") {
- S.Diag(LiteralLoc, diag::err_aix_attr_unsupported_tls_model) << Model;
- return;
- }
-
D->addAttr(::new (S.Context) TLSModelAttr(S.Context, AL, Model));
}
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index d4e1dc67cb50..5bbe381f5c4c 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -7294,7 +7294,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
bool CanPass = canPassInRegisters(*this, Record, CCK);
// Do not change ArgPassingRestrictions if it has already been set to
- // ArgPassingKind::CanNeverPassInRegs.
+ // RecordArgPassingKind::CanNeverPassInRegs.
if (Record->getArgPassingRestrictions() !=
RecordArgPassingKind::CanNeverPassInRegs)
Record->setArgPassingRestrictions(
diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp
index 2011f4084dd2..94a245f0f905 100644
--- a/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/clang/lib/Sema/SemaDeclObjC.cpp
@@ -2233,12 +2233,16 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
Diag(IVI->getLocation(), diag::err_inconsistent_ivar_count);
}
+static bool shouldWarnUndefinedMethod(const ObjCMethodDecl *M) {
+ // No point warning no definition of method which is 'unavailable'.
+ return M->getAvailability() != AR_Unavailable;
+}
+
static void WarnUndefinedMethod(Sema &S, ObjCImplDecl *Impl,
ObjCMethodDecl *method, bool &IncompleteImpl,
unsigned DiagID,
NamedDecl *NeededFor = nullptr) {
- // No point warning no definition of method which is 'unavailable'.
- if (method->getAvailability() == AR_Unavailable)
+ if (!shouldWarnUndefinedMethod(method))
return;
// FIXME: For now ignore 'IncompleteImpl'.
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 2a0e86c37f1b..0a449fc1082b 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -17772,7 +17772,6 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
DiagKind = diag::err_typecheck_incompatible_address_space;
break;
-
} else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) {
DiagKind = diag::err_typecheck_incompatible_ownership;
break;
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index 59758d3bd6d1..c34a40fa7c81 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -890,6 +890,12 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
+ // Exceptions that escape a compute construct are ill-formed.
+ if (getLangOpts().OpenACC && getCurScope() &&
+ getCurScope()->isInOpenACCComputeConstructScope(Scope::TryScope))
+ Diag(OpLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*throw*/ 2 << /*out of*/ 0;
+
if (Ex && !Ex->isTypeDependent()) {
// Initialize the exception result. This implicitly weeds out
// abstract types or types with inaccessible copy constructors.
@@ -1220,7 +1226,7 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
: nullptr;
}
}
- return ASTCtx.getPointerType(ClassType);
+ return ThisTy;
}
QualType Sema::getCurrentThisType() {
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 7f75cfc5b54f..f4364a259ad5 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -4962,7 +4962,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
if (RC->getModifier() != OMPC_REDUCTION_inscan)
continue;
for (Expr *E : RC->copy_array_temps())
- MarkDeclarationsReferencedInExpr(E);
+ if (E)
+ MarkDeclarationsReferencedInExpr(E);
}
if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
for (Expr *E : AC->varlists())
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index 0a5c2b23a90c..e72397adec24 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -527,6 +527,13 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal,
return StmtError();
}
+ if (LangOpts.OpenACC &&
+ getCurScope()->isInOpenACCComputeConstructScope(Scope::SwitchScope)) {
+ Diag(CaseLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*branch*/ 0 << /*into*/ 1;
+ return StmtError();
+ }
+
auto *CS = CaseStmt::Create(Context, LHSVal.get(), RHSVal.get(),
CaseLoc, DotDotDotLoc, ColonLoc);
getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(CS);
@@ -546,6 +553,13 @@ Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
return SubStmt;
}
+ if (LangOpts.OpenACC &&
+ getCurScope()->isInOpenACCComputeConstructScope(Scope::SwitchScope)) {
+ Diag(DefaultLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*branch*/ 0 << /*into*/ 1;
+ return StmtError();
+ }
+
DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(DS);
return DS;
@@ -567,6 +581,11 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
Diag(IdentLoc, diag::warn_reserved_extern_symbol)
<< TheDecl << static_cast<int>(Status);
+ // If this label is in a compute construct scope, we need to make sure we
+ // check gotos in/out.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
// Otherwise, things are good. Fill in the declaration and return it.
LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
TheDecl->setStmt(LS);
@@ -3304,6 +3323,12 @@ StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl) {
setFunctionHasBranchIntoScope();
+
+ // If this goto is in a compute construct scope, we need to make sure we check
+ // gotos in/out.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
TheDecl->markUsed(Context);
return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
}
@@ -3332,6 +3357,11 @@ Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
setFunctionHasIndirectGoto();
+ // If this goto is in a compute construct scope, we need to make sure we
+ // check gotos in/out.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
}
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index e91033dd8868..a7910bda874c 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -3141,12 +3141,14 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
diag::note_template_param_prev_default_arg_in_other_module)
<< PrevModuleName;
Invalid = true;
- } else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) {
- // C++ [temp.param]p11:
- // If a template-parameter of a class template has a default
- // template-argument, each subsequent template-parameter shall either
- // have a default template-argument supplied or be a template parameter
- // pack.
+ } else if (MissingDefaultArg &&
+ (TPC == TPC_ClassTemplate || TPC == TPC_FriendClassTemplate ||
+ TPC == TPC_VarTemplate || TPC == TPC_TypeAliasTemplate)) {
+ // C++ 23[temp.param]p14:
+ // If a template-parameter of a class template, variable template, or
+ // alias template has a default template argument, each subsequent
+ // template-parameter shall either have a default template argument
+ // supplied or be a template parameter pack.
Diag((*NewParam)->getLocation(),
diag::err_template_param_default_arg_missing);
Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg);
diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 65bdc4cac309..0208f94e1b5a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -174,6 +174,9 @@ using FnCheck = std::function<void(const StreamChecker *, const FnDescription *,
using ArgNoTy = unsigned int;
static const ArgNoTy ArgNone = std::numeric_limits<ArgNoTy>::max();
+const char *FeofNote = "Assuming stream reaches end-of-file here";
+const char *FerrorNote = "Assuming this stream operation fails";
+
struct FnDescription {
FnCheck PreFn;
FnCheck EvalFn;
@@ -218,87 +221,6 @@ inline void assertStreamStateOpened(const StreamState *SS) {
assert(SS->isOpened() && "Stream is expected to be opened");
}
-struct StreamOperationEvaluator {
- SValBuilder &SVB;
- const ASTContext &ACtx;
-
- SymbolRef StreamSym;
- const StreamState *SS = nullptr;
- const CallExpr *CE = nullptr;
-
- StreamOperationEvaluator(CheckerContext &C)
- : SVB(C.getSValBuilder()), ACtx(C.getASTContext()) {
- ;
- }
-
- bool Init(const FnDescription *Desc, const CallEvent &Call, CheckerContext &C,
- ProgramStateRef State) {
- StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return false;
- SS = State->get<StreamMap>(StreamSym);
- if (!SS)
- return false;
- CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return false;
-
- assertStreamStateOpened(SS);
-
- return true;
- }
-
- bool isStreamEof() const { return SS->ErrorState == ErrorFEof; }
-
- NonLoc getZeroVal(const CallEvent &Call) {
- return *SVB.makeZeroVal(Call.getResultType()).getAs<NonLoc>();
- }
-
- ProgramStateRef setStreamState(ProgramStateRef State,
- const StreamState &NewSS) {
- return State->set<StreamMap>(StreamSym, NewSS);
- }
-
- ProgramStateRef makeAndBindRetVal(ProgramStateRef State, CheckerContext &C) {
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
- return State->BindExpr(CE, C.getLocationContext(), RetVal);
- }
-
- ProgramStateRef bindReturnValue(ProgramStateRef State, CheckerContext &C,
- uint64_t Val) {
- return State->BindExpr(CE, C.getLocationContext(),
- SVB.makeIntVal(Val, CE->getCallReturnType(ACtx)));
- }
-
- ProgramStateRef bindReturnValue(ProgramStateRef State, CheckerContext &C,
- SVal Val) {
- return State->BindExpr(CE, C.getLocationContext(), Val);
- }
-
- ProgramStateRef bindNullReturnValue(ProgramStateRef State,
- CheckerContext &C) {
- return State->BindExpr(CE, C.getLocationContext(),
- C.getSValBuilder().makeNullWithType(CE->getType()));
- }
-
- ProgramStateRef assumeBinOpNN(ProgramStateRef State,
- BinaryOperator::Opcode Op, NonLoc LHS,
- NonLoc RHS) {
- auto Cond = SVB.evalBinOpNN(State, Op, LHS, RHS, SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- if (!Cond)
- return nullptr;
- return State->assume(*Cond, true);
- }
-
- ConstraintManager::ProgramStatePair
- makeRetValAndAssumeDual(ProgramStateRef State, CheckerContext &C) {
- DefinedSVal RetVal = makeRetVal(C, CE);
- State = State->BindExpr(CE, C.getLocationContext(), RetVal);
- return C.getConstraintManager().assumeDual(State, RetVal);
- }
-};
-
class StreamChecker : public Checker<check::PreCall, eval::Call,
check::DeadSymbols, check::PointerEscape> {
BugType BT_FileNull{this, "NULL stream pointer", "Stream handling error"};
@@ -322,11 +244,59 @@ public:
const CallEvent *Call,
PointerEscapeKind Kind) const;
+ const BugType *getBT_StreamEof() const { return &BT_StreamEof; }
+ const BugType *getBT_IndeterminatePosition() const {
+ return &BT_IndeterminatePosition;
+ }
+
+ const NoteTag *constructSetEofNoteTag(CheckerContext &C,
+ SymbolRef StreamSym) const {
+ return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(StreamSym) ||
+ &BR.getBugType() != this->getBT_StreamEof())
+ return "";
+
+ BR.markNotInteresting(StreamSym);
+
+ return FeofNote;
+ });
+ }
+
+ const NoteTag *constructSetErrorNoteTag(CheckerContext &C,
+ SymbolRef StreamSym) const {
+ return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(StreamSym) ||
+ &BR.getBugType() != this->getBT_IndeterminatePosition())
+ return "";
+
+ BR.markNotInteresting(StreamSym);
+
+ return FerrorNote;
+ });
+ }
+
+ const NoteTag *constructSetEofOrErrorNoteTag(CheckerContext &C,
+ SymbolRef StreamSym) const {
+ return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(StreamSym))
+ return "";
+
+ if (&BR.getBugType() == this->getBT_StreamEof()) {
+ BR.markNotInteresting(StreamSym);
+ return FeofNote;
+ }
+ if (&BR.getBugType() == this->getBT_IndeterminatePosition()) {
+ BR.markNotInteresting(StreamSym);
+ return FerrorNote;
+ }
+
+ return "";
+ });
+ }
+
/// If true, evaluate special testing stream functions.
bool TestMode = false;
- const BugType *getBT_StreamEof() const { return &BT_StreamEof; }
-
private:
CallDescriptionMap<FnDescription> FnDescriptions = {
{{{"fopen"}, 2}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
@@ -557,8 +527,8 @@ private:
/// Generate a message for BugReporterVisitor if the stored symbol is
/// marked as interesting by the actual bug report.
- const NoteTag *constructNoteTag(CheckerContext &C, SymbolRef StreamSym,
- const std::string &Message) const {
+ const NoteTag *constructLeakNoteTag(CheckerContext &C, SymbolRef StreamSym,
+ const std::string &Message) const {
return C.getNoteTag([this, StreamSym,
Message](PathSensitiveBugReport &BR) -> std::string {
if (BR.isInteresting(StreamSym) && &BR.getBugType() == &BT_ResourceLeak)
@@ -567,19 +537,6 @@ private:
});
}
- const NoteTag *constructSetEofNoteTag(CheckerContext &C,
- SymbolRef StreamSym) const {
- return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
- if (!BR.isInteresting(StreamSym) ||
- &BR.getBugType() != this->getBT_StreamEof())
- return "";
-
- BR.markNotInteresting(StreamSym);
-
- return "Assuming stream reaches end-of-file here";
- });
- }
-
void initMacroValues(CheckerContext &C) const {
if (EofVal)
return;
@@ -607,6 +564,102 @@ private:
CheckerContext &C);
};
+struct StreamOperationEvaluator {
+ SValBuilder &SVB;
+ const ASTContext &ACtx;
+
+ SymbolRef StreamSym;
+ const StreamState *SS = nullptr;
+ const CallExpr *CE = nullptr;
+ StreamErrorState NewES;
+
+ StreamOperationEvaluator(CheckerContext &C)
+ : SVB(C.getSValBuilder()), ACtx(C.getASTContext()) {
+ ;
+ }
+
+ bool Init(const FnDescription *Desc, const CallEvent &Call, CheckerContext &C,
+ ProgramStateRef State) {
+ StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return false;
+ SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return false;
+ NewES = SS->ErrorState;
+ CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
+ assertStreamStateOpened(SS);
+
+ return true;
+ }
+
+ bool isStreamEof() const { return SS->ErrorState == ErrorFEof; }
+
+ NonLoc getZeroVal(const CallEvent &Call) {
+ return *SVB.makeZeroVal(Call.getResultType()).getAs<NonLoc>();
+ }
+
+ ProgramStateRef setStreamState(ProgramStateRef State,
+ const StreamState &NewSS) {
+ NewES = NewSS.ErrorState;
+ return State->set<StreamMap>(StreamSym, NewSS);
+ }
+
+ ProgramStateRef makeAndBindRetVal(ProgramStateRef State, CheckerContext &C) {
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ return State->BindExpr(CE, C.getLocationContext(), RetVal);
+ }
+
+ ProgramStateRef bindReturnValue(ProgramStateRef State, CheckerContext &C,
+ uint64_t Val) {
+ return State->BindExpr(CE, C.getLocationContext(),
+ SVB.makeIntVal(Val, CE->getCallReturnType(ACtx)));
+ }
+
+ ProgramStateRef bindReturnValue(ProgramStateRef State, CheckerContext &C,
+ SVal Val) {
+ return State->BindExpr(CE, C.getLocationContext(), Val);
+ }
+
+ ProgramStateRef bindNullReturnValue(ProgramStateRef State,
+ CheckerContext &C) {
+ return State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeNullWithType(CE->getType()));
+ }
+
+ ProgramStateRef assumeBinOpNN(ProgramStateRef State,
+ BinaryOperator::Opcode Op, NonLoc LHS,
+ NonLoc RHS) {
+ auto Cond = SVB.evalBinOpNN(State, Op, LHS, RHS, SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return nullptr;
+ return State->assume(*Cond, true);
+ }
+
+ ConstraintManager::ProgramStatePair
+ makeRetValAndAssumeDual(ProgramStateRef State, CheckerContext &C) {
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+ return C.getConstraintManager().assumeDual(State, RetVal);
+ }
+
+ const NoteTag *getFailureNoteTag(const StreamChecker *Ch, CheckerContext &C) {
+ bool SetFeof = NewES.FEof && !SS->ErrorState.FEof;
+ bool SetFerror = NewES.FError && !SS->ErrorState.FError;
+ if (SetFeof && !SetFerror)
+ return Ch->constructSetEofNoteTag(C, StreamSym);
+ if (!SetFeof && SetFerror)
+ return Ch->constructSetErrorNoteTag(C, StreamSym);
+ if (SetFeof && SetFerror)
+ return Ch->constructSetEofOrErrorNoteTag(C, StreamSym);
+ return nullptr;
+ }
+};
+
} // end anonymous namespace
const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
@@ -697,7 +750,7 @@ void StreamChecker::evalFopen(const FnDescription *Desc, const CallEvent &Call,
StateNull->set<StreamMap>(RetSym, StreamState::getOpenFailed(Desc));
C.addTransition(StateNotNull,
- constructNoteTag(C, RetSym, "Stream opened here"));
+ constructLeakNoteTag(C, RetSym, "Stream opened here"));
C.addTransition(StateNull);
}
@@ -755,7 +808,7 @@ void StreamChecker::evalFreopen(const FnDescription *Desc,
StateRetNull->set<StreamMap>(StreamSym, StreamState::getOpenFailed(Desc));
C.addTransition(StateRetNotNull,
- constructNoteTag(C, StreamSym, "Stream reopened here"));
+ constructLeakNoteTag(C, StreamSym, "Stream reopened here"));
C.addTransition(StateRetNull);
}
@@ -867,10 +920,7 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
// indicator for the stream is indeterminate.
StateFailed = E.setStreamState(
StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
- if (IsFread && !E.isStreamEof())
- C.addTransition(StateFailed, constructSetEofNoteTag(C, E.StreamSym));
- else
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
@@ -929,10 +979,7 @@ void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
E.isStreamEof() ? ErrorFEof : ErrorFEof | ErrorFError;
StateFailed = E.setStreamState(
StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
- if (!E.isStreamEof())
- C.addTransition(StateFailed, constructSetEofNoteTag(C, E.StreamSym));
- else
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFputx(const FnDescription *Desc, const CallEvent &Call,
@@ -974,7 +1021,7 @@ void StreamChecker::evalFputx(const FnDescription *Desc, const CallEvent &Call,
ProgramStateRef StateFailed = E.bindReturnValue(State, C, *EofVal);
StateFailed = E.setStreamState(
StateFailed, StreamState::getOpened(Desc, ErrorFError, true));
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFprintf(const FnDescription *Desc,
@@ -1008,7 +1055,7 @@ void StreamChecker::evalFprintf(const FnDescription *Desc,
// position indicator for the stream is indeterminate.
StateFailed = E.setStreamState(
StateFailed, StreamState::getOpened(Desc, ErrorFError, true));
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
@@ -1058,10 +1105,7 @@ void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
E.isStreamEof() ? ErrorFEof : ErrorNone | ErrorFEof | ErrorFError;
StateFailed = E.setStreamState(
StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
- if (!E.isStreamEof())
- C.addTransition(StateFailed, constructSetEofNoteTag(C, E.StreamSym));
- else
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalUngetc(const FnDescription *Desc, const CallEvent &Call,
@@ -1129,10 +1173,7 @@ void StreamChecker::evalGetdelim(const FnDescription *Desc,
E.isStreamEof() ? ErrorFEof : ErrorFEof | ErrorFError;
StateFailed = E.setStreamState(
StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
- if (E.isStreamEof())
- C.addTransition(StateFailed, constructSetEofNoteTag(C, E.StreamSym));
- else
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
@@ -1184,7 +1225,7 @@ void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
NewErrS = NewErrS | ErrorFEof;
StateFailed = E.setStreamState(StateFailed,
StreamState::getOpened(Desc, NewErrS, true));
- C.addTransition(StateFailed, constructSetEofNoteTag(C, E.StreamSym));
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFgetpos(const FnDescription *Desc,
@@ -1228,7 +1269,7 @@ void StreamChecker::evalFsetpos(const FnDescription *Desc,
StateFailed, StreamState::getOpened(Desc, ErrorNone | ErrorFError, true));
C.addTransition(StateNotFailed);
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFtell(const FnDescription *Desc, const CallEvent &Call,
@@ -1541,18 +1582,22 @@ ProgramStateRef StreamChecker::ensureNoFilePositionIndeterminate(
if (!N)
return nullptr;
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- BT_IndeterminatePosition, BugMessage, N));
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT_IndeterminatePosition, BugMessage, N);
+ R->markInteresting(Sym);
+ C.emitReport(std::move(R));
return State->set<StreamMap>(
Sym, StreamState::getOpened(SS->LastOperation, ErrorFEof, false));
}
// Known or unknown error state without FEOF possible.
// Stop analysis, report error.
- ExplodedNode *N = C.generateErrorNode(State);
- if (N)
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- BT_IndeterminatePosition, BugMessage, N));
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT_IndeterminatePosition, BugMessage, N);
+ R->markInteresting(Sym);
+ C.emitReport(std::move(R));
+ }
return nullptr;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index defd83ec8e17..01b191ab0eea 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -310,8 +310,12 @@ public:
return true;
if (isa<EnumConstantDecl>(decl))
return true;
- if (auto *VD = dyn_cast<VarDecl>(decl))
- return VD->hasConstantInitialization() && VD->getEvaluatedValue();
+ if (auto *VD = dyn_cast<VarDecl>(decl)) {
+ if (VD->hasConstantInitialization() && VD->getEvaluatedValue())
+ return true;
+ auto *Init = VD->getInit();
+ return !Init || Visit(Init);
+ }
}
return false;
}
diff --git a/clang/test/AST/Interp/arrays.cpp b/clang/test/AST/Interp/arrays.cpp
index e1af2e80e3ad..2bf6e9ef3511 100644
--- a/clang/test/AST/Interp/arrays.cpp
+++ b/clang/test/AST/Interp/arrays.cpp
@@ -564,3 +564,8 @@ namespace LocalVLA {
#endif
}
}
+
+char melchizedek[2200000000];
+typedef decltype(melchizedek[1] - melchizedek[0]) ptrdiff_t;
+constexpr ptrdiff_t d1 = &melchizedek[0x7fffffff] - &melchizedek[0]; // ok
+constexpr ptrdiff_t d3 = &melchizedek[0] - &melchizedek[0x80000000u]; // ok
diff --git a/clang/test/AST/Interp/c.c b/clang/test/AST/Interp/c.c
index 2a72c24b43d1..260e5bdfeefb 100644
--- a/clang/test/AST/Interp/c.c
+++ b/clang/test/AST/Interp/c.c
@@ -33,15 +33,15 @@ const int b = 3;
_Static_assert(b == 3, ""); // pedantic-ref-warning {{not an integer constant expression}} \
// pedantic-expected-warning {{not an integer constant expression}}
-/// FIXME: The new interpreter is missing the "initializer of 'c' unknown" diagnostics.
-const int c; // ref-note {{declared here}} \
- // pedantic-ref-note {{declared here}}
+const int c; // all-note {{declared here}}
_Static_assert(c == 0, ""); // ref-error {{not an integral constant expression}} \
// ref-note {{initializer of 'c' is unknown}} \
// pedantic-ref-error {{not an integral constant expression}} \
// pedantic-ref-note {{initializer of 'c' is unknown}} \
// expected-error {{not an integral constant expression}} \
- // pedantic-expected-error {{not an integral constant expression}}
+ // expected-note {{initializer of 'c' is unknown}} \
+ // pedantic-expected-error {{not an integral constant expression}} \
+ // pedantic-expected-note {{initializer of 'c' is unknown}}
_Static_assert(&c != 0, ""); // ref-warning {{always true}} \
// pedantic-ref-warning {{always true}} \
diff --git a/clang/test/AST/Interp/cxx11.cpp b/clang/test/AST/Interp/cxx11.cpp
index 0a1e0f3fd28e..993e3618a378 100644
--- a/clang/test/AST/Interp/cxx11.cpp
+++ b/clang/test/AST/Interp/cxx11.cpp
@@ -22,3 +22,11 @@ int array2[recurse2]; // both-warning {{variable length arrays in C++}} \
// both-note {{initializer of 'recurse2' is not a constant expression}} \
// expected-error {{variable length array declaration not allowed at file scope}} \
// ref-warning {{variable length array folded to constant array as an extension}}
+
+struct S {
+ int m;
+};
+constexpr S s = { 5 };
+constexpr const int *p = &s.m + 1;
+
+constexpr const int *np2 = &(*(int(*)[4])nullptr)[0]; // ok
diff --git a/clang/test/AST/Interp/cxx20.cpp b/clang/test/AST/Interp/cxx20.cpp
index 5c9c62579651..000ffe39eb94 100644
--- a/clang/test/AST/Interp/cxx20.cpp
+++ b/clang/test/AST/Interp/cxx20.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -fcxx-exceptions -fexperimental-new-constant-interpreter -std=c++20 -verify %s
-// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -verify=ref %s
+// RUN: %clang_cc1 -fcxx-exceptions -fexperimental-new-constant-interpreter -std=c++20 -verify=both,expected -fcxx-exceptions %s
+// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -verify=both,ref -fcxx-exceptions %s
void test_alignas_operand() {
alignas(8) char dummy;
@@ -58,13 +58,10 @@ static_assert(pointerAssign2() == 12, "");
constexpr int unInitLocal() {
int a;
- return a; // ref-note {{read of uninitialized object}} \
- // expected-note {{read of uninitialized object}}
+ return a; // both-note {{read of uninitialized object}}
}
-static_assert(unInitLocal() == 0, ""); // ref-error {{not an integral constant expression}} \
- // ref-note {{in call to 'unInitLocal()'}} \
- // expected-error {{not an integral constant expression}} \
- // expected-note {{in call to 'unInitLocal()'}} \
+static_assert(unInitLocal() == 0, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to 'unInitLocal()'}}
constexpr int initializedLocal() {
int a;
@@ -75,25 +72,19 @@ static_assert(initializedLocal() == 20);
constexpr int initializedLocal2() {
int a[2];
- return *a; // expected-note {{read of uninitialized object is not allowed in a constant expression}} \
- // ref-note {{read of uninitialized object is not allowed in a constant expression}}
+ return *a; // both-note {{read of uninitialized object is not allowed in a constant expression}}
}
-static_assert(initializedLocal2() == 20); // expected-error {{not an integral constant expression}} \
- // expected-note {{in call to}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{in call to}}
+static_assert(initializedLocal2() == 20); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
struct Int { int a; };
constexpr int initializedLocal3() {
Int i;
- return i.a; // ref-note {{read of uninitialized object is not allowed in a constant expression}} \
- // expected-note {{read of uninitialized object}}
+ return i.a; // both-note {{read of uninitialized object is not allowed in a constant expression}}
}
-static_assert(initializedLocal3() == 20); // expected-error {{not an integral constant expression}} \
- // expected-note {{in call to}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{in call to}}
+static_assert(initializedLocal3() == 20); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
@@ -137,22 +128,16 @@ static_assert(!b4); // ref-error {{not an integral constant expression}} \
namespace UninitializedFields {
class A {
public:
- int a; // expected-note 4{{subobject declared here}} \
- // ref-note 4{{subobject declared here}}
+ int a; // both-note 4{{subobject declared here}}
constexpr A() {}
};
- constexpr A a; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'a' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'a' is not initialized}}
- constexpr A aarr[2]; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'a' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'a' is not initialized}}
+ constexpr A a; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'a' is not initialized}}
+ constexpr A aarr[2]; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'a' is not initialized}}
class F {
public:
- int f; // expected-note 3{{subobject declared here}} \
- // ref-note 3{{subobject declared here}}
+ int f; // both-note 3{{subobject declared here}}
constexpr F() {}
constexpr F(bool b) {
@@ -161,26 +146,19 @@ namespace UninitializedFields {
}
};
- constexpr F foo[2] = {true}; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'f' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'f' is not initialized}}
- constexpr F foo2[3] = {true, false, true}; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'f' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'f' is not initialized}}
- constexpr F foo3[3] = {true, true, F()}; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'f' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'f' is not initialized}}
+ constexpr F foo[2] = {true}; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'f' is not initialized}}
+ constexpr F foo2[3] = {true, false, true}; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'f' is not initialized}}
+ constexpr F foo3[3] = {true, true, F()}; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'f' is not initialized}}
class Base {
public:
bool b;
- int a; // expected-note {{subobject declared here}} \
- // ref-note {{subobject declared here}}
+ int a; // both-note {{subobject declared here}}
constexpr Base() : b(true) {}
};
@@ -188,56 +166,44 @@ namespace UninitializedFields {
public:
constexpr Derived() : Base() {} };
- constexpr Derived D; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'a' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'a' is not initialized}}
+ constexpr Derived D; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'a' is not initialized}}
class C2 {
public:
A a;
constexpr C2() {} };
- constexpr C2 c2; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'a' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'a' is not initialized}}
+ constexpr C2 c2; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'a' is not initialized}}
class C3 {
public:
A a[2];
constexpr C3() {}
};
- constexpr C3 c3; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'a' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'a' is not initialized}}
+ constexpr C3 c3; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'a' is not initialized}}
class C4 {
public:
- bool B[2][3]; // expected-note {{subobject declared here}} \
- // ref-note {{subobject declared here}}
+ bool B[2][3]; // both-note {{subobject declared here}}
constexpr C4(){}
};
- constexpr C4 c4; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{subobject 'B' is not initialized}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{subobject 'B' is not initialized}}
+ constexpr C4 c4; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{subobject 'B' is not initialized}}
};
namespace ConstThis {
class Foo {
- const int T = 12; // expected-note {{declared const here}} \
- // ref-note {{declared const here}}
+ const int T = 12; // both-note {{declared const here}}
int a;
public:
constexpr Foo() {
this->a = 10;
- T = 13; // expected-error {{cannot assign to non-static data member 'T' with const-qualified type}} \
- // ref-error {{cannot assign to non-static data member 'T' with const-qualified type}}
+ T = 13; // both-error {{cannot assign to non-static data member 'T' with const-qualified type}}
}
};
- constexpr Foo F; // expected-error {{must be initialized by a constant expression}} \
- // ref-error {{must be initialized by a constant expression}}
+ constexpr Foo F; // both-error {{must be initialized by a constant expression}}
class FooDtor {
@@ -264,8 +230,7 @@ namespace ConstThis {
constexpr ctor_test() {
if (Good)
a = 10;
- int local = 100 / a; // expected-note {{division by zero}} \
- // ref-note {{division by zero}}
+ int local = 100 / a; // both-note {{division by zero}}
}
};
@@ -277,22 +242,17 @@ namespace ConstThis {
constexpr ~dtor_test() {
if (Good)
a = 10;
- int local = 100 / a; // expected-note {{division by zero}} \
- // ref-note {{division by zero}}
+ int local = 100 / a; // both-note {{division by zero}}
}
};
constexpr ctor_test<true> good_ctor;
constexpr dtor_test<true> good_dtor;
- constexpr ctor_test<false> bad_ctor; // expected-error {{must be initialized by a constant expression}} \
- // expected-note {{in call to}} \
- // ref-error {{must be initialized by a constant expression}} \
- // ref-note {{in call to}}
- constexpr dtor_test<false> bad_dtor; // expected-error {{must have constant destruction}} \
- // expected-note {{in call to}} \
- // ref-error {{must have constant destruction}} \
- // ref-note {{in call to}}
+ constexpr ctor_test<false> bad_ctor; // both-error {{must be initialized by a constant expression}} \
+ // both-note {{in call to}}
+ constexpr dtor_test<false> bad_dtor; // both-error {{must have constant destruction}} \
+ // both-note {{in call to}}
};
namespace BaseInit {
@@ -311,10 +271,8 @@ namespace BaseInit {
};
static_assert(Final{1, 2, 3}.c == 3, ""); // OK
- static_assert(Final{1, 2, 3}.a == 0, ""); // expected-error {{not an integral constant expression}} \
- // expected-note {{read of uninitialized object}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{read of uninitialized object}}
+ static_assert(Final{1, 2, 3}.a == 0, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{read of uninitialized object}}
struct Mixin {
@@ -333,10 +291,8 @@ namespace BaseInit {
static_assert(Final2{1, 2, 3}.c == 3, ""); // OK
static_assert(Final2{1, 2, 3}.b == 2, ""); // OK
- static_assert(Final2{1, 2, 3}.a == 0, ""); // expected-error {{not an integral constant expression}} \
- // expected-note {{read of uninitialized object}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{read of uninitialized object}}
+ static_assert(Final2{1, 2, 3}.a == 0, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{read of uninitialized object}}
struct Mixin3 {
@@ -352,10 +308,8 @@ namespace BaseInit {
static_assert(Final3{1, 2, 3}.c == 3, ""); // OK
static_assert(Final3{1, 2, 3}.b == 2, ""); // OK
- static_assert(Final3{1, 2, 3}.a == 0, ""); // expected-error {{not an integral constant expression}} \
- // expected-note {{read of uninitialized object}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{read of uninitialized object}}
+ static_assert(Final3{1, 2, 3}.a == 0, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{read of uninitialized object}}
};
namespace Destructors {
@@ -633,16 +587,13 @@ namespace ImplicitFunction {
/// The operator= call here will fail and the diagnostics should be fine.
b = a; // ref-note {{subobject 'a' is not initialized}} \
- // ref-note {{in call to}} \
// expected-note {{read of uninitialized object}} \
- // expected-note {{in call to}}
+ // both-note {{in call to}}
return 1;
}
- static_assert(callMe() == 1, ""); // ref-error {{not an integral constant expression}} \
- // ref-note {{in call to 'callMe()'}} \
- // expected-error {{not an integral constant expression}} \
- // expected-note {{in call to 'callMe()'}}
+ static_assert(callMe() == 1, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to 'callMe()'}}
}
/// FIXME: Unfortunately, the similar tests in test/SemaCXX/{compare-cxx2a.cpp use member pointers,
@@ -680,8 +631,7 @@ namespace ThreeWayCmp {
static_assert(1.0 <=> 2.f == -1, "");
static_assert(1.0 <=> 1.0 == 0, "");
static_assert(2.0 <=> 1.0 == 1, "");
- constexpr int k = (1 <=> 1, 0); // expected-warning {{comparison result unused}} \
- // ref-warning {{comparison result unused}}
+ constexpr int k = (1 <=> 1, 0); // both-warning {{comparison result unused}}
static_assert(k== 0, "");
/// Pointers.
@@ -690,10 +640,8 @@ namespace ThreeWayCmp {
constexpr const int *pa1 = &a[1];
constexpr const int *pa2 = &a[2];
constexpr const int *pb1 = &b[1];
- static_assert(pa1 <=> pb1 != 0, ""); // expected-error {{not an integral constant expression}} \
- // expected-note {{has unspecified value}} \
- // ref-error {{not an integral constant expression}} \
- // ref-note {{has unspecified value}}
+ static_assert(pa1 <=> pb1 != 0, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{has unspecified value}} \
static_assert(pa1 <=> pa1 == 0, "");
static_assert(pa1 <=> pa2 == -1, "");
static_assert(pa2 <=> pa1 == 1, "");
@@ -799,3 +747,30 @@ void f2() {
// access info for unnamed bit-field
}
}
+
+namespace FailingDestructor {
+ struct D {
+ int n;
+ bool can_destroy;
+
+ constexpr ~D() {
+ if (!can_destroy)
+ throw "oh no";
+ }
+ };
+ template<D d>
+ void f() {} // both-note {{invalid explicitly-specified argument}}
+
+ void g() {
+ f<D{0, false}>(); // both-error {{no matching function}}
+ }
+}
+
+
+void overflowInSwitchCase(int n) {
+ switch (n) {
+ case (int)(float)1e300: // both-error {{constant expression}} \
+ // both-note {{value +Inf is outside the range of representable values of type 'int'}}
+ break;
+ }
+}
diff --git a/clang/test/AST/Interp/cxx98.cpp b/clang/test/AST/Interp/cxx98.cpp
index 1acc74a8290a..73e453720663 100644
--- a/clang/test/AST/Interp/cxx98.cpp
+++ b/clang/test/AST/Interp/cxx98.cpp
@@ -18,12 +18,13 @@ template struct C<cval>;
/// FIXME: This example does not get properly diagnosed in the new interpreter.
extern const int recurse1;
-const int recurse2 = recurse1; // ref-note {{here}}
+const int recurse2 = recurse1; // both-note {{declared here}}
const int recurse1 = 1;
int array1[recurse1];
int array2[recurse2]; // ref-warning 2{{variable length array}} \
// ref-note {{initializer of 'recurse2' is not a constant expression}} \
// expected-warning {{variable length array}} \
+ // expected-note {{read of non-const variable 'recurse2'}} \
// expected-error {{variable length array}}
int NCI; // both-note {{declared here}}
diff --git a/clang/test/AST/Interp/functions.cpp b/clang/test/AST/Interp/functions.cpp
index 9daf8722050f..38f761f563be 100644
--- a/clang/test/AST/Interp/functions.cpp
+++ b/clang/test/AST/Interp/functions.cpp
@@ -555,3 +555,13 @@ namespace Local {
return t;
}
}
+
+namespace VariadicOperator {
+ struct Callable {
+ float& operator()(...);
+ };
+
+ void test_callable(Callable c) {
+ float &fr = c(10);
+ }
+}
diff --git a/clang/test/AST/Interp/lambda.cpp b/clang/test/AST/Interp/lambda.cpp
index a5e0d0f1fd9f..d056bb304eeb 100644
--- a/clang/test/AST/Interp/lambda.cpp
+++ b/clang/test/AST/Interp/lambda.cpp
@@ -235,3 +235,16 @@ namespace LambdaToAPValue {
static_assert(g() == f(), "");
}
}
+
+namespace ns2_capture_this_byval {
+ struct S {
+ int s;
+ constexpr S(int s) : s{s} { }
+ constexpr auto f(S o) {
+ return [*this,o] (auto a) { return s + o.s + a.s; };
+ }
+ };
+
+ constexpr auto L = S{5}.f(S{10});
+ static_assert(L(S{100}) == 115, "");
+} // end test_captures_1::ns2_capture_this_byval
diff --git a/clang/test/AST/Interp/literals.cpp b/clang/test/AST/Interp/literals.cpp
index 8ea1c1155143..10b687c1408a 100644
--- a/clang/test/AST/Interp/literals.cpp
+++ b/clang/test/AST/Interp/literals.cpp
@@ -1131,3 +1131,40 @@ namespace nullptrsub {
f = (char *)((char *)0 - (char *)0);
}
}
+
+namespace incdecbool {
+#if __cplusplus >= 201402L
+ constexpr bool incb(bool c) {
+ if (!c)
+ ++c;
+ else {++c; c++; }
+#if __cplusplus >= 202002L
+ // both-error@-3 {{ISO C++17 does not allow incrementing expression of type bool}}
+ // both-error@-3 2{{ISO C++17 does not allow incrementing expression of type bool}}
+#else
+ // both-warning@-6 {{incrementing expression of type bool is deprecated and incompatible with C++17}}
+#endif
+ return c;
+ }
+ static_assert(incb(false), "");
+ static_assert(incb(true), "");
+ static_assert(incb(true) == 1, "");
+#endif
+
+
+#if __cplusplus == 201103L
+ constexpr bool foo() { // both-error {{never produces a constant expression}}
+ bool b = true; // both-warning {{variable declaration in a constexpr function is a C++14 extension}}
+ b++; // both-warning {{incrementing expression of type bool is deprecated and incompatible with C++17}} \
+ // both-warning {{use of this statement in a constexpr function is a C++14 extension}} \
+ // both-note 2{{subexpression not valid in a constant expression}}
+
+ return b;
+ }
+ static_assert(foo() == 1, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+#endif
+
+
+
+}
diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
index ac16a31293f3..80a9a263dab1 100644
--- a/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
@@ -199,6 +199,8 @@ public:
bool trivial23() const { return OptionSet<Flags>::fromRaw(v).contains(Flags::Flag1); }
int trivial24() const { ASSERT(v); return v; }
unsigned trivial25() const { return __c11_atomic_load((volatile _Atomic(unsigned) *)&v, __ATOMIC_RELAXED); }
+ bool trivial26() { bool hasValue = v; return !hasValue; }
+ bool trivial27(int v) { bool value; value = v ? 1 : 0; return value; }
static RefCounted& singleton() {
static RefCounted s_RefCounted;
@@ -262,6 +264,15 @@ public:
return __c11_atomic_load((volatile _Atomic(unsigned) *)another(), __ATOMIC_RELAXED);
}
+ void nonTrivial11() {
+ Number num(0.3);
+ }
+
+ bool nonTrivial12() {
+ bool val = otherFunction();
+ return val;
+ }
+
unsigned v { 0 };
Number* number { nullptr };
Enum enumValue { Enum::Value1 };
@@ -309,6 +320,8 @@ public:
getFieldTrivial().trivial23(); // no-warning
getFieldTrivial().trivial24(); // no-warning
getFieldTrivial().trivial25(); // no-warning
+ getFieldTrivial().trivial26(); // no-warning
+ getFieldTrivial().trivial27(5); // no-warning
RefCounted::singleton().trivial18(); // no-warning
RefCounted::singleton().someFunction(); // no-warning
@@ -334,6 +347,10 @@ public:
// expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
getFieldTrivial().nonTrivial10();
// expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial11();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().nonTrivial12();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
}
};
diff --git a/clang/test/Analysis/stream-note.c b/clang/test/Analysis/stream-note.c
index abb4784c078a..f77cd4aa6284 100644
--- a/clang/test/Analysis/stream-note.c
+++ b/clang/test/Analysis/stream-note.c
@@ -166,3 +166,70 @@ void check_eof_notes_feof_or_no_error(void) {
}
fclose(F);
}
+
+void check_indeterminate_notes(void) {
+ FILE *F;
+ F = fopen("foo1.c", "r");
+ if (F == NULL) // expected-note {{Taking false branch}} \
+ // expected-note {{'F' is not equal to NULL}}
+ return;
+ int R = fgetc(F); // no note
+ if (R >= 0) { // expected-note {{Taking true branch}} \
+ // expected-note {{'R' is >= 0}}
+ fgetc(F); // expected-note {{Assuming this stream operation fails}}
+ if (ferror(F)) // expected-note {{Taking true branch}}
+ fgetc(F); // expected-warning {{File position of the stream might be 'indeterminate' after a failed operation. Can cause undefined behavior}} \
+ // expected-note {{File position of the stream might be 'indeterminate' after a failed operation. Can cause undefined behavior}}
+ }
+ fclose(F);
+}
+
+void check_indeterminate_after_clearerr(void) {
+ FILE *F;
+ char Buf[10];
+ F = fopen("foo1.c", "r");
+ if (F == NULL) // expected-note {{Taking false branch}} \
+ // expected-note {{'F' is not equal to NULL}}
+ return;
+ fread(Buf, 1, 1, F); // expected-note {{Assuming this stream operation fails}}
+ if (ferror(F)) { // expected-note {{Taking true branch}}
+ clearerr(F);
+ fread(Buf, 1, 1, F); // expected-warning {{might be 'indeterminate' after a failed operation}} \
+ // expected-note {{might be 'indeterminate' after a failed operation}}
+ }
+ fclose(F);
+}
+
+void check_indeterminate_eof(void) {
+ FILE *F;
+ char Buf[2];
+ F = fopen("foo1.c", "r");
+ if (F == NULL) // expected-note {{Taking false branch}} \
+ // expected-note {{'F' is not equal to NULL}} \
+ // expected-note {{Taking false branch}} \
+ // expected-note {{'F' is not equal to NULL}}
+ return;
+ fgets(Buf, sizeof(Buf), F); // expected-note {{Assuming this stream operation fails}} \
+ // expected-note {{Assuming stream reaches end-of-file here}}
+
+ fgets(Buf, sizeof(Buf), F); // expected-warning {{might be 'indeterminate'}} \
+ // expected-note {{might be 'indeterminate'}} \
+ // expected-warning {{stream is in EOF state}} \
+ // expected-note {{stream is in EOF state}}
+ fclose(F);
+}
+
+void check_indeterminate_fseek(void) {
+ FILE *F = fopen("file", "r");
+ if (!F) // expected-note {{Taking false branch}} \
+ // expected-note {{'F' is non-null}}
+ return;
+ int Ret = fseek(F, 1, SEEK_SET); // expected-note {{Assuming this stream operation fails}}
+ if (Ret) { // expected-note {{Taking true branch}} \
+ // expected-note {{'Ret' is not equal to 0}}
+ char Buf[2];
+ fwrite(Buf, 1, 2, F); // expected-warning {{might be 'indeterminate'}} \
+ // expected-note {{might be 'indeterminate'}}
+ }
+ fclose(F);
+}
diff --git a/clang/test/CXX/drs/dr18xx.cpp b/clang/test/CXX/drs/dr18xx.cpp
index a7cee4ef8902..e78730e8992c 100644
--- a/clang/test/CXX/drs/dr18xx.cpp
+++ b/clang/test/CXX/drs/dr18xx.cpp
@@ -282,6 +282,7 @@ namespace dr1837 { // dr1837: 3.3
struct A {
int f();
bool b = [] {
+ // since-cxx11-warning@-1 {{address of lambda function pointer conversion operator will always evaluate to 'true'}}
struct Local {
static_assert(sizeof(this->f()) == sizeof(int), "");
};
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm b/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm
index cb56f6816ad0..e93c37f3b9ae 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm
@@ -65,10 +65,10 @@ void nesting() {
namespace overloading {
void bool_conversion() {
- if ([](){}) {
+ if ([](){}) { // expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
}
- bool b = []{};
+ bool b = []{}; // expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
b = (bool)[]{};
}
@@ -108,8 +108,9 @@ namespace overloading {
using decltype(a)::operator id<void(*)()>; // expected-note {{here}}
} extern d;
- bool r1 = c;
- bool r2 = d; // expected-error {{private}}
+ bool r1 = c; // expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+ bool r2 = d; // expected-error {{private}} \
+ expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
}
namespace PR13117 {
diff --git a/clang/test/CodeGen/PowerPC/aix-tls-model.cpp b/clang/test/CodeGen/PowerPC/aix-tls-model.cpp
index 9fdd6855a89e..cd0a08aa9a3b 100644
--- a/clang/test/CodeGen/PowerPC/aix-tls-model.cpp
+++ b/clang/test/CodeGen/PowerPC/aix-tls-model.cpp
@@ -1,11 +1,11 @@
// RUN: %clang_cc1 %s -triple powerpc-unknown-aix -target-cpu pwr8 -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-GD
// RUN: %clang_cc1 %s -triple powerpc-unknown-aix -target-cpu pwr8 -ftls-model=global-dynamic -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-GD
-// RUN: not %clang_cc1 %s -triple powerpc-unknown-aix -target-cpu pwr8 -ftls-model=local-dynamic -emit-llvm 2>&1 | FileCheck %s -check-prefix=CHECK-LD-ERROR
+// RUN: %clang_cc1 %s -triple powerpc-unknown-aix -target-cpu pwr8 -ftls-model=local-dynamic -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-LD
// RUN: %clang_cc1 %s -triple powerpc-unknown-aix -target-cpu pwr8 -ftls-model=initial-exec -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-IE
// RUN: %clang_cc1 %s -triple powerpc-unknown-aix -target-cpu pwr8 -ftls-model=local-exec -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-LE
// RUN: %clang_cc1 %s -triple powerpc64-unknown-aix -target-cpu pwr8 -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-GD
// RUN: %clang_cc1 %s -triple powerpc64-unknown-aix -target-cpu pwr8 -ftls-model=global-dynamic -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-GD
-// RUN: not %clang_cc1 %s -triple powerpc64-unknown-aix -target-cpu pwr8 -ftls-model=local-dynamic -emit-llvm 2>&1 | FileCheck %s -check-prefix=CHECK-LD-ERROR
+// RUN: %clang_cc1 %s -triple powerpc64-unknown-aix -target-cpu pwr8 -ftls-model=local-dynamic -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-LD
// RUN: %clang_cc1 %s -triple powerpc64-unknown-aix -target-cpu pwr8 -ftls-model=initial-exec -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-IE
// RUN: %clang_cc1 %s -triple powerpc64-unknown-aix -target-cpu pwr8 -ftls-model=local-exec -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-LE
@@ -21,7 +21,10 @@ int f() {
// CHECK-GD: @z2 ={{.*}} global i32 0
// CHECK-GD: @x ={{.*}} thread_local global i32 0
// CHECK-GD: @_ZZ1fvE1y = internal thread_local global i32 0
-// CHECK-LD-ERROR: error: TLS model 'local-dynamic' is not yet supported on AIX
+// CHECK-LD: @z1 ={{.*}} global i32 0
+// CHECK-LD: @z2 ={{.*}} global i32 0
+// CHECK-LD: @x ={{.*}} thread_local(localdynamic) global i32 0
+// CHECK-LD: @_ZZ1fvE1y = internal thread_local(localdynamic) global i32 0
// CHECK-IE: @z1 ={{.*}} global i32 0
// CHECK-IE: @z2 ={{.*}} global i32 0
// CHECK-IE: @x ={{.*}} thread_local(initialexec) global i32 0
diff --git a/clang/test/CodeGen/aarch64-cpu-supports.c b/clang/test/CodeGen/aarch64-cpu-supports.c
index 872fec6827ef..c54b7475a3fd 100644
--- a/clang/test/CodeGen/aarch64-cpu-supports.c
+++ b/clang/test/CodeGen/aarch64-cpu-supports.c
@@ -34,6 +34,11 @@
// CHECK-NEXT: store i32 3, ptr [[RETVAL]], align 4
// CHECK-NEXT: br label [[RETURN]]
// CHECK: if.end4:
+// CHECK-NEXT: br i1 false, label [[IF_THEN5:%.*]], label [[IF_END6:%.*]]
+// CHECK: if.then5:
+// CHECK-NEXT: store i32 4, ptr [[RETVAL]], align 4
+// CHECK-NEXT: br label [[RETURN]]
+// CHECK: if.end6:
// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-NEXT: br label [[RETURN]]
// CHECK: return:
@@ -50,5 +55,8 @@ int main(void) {
if (__builtin_cpu_supports("sme2+ls64_v+wfxt"))
return 3;
+ if (__builtin_cpu_supports("avx2"))
+ return 4;
+
return 0;
}
diff --git a/clang/test/CodeGen/attr-target-clones-aarch64.c b/clang/test/CodeGen/attr-target-clones-aarch64.c
index 5ea3f4a9b0b1..276a7b87b7a1 100644
--- a/clang/test/CodeGen/attr-target-clones-aarch64.c
+++ b/clang/test/CodeGen/attr-target-clones-aarch64.c
@@ -43,7 +43,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK: @ftc_inline3 = weak_odr ifunc i32 (), ptr @ftc_inline3.resolver
//.
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc._MlseMaes(
+// CHECK-LABEL: @ftc._MaesMlse(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 0
//
@@ -69,7 +69,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @ftc._MlseMaes
+// CHECK-NEXT: ret ptr @ftc._MaesMlse
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 68719476736
@@ -89,7 +89,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_def._Msha2Mmemtag2(
+// CHECK-LABEL: @ftc_def._Mmemtag2Msha2(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
@@ -109,7 +109,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @ftc_def._Msha2Mmemtag2
+// CHECK-NEXT: ret ptr @ftc_def._Mmemtag2Msha2
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 4096
@@ -155,7 +155,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_dup2._MdotprodMcrc(
+// CHECK-LABEL: @ftc_dup2._McrcMdotprod(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
//
@@ -175,7 +175,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @ftc_dup2._MdotprodMcrc
+// CHECK-NEXT: ret ptr @ftc_dup2._McrcMdotprod
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 256
@@ -239,7 +239,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
// CHECK: resolver_return1:
-// CHECK-NEXT: ret ptr @ftc_inline1._MrcpcMpredres
+// CHECK-NEXT: ret ptr @ftc_inline1._MpredresMrcpc
// CHECK: resolver_else2:
// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 513
@@ -283,7 +283,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @ftc_inline3._MsveMsb
+// CHECK-NEXT: ret ptr @ftc_inline3._MsbMsve
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 1125899906842624
@@ -303,7 +303,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline1._MrcpcMpredres(
+// CHECK-LABEL: @ftc_inline1._MpredresMrcpc(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
@@ -345,7 +345,7 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline3._MsveMsb(
+// CHECK-LABEL: @ftc_inline3._MsbMsve(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
//
diff --git a/clang/test/CodeGen/attr-target-version.c b/clang/test/CodeGen/attr-target-version.c
index c27d48f3ecf6..56a42499d0a7 100644
--- a/clang/test/CodeGen/attr-target-version.c
+++ b/clang/test/CodeGen/attr-target-version.c
@@ -25,7 +25,7 @@ int foo() {
}
inline int __attribute__((target_version("sha1+pmull+f64mm"))) fmv_inline(void) { return 1; }
-inline int __attribute__((target_version("fp16+fcma+sme+ fp16 "))) fmv_inline(void) { return 2; }
+inline int __attribute__((target_version("fp16+fcma+rdma+sme+ fp16 "))) fmv_inline(void) { return 2; }
inline int __attribute__((target_version("sha3+i8mm+f32mm"))) fmv_inline(void) { return 12; }
inline int __attribute__((target_version("dit+sve-ebf16"))) fmv_inline(void) { return 8; }
inline int __attribute__((target_version("dpb+rcpc2 "))) fmv_inline(void) { return 6; }
@@ -106,14 +106,14 @@ int hoo(void) {
// CHECK: @fmv_c = weak_odr ifunc void (), ptr @fmv_c.resolver
//.
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv._MrngMflagmMfp16fml
+// CHECK-LABEL: define {{[^@]+}}@fmv._MflagmMfp16fmlMrng
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_one._MsimdMls64
+// CHECK-LABEL: define {{[^@]+}}@fmv_one._Mls64Msimd
// CHECK-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
@@ -147,7 +147,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @fmv._MrngMflagmMfp16fml
+// CHECK-NEXT: ret ptr @fmv._MflagmMfp16fmlMrng
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 72057594037927940
@@ -187,7 +187,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP23:%.*]] = and i1 true, [[TMP22]]
// CHECK-NEXT: br i1 [[TMP23]], label [[RESOLVER_RETURN9:%.*]], label [[RESOLVER_ELSE10:%.*]]
// CHECK: resolver_return9:
-// CHECK-NEXT: ret ptr @fmv._MfpMaes
+// CHECK-NEXT: ret ptr @fmv._MaesMfp
// CHECK: resolver_else10:
// CHECK-NEXT: [[TMP24:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP25:%.*]] = and i64 [[TMP24]], 4224
@@ -218,12 +218,12 @@ int hoo(void) {
//
// CHECK-LABEL: define {{[^@]+}}@fmv_one.resolver() comdat {
// CHECK-NEXT: resolver_entry:
-// CHECK-NEXT: ret ptr @fmv_one._MsimdMls64
+// CHECK-NEXT: ret ptr @fmv_one._Mls64Msimd
//
//
// CHECK-LABEL: define {{[^@]+}}@fmv_two.resolver() comdat {
// CHECK-NEXT: resolver_entry:
-// CHECK-NEXT: ret ptr @fmv_two._MsimdMfp16
+// CHECK-NEXT: ret ptr @fmv_two._Mfp16Msimd
//
//
// CHECK-LABEL: define {{[^@]+}}@fmv_e.resolver() comdat {
@@ -261,12 +261,12 @@ int hoo(void) {
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
-// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 4398048608256
-// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 4398048608256
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 4398048608320
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 4398048608320
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @fmv_inline._Mfp16Mfp16MfcmaMsme
+// CHECK-NEXT: ret ptr @fmv_inline._MfcmaMfp16Mfp16MrdmMsme
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 864726312827224064
@@ -274,7 +274,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
// CHECK: resolver_return1:
-// CHECK-NEXT: ret ptr @fmv_inline._Mrcpc3Mmemtag3Mmops
+// CHECK-NEXT: ret ptr @fmv_inline._Mmemtag3MmopsMrcpc3
// CHECK: resolver_else2:
// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 893353197568
@@ -282,7 +282,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
// CHECK-NEXT: br i1 [[TMP11]], label [[RESOLVER_RETURN3:%.*]], label [[RESOLVER_ELSE4:%.*]]
// CHECK: resolver_return3:
-// CHECK-NEXT: ret ptr @fmv_inline._Msve2Msve2-pmull128Msve2-bitperm
+// CHECK-NEXT: ret ptr @fmv_inline._Msve2Msve2-bitpermMsve2-pmull128
// CHECK: resolver_else4:
// CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], 34359773184
@@ -290,7 +290,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP15:%.*]] = and i1 true, [[TMP14]]
// CHECK-NEXT: br i1 [[TMP15]], label [[RESOLVER_RETURN5:%.*]], label [[RESOLVER_ELSE6:%.*]]
// CHECK: resolver_return5:
-// CHECK-NEXT: ret ptr @fmv_inline._Msha1MpmullMf64mm
+// CHECK-NEXT: ret ptr @fmv_inline._Mf64mmMpmullMsha1
// CHECK: resolver_else6:
// CHECK-NEXT: [[TMP16:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP17:%.*]] = and i64 [[TMP16]], 17246986240
@@ -298,7 +298,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP19:%.*]] = and i1 true, [[TMP18]]
// CHECK-NEXT: br i1 [[TMP19]], label [[RESOLVER_RETURN7:%.*]], label [[RESOLVER_ELSE8:%.*]]
// CHECK: resolver_return7:
-// CHECK-NEXT: ret ptr @fmv_inline._Msha3Mi8mmMf32mm
+// CHECK-NEXT: ret ptr @fmv_inline._Mf32mmMi8mmMsha3
// CHECK: resolver_else8:
// CHECK-NEXT: [[TMP20:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP21:%.*]] = and i64 [[TMP20]], 19791209299968
@@ -306,7 +306,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP23:%.*]] = and i1 true, [[TMP22]]
// CHECK-NEXT: br i1 [[TMP23]], label [[RESOLVER_RETURN9:%.*]], label [[RESOLVER_ELSE10:%.*]]
// CHECK: resolver_return9:
-// CHECK-NEXT: ret ptr @fmv_inline._Msve2-sm4Mmemtag2
+// CHECK-NEXT: ret ptr @fmv_inline._Mmemtag2Msve2-sm4
// CHECK: resolver_else10:
// CHECK-NEXT: [[TMP24:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP25:%.*]] = and i64 [[TMP24]], 1236950581248
@@ -338,7 +338,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP39:%.*]] = and i1 true, [[TMP38]]
// CHECK-NEXT: br i1 [[TMP39]], label [[RESOLVER_RETURN17:%.*]], label [[RESOLVER_ELSE18:%.*]]
// CHECK: resolver_return17:
-// CHECK-NEXT: ret ptr @fmv_inline._MrcpcMfrintts
+// CHECK-NEXT: ret ptr @fmv_inline._MfrinttsMrcpc
// CHECK: resolver_else18:
// CHECK-NEXT: [[TMP40:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP41:%.*]] = and i64 [[TMP40]], 8650752
@@ -362,7 +362,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP51:%.*]] = and i1 true, [[TMP50]]
// CHECK-NEXT: br i1 [[TMP51]], label [[RESOLVER_RETURN23:%.*]], label [[RESOLVER_ELSE24:%.*]]
// CHECK: resolver_return23:
-// CHECK-NEXT: ret ptr @fmv_inline._MsimdMfp16fml
+// CHECK-NEXT: ret ptr @fmv_inline._Mfp16fmlMsimd
// CHECK: resolver_else24:
// CHECK-NEXT: [[TMP52:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP53:%.*]] = and i64 [[TMP52]], 16400
@@ -370,7 +370,7 @@ int hoo(void) {
// CHECK-NEXT: [[TMP55:%.*]] = and i1 true, [[TMP54]]
// CHECK-NEXT: br i1 [[TMP55]], label [[RESOLVER_RETURN25:%.*]], label [[RESOLVER_ELSE26:%.*]]
// CHECK: resolver_return25:
-// CHECK-NEXT: ret ptr @fmv_inline._MdotprodMaes
+// CHECK-NEXT: ret ptr @fmv_inline._MaesMdotprod
// CHECK: resolver_else26:
// CHECK-NEXT: [[TMP56:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP57:%.*]] = and i64 [[TMP56]], 192
@@ -484,7 +484,7 @@ int hoo(void) {
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv._MfpMaes
+// CHECK-LABEL: define {{[^@]+}}@fmv._MaesMfp
// CHECK-SAME: () #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 6
@@ -547,7 +547,7 @@ int hoo(void) {
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_two._MsimdMfp16
+// CHECK-LABEL: define {{[^@]+}}@fmv_two._Mfp16Msimd
// CHECK-SAME: () #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 4
@@ -568,21 +568,21 @@ int hoo(void) {
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Msha1MpmullMf64mm
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mf64mmMpmullMsha1
// CHECK-SAME: () #[[ATTR12:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mfp16Mfp16MfcmaMsme
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MfcmaMfp16Mfp16MrdmMsme
// CHECK-SAME: () #[[ATTR13:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Msha3Mi8mmMf32mm
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mf32mmMi8mmMsha3
// CHECK-SAME: () #[[ATTR14:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 12
@@ -610,7 +610,7 @@ int hoo(void) {
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MrcpcMfrintts
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MfrinttsMrcpc
// CHECK-SAME: () #[[ATTR18:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
@@ -631,35 +631,35 @@ int hoo(void) {
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Msve2Msve2-pmull128Msve2-bitperm
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Msve2Msve2-bitpermMsve2-pmull128
// CHECK-SAME: () #[[ATTR21:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 9
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Msve2-sm4Mmemtag2
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mmemtag2Msve2-sm4
// CHECK-SAME: () #[[ATTR22:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 10
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mrcpc3Mmemtag3Mmops
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mmemtag3MmopsMrcpc3
// CHECK-SAME: () #[[ATTR23:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 11
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MdotprodMaes
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MaesMdotprod
// CHECK-SAME: () #[[ATTR6]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 13
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MsimdMfp16fml
+// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mfp16fmlMsimd
// CHECK-SAME: () #[[ATTR7]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 14
@@ -829,7 +829,7 @@ int hoo(void) {
// CHECK: attributes #[[ATTR10]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+fullfp16,+ls64,+sme,+sme2" }
// CHECK: attributes #[[ATTR11]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccpp,+fullfp16,+ls64" }
// CHECK: attributes #[[ATTR12]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+aes,+f64mm,+fp-armv8,+fullfp16,+ls64,+neon,+sve" }
-// CHECK: attributes #[[ATTR13]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+complxnum,+fp-armv8,+fullfp16,+ls64,+neon,+sme" }
+// CHECK: attributes #[[ATTR13]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+complxnum,+fp-armv8,+fullfp16,+ls64,+neon,+rdm,+sme" }
// CHECK: attributes #[[ATTR14]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+f32mm,+fp-armv8,+fullfp16,+i8mm,+ls64,+neon,+sha2,+sha3,+sve" }
// CHECK: attributes #[[ATTR15]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+dit,+fp-armv8,+fullfp16,+ls64,+neon,+sve" }
// CHECK: attributes #[[ATTR16]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccpp,+fullfp16,+ls64,+rcpc" }
diff --git a/clang/test/CodeGen/builtins-hexagon.c b/clang/test/CodeGen/builtins-hexagon.c
index 9a1b733da5cd..52073f27ae70 100644
--- a/clang/test/CodeGen/builtins-hexagon.c
+++ b/clang/test/CodeGen/builtins-hexagon.c
@@ -1,5 +1,5 @@
// REQUIRES: hexagon-registered-target
-// RUN: %clang_cc1 -triple hexagon-unknown-elf -target-cpu hexagonv65 -target-feature +hvxv65 -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple hexagon-unknown-elf -target-cpu hexagonv65 -target-feature +hvxv65 -target-feature +hvx-length128b -emit-llvm %s -o - | FileCheck %s
void test() {
int v64 __attribute__((__vector_size__(64)));
diff --git a/clang/test/CodeGen/builtins.c b/clang/test/CodeGen/builtins.c
index 73866116e07e..4f9641d357b7 100644
--- a/clang/test/CodeGen/builtins.c
+++ b/clang/test/CodeGen/builtins.c
@@ -948,14 +948,14 @@ void test_builtin_popcountg(unsigned char uc, unsigned short us,
volatile int pop;
pop = __builtin_popcountg(uc);
// CHECK: %1 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %conv = zext i8 %1 to i32
- // CHECK-NEXT: %2 = call i32 @llvm.ctpop.i32(i32 %conv)
- // CHECK-NEXT: store volatile i32 %2, ptr %pop, align 4
+ // CHECK-NEXT: %2 = call i8 @llvm.ctpop.i8(i8 %1)
+ // CHECK-NEXT: %cast = sext i8 %2 to i32
+ // CHECK-NEXT: store volatile i32 %cast, ptr %pop, align 4
pop = __builtin_popcountg(us);
// CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %conv1 = zext i16 %3 to i32
- // CHECK-NEXT: %4 = call i32 @llvm.ctpop.i32(i32 %conv1)
- // CHECK-NEXT: store volatile i32 %4, ptr %pop, align 4
+ // CHECK-NEXT: %4 = call i16 @llvm.ctpop.i16(i16 %3)
+ // CHECK-NEXT: %cast1 = sext i16 %4 to i32
+ // CHECK-NEXT: store volatile i32 %cast1, ptr %pop, align 4
pop = __builtin_popcountg(ui);
// CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
// CHECK-NEXT: %6 = call i32 @llvm.ctpop.i32(i32 %5)
@@ -963,23 +963,23 @@ void test_builtin_popcountg(unsigned char uc, unsigned short us,
pop = __builtin_popcountg(ul);
// CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
// CHECK-NEXT: %8 = call i64 @llvm.ctpop.i64(i64 %7)
- // CHECK-NEXT: %cast = trunc i64 %8 to i32
- // CHECK-NEXT: store volatile i32 %cast, ptr %pop, align 4
+ // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
+ // CHECK-NEXT: store volatile i32 %cast2, ptr %pop, align 4
pop = __builtin_popcountg(ull);
// CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
// CHECK-NEXT: %10 = call i64 @llvm.ctpop.i64(i64 %9)
- // CHECK-NEXT: %cast2 = trunc i64 %10 to i32
- // CHECK-NEXT: store volatile i32 %cast2, ptr %pop, align 4
+ // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
+ // CHECK-NEXT: store volatile i32 %cast3, ptr %pop, align 4
pop = __builtin_popcountg(ui128);
// CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
// CHECK-NEXT: %12 = call i128 @llvm.ctpop.i128(i128 %11)
- // CHECK-NEXT: %cast3 = trunc i128 %12 to i32
- // CHECK-NEXT: store volatile i32 %cast3, ptr %pop, align 4
+ // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
+ // CHECK-NEXT: store volatile i32 %cast4, ptr %pop, align 4
pop = __builtin_popcountg(ubi128);
// CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
// CHECK-NEXT: %14 = call i128 @llvm.ctpop.i128(i128 %13)
- // CHECK-NEXT: %cast4 = trunc i128 %14 to i32
- // CHECK-NEXT: store volatile i32 %cast4, ptr %pop, align 4
+ // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
+ // CHECK-NEXT: store volatile i32 %cast5, ptr %pop, align 4
// CHECK-NEXT: ret void
}
diff --git a/clang/test/CodeGen/fat-lto-objects.c b/clang/test/CodeGen/fat-lto-objects.c
index afce798c5c81..b50567c024fc 100644
--- a/clang/test/CodeGen/fat-lto-objects.c
+++ b/clang/test/CodeGen/fat-lto-objects.c
@@ -11,10 +11,11 @@
// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.full.split.bc %t.full.split.o
// RUN: llvm-dis %t.full.split.bc -o - | FileCheck %s --check-prefixes=FULL,SPLIT,NOUNIFIED
+/// Full LTO always sets EnableSplitLTOUnit when the summary is used.
// RUN: %clang -cc1 -triple x86_64-unknown-linux-gnu -flto=full -ffat-lto-objects -emit-obj < %s -o %t.full.nosplit.o
// RUN: llvm-readelf -S %t.full.nosplit.o | FileCheck %s --check-prefixes=ELF
// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.full.nosplit.bc %t.full.nosplit.o
-// RUN: llvm-dis %t.full.nosplit.bc -o - | FileCheck %s --check-prefixes=FULL,NOSPLIT,NOUNIFIED
+// RUN: llvm-dis %t.full.nosplit.bc -o - | FileCheck %s --check-prefixes=FULL,SPLIT,NOUNIFIED
// RUN: %clang -cc1 -triple x86_64-unknown-linux-gnu -flto=thin -fsplit-lto-unit -ffat-lto-objects -emit-obj < %s -o %t.thin.split.o
// RUN: llvm-readelf -S %t.thin.split.o | FileCheck %s --check-prefixes=ELF
@@ -34,6 +35,21 @@
// RUN: %clang -cc1 -triple x86_64-unknown-linux-gnu -flto=full -ffat-lto-objects -fsplit-lto-unit -S < %s -o - \
// RUN: | FileCheck %s --check-prefixes=ASM
+/// Make sure that FatLTO generates .llvm.lto sections that are the same as the output from normal LTO compilations
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=full -ffat-lto-objects -c %s -o %t.fatlto.full.o
+// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.fatlto.full.bc %t.fatlto.full.o
+// RUN: llvm-dis < %t.fatlto.full.bc -o %t.fatlto.full.ll
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=full -c %s -o %t.nofat.full.bc
+// RUN: llvm-dis < %t.nofat.full.bc -o %t.nofat.full.ll
+// RUN: diff %t.fatlto.full.ll %t.nofat.full.ll
+
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=thin -ffat-lto-objects -c %s -o %t.fatlto.thin.o
+// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.fatlto.thin.bc %t.fatlto.thin.o
+// RUN: llvm-dis < %t.fatlto.thin.bc -o %t.fatlto.thin.ll
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=thin -c %s -o %t.nofat.thin.bc
+// RUN: llvm-dis < %t.nofat.thin.bc -o %t.nofat.thin.ll
+// RUN: diff %t.fatlto.thin.ll %t.nofat.thin.ll
+
/// Be sure we enable split LTO units correctly under -ffat-lto-objects.
// SPLIT: ![[#]] = !{i32 1, !"EnableSplitLTOUnit", i32 1}
// NOSPLIT: ![[#]] = !{i32 1, !"EnableSplitLTOUnit", i32 0}
@@ -51,6 +67,9 @@
// ASM-NEXT: .asciz "BC
// ASM-NEXT: .size .Lllvm.embedded.object
+const char* foo = "foo";
+
int test(void) {
+ const char* bar = "bar";
return 0xabcd;
}
diff --git a/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp b/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp
index 2d3f44894799..14963867798d 100644
--- a/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp
+++ b/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp
@@ -36,22 +36,33 @@ void run_foo_tml() {
}
+
+
+//.
// CHECK: @__aarch64_cpu_features = external dso_local global { i64 }
+// CHECK: @_Z7foo_ovli.ifunc = weak_odr alias i32 (i32), ptr @_Z7foo_ovli
+// CHECK: @_Z7foo_ovlv.ifunc = weak_odr alias i32 (), ptr @_Z7foo_ovlv
+// CHECK: @_ZN7MyClassIssE7foo_tmlEv.ifunc = weak_odr alias i32 (ptr), ptr @_ZN7MyClassIssE7foo_tmlEv
+// CHECK: @_ZN7MyClassIisE7foo_tmlEv.ifunc = weak_odr alias i32 (ptr), ptr @_ZN7MyClassIisE7foo_tmlEv
// CHECK: @_Z7foo_ovli = weak_odr ifunc i32 (i32), ptr @_Z7foo_ovli.resolver
// CHECK: @_Z7foo_ovlv = weak_odr ifunc i32 (), ptr @_Z7foo_ovlv.resolver
// CHECK: @_ZN7MyClassIssE7foo_tmlEv = weak_odr ifunc i32 (ptr), ptr @_ZN7MyClassIssE7foo_tmlEv.resolver
// CHECK: @_ZN7MyClassIisE7foo_tmlEv = weak_odr ifunc i32 (ptr), ptr @_ZN7MyClassIisE7foo_tmlEv.resolver
-
+//.
// CHECK-LABEL: @_Z7foo_ovli._Mfp16Mls64_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[TMP0:%.*]], ptr [[DOTADDR]], align 4
// CHECK-NEXT: ret i32 1
+//
+//
// CHECK-LABEL: @_Z7foo_ovli.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[TMP0:%.*]], ptr [[DOTADDR]], align 4
// CHECK-NEXT: ret i32 1
+//
+//
// CHECK-LABEL: @_Z7foo_ovli.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -63,13 +74,19 @@ void run_foo_tml() {
// CHECK: resolver_return:
// CHECK-NEXT: ret ptr @_Z7foo_ovli._Mfp16Mls64_v
// CHECK: resolver_else:
-// CHECK-NEXT: ret ptr @_Z7foo_ovli
+// CHECK-NEXT: ret ptr @_Z7foo_ovli.default
+//
+//
// CHECK-LABEL: @_Z7foo_ovlv._Mls64Mls64_accdata(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
+//
+//
// CHECK-LABEL: @_Z7foo_ovlv.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
+//
+//
// CHECK-LABEL: @_Z7foo_ovlv.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -82,12 +99,16 @@ void run_foo_tml() {
// CHECK-NEXT: ret ptr @_Z7foo_ovlv._Mls64Mls64_accdata
// CHECK: resolver_else:
// CHECK-NEXT: ret ptr @_Z7foo_ovlv.default
+//
+//
// CHECK-LABEL: @_Z3barv(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call noundef i32 @_Z7foo_ovli(i32 noundef 1)
// CHECK-NEXT: [[CALL1:%.*]] = call noundef i32 @_Z7foo_ovlv()
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], [[CALL1]]
// CHECK-NEXT: ret i32 [[ADD]]
+//
+//
// CHECK-LABEL: @_Z11run_foo_tmlv(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MC1:%.*]] = alloca [[STRUCT_MYCLASS:%.*]], align 1
@@ -99,6 +120,8 @@ void run_foo_tml() {
// CHECK-NEXT: [[CALL2:%.*]] = call noundef i32 @_ZN7MyClassIfsE7foo_tmlEv(ptr noundef nonnull align 1 dereferenceable(1) [[MC3]])
// CHECK-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZN7MyClassIdfE7foo_tmlEv(ptr noundef nonnull align 1 dereferenceable(1) [[MC4]])
// CHECK-NEXT: ret void
+//
+//
// CHECK-LABEL: @_ZN7MyClassIssE7foo_tmlEv.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -108,7 +131,7 @@ void run_foo_tml() {
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @_ZN7MyClassIssE7foo_tmlEv._MssbsMsme-f64f64
+// CHECK-NEXT: ret ptr @_ZN7MyClassIssE7foo_tmlEv._Msme-f64f64Mssbs
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 16777216
@@ -118,7 +141,9 @@ void run_foo_tml() {
// CHECK: resolver_return1:
// CHECK-NEXT: ret ptr @_ZN7MyClassIssE7foo_tmlEv._Mfrintts
// CHECK: resolver_else2:
-// CHECK-NEXT: ret ptr @_ZN7MyClassIssE7foo_tmlEv
+// CHECK-NEXT: ret ptr @_ZN7MyClassIssE7foo_tmlEv.default
+//
+//
// CHECK-LABEL: @_ZN7MyClassIisE7foo_tmlEv.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -128,7 +153,7 @@ void run_foo_tml() {
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @_ZN7MyClassIisE7foo_tmlEv._MssbsMsme-f64f64
+// CHECK-NEXT: ret ptr @_ZN7MyClassIisE7foo_tmlEv._Msme-f64f64Mssbs
// CHECK: resolver_else:
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 16777216
@@ -138,58 +163,79 @@ void run_foo_tml() {
// CHECK: resolver_return1:
// CHECK-NEXT: ret ptr @_ZN7MyClassIisE7foo_tmlEv._Mfrintts
// CHECK: resolver_else2:
-// CHECK-NEXT: ret ptr @_ZN7MyClassIisE7foo_tmlEv
+// CHECK-NEXT: ret ptr @_ZN7MyClassIisE7foo_tmlEv.default
+//
+//
// CHECK-LABEL: @_ZN7MyClassIfsE7foo_tmlEv(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 3
+//
+//
// CHECK-LABEL: @_ZN7MyClassIdfE7foo_tmlEv(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 4
+//
+//
// CHECK-LABEL: @_ZN7MyClassIssE7foo_tmlEv._Mfrintts(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 1
-// CHECK-LABEL: @_ZN7MyClassIssE7foo_tmlEv._MssbsMsme-f64f64(
+//
+//
+// CHECK-LABEL: @_ZN7MyClassIssE7foo_tmlEv._Msme-f64f64Mssbs(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 1
+//
+//
// CHECK-LABEL: @_ZN7MyClassIssE7foo_tmlEv.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 1
+//
+//
// CHECK-LABEL: @_ZN7MyClassIisE7foo_tmlEv._Mfrintts(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 2
-// CHECK-LABEL: @_ZN7MyClassIisE7foo_tmlEv._MssbsMsme-f64f64(
+//
+//
+// CHECK-LABEL: @_ZN7MyClassIisE7foo_tmlEv._Msme-f64f64Mssbs(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 2
+//
+//
// CHECK-LABEL: @_ZN7MyClassIisE7foo_tmlEv.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-NEXT: ret i32 2
-
-// CHECK: attributes #0 = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon" }
-// CHECK: attributes #1 = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
-// CHECK: attributes #2 = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ls64" }
-// CHECK: attributes #3 = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fptoint" }
-// CHECK: attributes #4 = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+sme,+sme-f64f64" }
+//
+//.
+// CHECK: attributes #[[ATTR0:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon" }
+// CHECK: attributes #[[ATTR1:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// CHECK: attributes #[[ATTR2:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ls64" }
+// CHECK: attributes #[[ATTR3:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fptoint" }
+// CHECK: attributes #[[ATTR4:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+sme,+sme-f64f64" }
+//.
+// CHECK: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// CHECK: [[META1:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"}
+//.
diff --git a/clang/test/CodeGenCXX/attr-target-version.cpp b/clang/test/CodeGenCXX/attr-target-version.cpp
index b63815db7e40..82a928a385e1 100644
--- a/clang/test/CodeGenCXX/attr-target-version.cpp
+++ b/clang/test/CodeGenCXX/attr-target-version.cpp
@@ -40,7 +40,7 @@ int bar() {
// CHECK-NEXT: ret i32 1
//
//
-// CHECK-LABEL: @_Z3foov._Msm4Mebf16(
+// CHECK-LABEL: @_Z3foov._Mebf16Msm4(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
//
@@ -101,7 +101,7 @@ int bar() {
// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
// CHECK: resolver_return:
-// CHECK-NEXT: ret ptr @_Z3foov._Msm4Mebf16
+// CHECK-NEXT: ret ptr @_Z3foov._Mebf16Msm4
// CHECK: resolver_else:
// CHECK-NEXT: ret ptr @_Z3foov.default
//
diff --git a/clang/test/CodeGenHLSL/builtins/abs.hlsl b/clang/test/CodeGenHLSL/builtins/abs.hlsl
index 54c9d1a9dded..ad65cab2721a 100644
--- a/clang/test/CodeGenHLSL/builtins/abs.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/abs.hlsl
@@ -1,141 +1,93 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
using hlsl::abs;
#ifdef __HLSL_ENABLE_16_BIT
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.abs.i16(
-int16_t test_abs_int16_t ( int16_t p0 ) {
- return abs ( p0 );
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.abs.v2i16(
-int16_t2 test_abs_int16_t2 ( int16_t2 p0 ) {
- return abs ( p0 );
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.abs.v3i16(
-int16_t3 test_abs_int16_t3 ( int16_t3 p0 ) {
- return abs ( p0 );
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.abs.v4i16(
-int16_t4 test_abs_int16_t4 ( int16_t4 p0 ) {
- return abs ( p0 );
-}
+// NATIVE_HALF: define noundef i16 @
+// NATIVE_HALF: call i16 @llvm.abs.i16(
+int16_t test_abs_int16_t(int16_t p0) { return abs(p0); }
+// NATIVE_HALF: define noundef <2 x i16> @
+// NATIVE_HALF: call <2 x i16> @llvm.abs.v2i16(
+int16_t2 test_abs_int16_t2(int16_t2 p0) { return abs(p0); }
+// NATIVE_HALF: define noundef <3 x i16> @
+// NATIVE_HALF: call <3 x i16> @llvm.abs.v3i16(
+int16_t3 test_abs_int16_t3(int16_t3 p0) { return abs(p0); }
+// NATIVE_HALF: define noundef <4 x i16> @
+// NATIVE_HALF: call <4 x i16> @llvm.abs.v4i16(
+int16_t4 test_abs_int16_t4(int16_t4 p0) { return abs(p0); }
#endif // __HLSL_ENABLE_16_BIT
-// CHECK: define noundef half @
-// CHECK: call half @llvm.fabs.f16(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.fabs.f16(
// NO_HALF: define noundef float @"?test_abs_half@@YA$halff@$halff@@Z"(
// NO_HALF: call float @llvm.fabs.f32(float %0)
-half test_abs_half ( half p0 ) {
- return abs ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.fabs.v2f16(
+half test_abs_half(half p0) { return abs(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.fabs.v2f16(
// NO_HALF: define noundef <2 x float> @"?test_abs_half2@@YAT?$__vector@$halff@$01@__clang@@T12@@Z"(
// NO_HALF: call <2 x float> @llvm.fabs.v2f32(
-half2 test_abs_half2 ( half2 p0 ) {
- return abs ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.fabs.v3f16(
+half2 test_abs_half2(half2 p0) { return abs(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.fabs.v3f16(
// NO_HALF: define noundef <3 x float> @"?test_abs_half3@@YAT?$__vector@$halff@$02@__clang@@T12@@Z"(
// NO_HALF: call <3 x float> @llvm.fabs.v3f32(
-half3 test_abs_half3 ( half3 p0 ) {
- return abs ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.fabs.v4f16(
+half3 test_abs_half3(half3 p0) { return abs(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.fabs.v4f16(
// NO_HALF: define noundef <4 x float> @"?test_abs_half4@@YAT?$__vector@$halff@$03@__clang@@T12@@Z"(
// NO_HALF: call <4 x float> @llvm.fabs.v4f32(
-half4 test_abs_half4 ( half4 p0 ) {
- return abs ( p0 );
-}
+half4 test_abs_half4(half4 p0) { return abs(p0); }
// CHECK: define noundef i32 @
// CHECK: call i32 @llvm.abs.i32(
-// NO_HALF: define noundef i32 @"?test_abs_int@@YAHH@Z"
-int test_abs_int ( int p0 ) {
- return abs ( p0 );
-}
+int test_abs_int(int p0) { return abs(p0); }
// CHECK: define noundef <2 x i32> @
// CHECK: call <2 x i32> @llvm.abs.v2i32(
-int2 test_abs_int2 ( int2 p0 ) {
- return abs ( p0 );
-}
+int2 test_abs_int2(int2 p0) { return abs(p0); }
// CHECK: define noundef <3 x i32> @
// CHECK: call <3 x i32> @llvm.abs.v3i32(
-int3 test_abs_int3 ( int3 p0 ) {
- return abs ( p0 );
-}
+int3 test_abs_int3(int3 p0) { return abs(p0); }
// CHECK: define noundef <4 x i32> @
// CHECK: call <4 x i32> @llvm.abs.v4i32(
-int4 test_abs_int4 ( int4 p0 ) {
- return abs ( p0 );
-}
+int4 test_abs_int4(int4 p0) { return abs(p0); }
// CHECK: define noundef float @
// CHECK: call float @llvm.fabs.f32(
-float test_abs_float ( float p0 ) {
- return abs ( p0 );
-}
+float test_abs_float(float p0) { return abs(p0); }
// CHECK: define noundef <2 x float> @
// CHECK: call <2 x float> @llvm.fabs.v2f32(
-float2 test_abs_float2 ( float2 p0 ) {
- return abs ( p0 );
-}
+float2 test_abs_float2(float2 p0) { return abs(p0); }
// CHECK: define noundef <3 x float> @
// CHECK: call <3 x float> @llvm.fabs.v3f32(
-float3 test_abs_float3 ( float3 p0 ) {
- return abs ( p0 );
-}
+float3 test_abs_float3(float3 p0) { return abs(p0); }
// CHECK: define noundef <4 x float> @
// CHECK: call <4 x float> @llvm.fabs.v4f32(
-float4 test_abs_float4 ( float4 p0 ) {
- return abs ( p0 );
-}
+float4 test_abs_float4(float4 p0) { return abs(p0); }
// CHECK: define noundef i64 @
// CHECK: call i64 @llvm.abs.i64(
-int64_t test_abs_int64_t ( int64_t p0 ) {
- return abs ( p0 );
-}
+int64_t test_abs_int64_t(int64_t p0) { return abs(p0); }
// CHECK: define noundef <2 x i64> @
// CHECK: call <2 x i64> @llvm.abs.v2i64(
-int64_t2 test_abs_int64_t2 ( int64_t2 p0 ) {
- return abs ( p0 );
-}
+int64_t2 test_abs_int64_t2(int64_t2 p0) { return abs(p0); }
// CHECK: define noundef <3 x i64> @
// CHECK: call <3 x i64> @llvm.abs.v3i64(
-int64_t3 test_abs_int64_t3 ( int64_t3 p0 ) {
- return abs ( p0 );
-}
+int64_t3 test_abs_int64_t3(int64_t3 p0) { return abs(p0); }
// CHECK: define noundef <4 x i64> @
// CHECK: call <4 x i64> @llvm.abs.v4i64(
-int64_t4 test_abs_int64_t4 ( int64_t4 p0 ) {
- return abs ( p0 );
-}
+int64_t4 test_abs_int64_t4(int64_t4 p0) { return abs(p0); }
// CHECK: define noundef double @
// CHECK: call double @llvm.fabs.f64(
-double test_abs_double ( double p0 ) {
- return abs ( p0 );
-}
+double test_abs_double(double p0) { return abs(p0); }
// CHECK: define noundef <2 x double> @
// CHECK: call <2 x double> @llvm.fabs.v2f64(
-double2 test_abs_double2 ( double2 p0 ) {
- return abs ( p0 );
-}
+double2 test_abs_double2(double2 p0) { return abs(p0); }
// CHECK: define noundef <3 x double> @
// CHECK: call <3 x double> @llvm.fabs.v3f64(
-double3 test_abs_double3 ( double3 p0 ) {
- return abs ( p0 );
-}
+double3 test_abs_double3(double3 p0) { return abs(p0); }
// CHECK: define noundef <4 x double> @
// CHECK: call <4 x double> @llvm.fabs.v4f64(
-double4 test_abs_double4 ( double4 p0 ) {
- return abs ( p0 );
-}
+double4 test_abs_double4(double4 p0) { return abs(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/ceil.hlsl b/clang/test/CodeGenHLSL/builtins/ceil.hlsl
index f1672816e72b..06d0d4c2cf54 100644
--- a/clang/test/CodeGenHLSL/builtins/ceil.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/ceil.hlsl
@@ -1,79 +1,56 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
using hlsl::ceil;
-// CHECK: define noundef half @
-// CHECK: call half @llvm.ceil.f16(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.ceil.f16(
// NO_HALF: define noundef float @"?test_ceil_half@@YA$halff@$halff@@Z"(
// NO_HALF: call float @llvm.ceil.f32(float %0)
-half test_ceil_half ( half p0 ) {
- return ceil ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.ceil.v2f16(
+half test_ceil_half(half p0) { return ceil(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.ceil.v2f16(
// NO_HALF: define noundef <2 x float> @"?test_ceil_half2@@YAT?$__vector@$halff@$01@__clang@@T12@@Z"(
// NO_HALF: call <2 x float> @llvm.ceil.v2f32(
-half2 test_ceil_half2 ( half2 p0 ) {
- return ceil ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.ceil.v3f16(
+half2 test_ceil_half2(half2 p0) { return ceil(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.ceil.v3f16(
// NO_HALF: define noundef <3 x float> @"?test_ceil_half3@@YAT?$__vector@$halff@$02@__clang@@T12@@Z"(
// NO_HALF: call <3 x float> @llvm.ceil.v3f32(
-half3 test_ceil_half3 ( half3 p0 ) {
- return ceil ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.ceil.v4f16(
+half3 test_ceil_half3(half3 p0) { return ceil(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.ceil.v4f16(
// NO_HALF: define noundef <4 x float> @"?test_ceil_half4@@YAT?$__vector@$halff@$03@__clang@@T12@@Z"(
// NO_HALF: call <4 x float> @llvm.ceil.v4f32(
-half4 test_ceil_half4 ( half4 p0 ) {
- return ceil ( p0 );
-}
+half4 test_ceil_half4(half4 p0) { return ceil(p0); }
// CHECK: define noundef float @
// CHECK: call float @llvm.ceil.f32(
-float test_ceil_float ( float p0 ) {
- return ceil ( p0 );
-}
+float test_ceil_float(float p0) { return ceil(p0); }
// CHECK: define noundef <2 x float> @
// CHECK: call <2 x float> @llvm.ceil.v2f32(
-float2 test_ceil_float2 ( float2 p0 ) {
- return ceil ( p0 );
-}
+float2 test_ceil_float2(float2 p0) { return ceil(p0); }
// CHECK: define noundef <3 x float> @
// CHECK: call <3 x float> @llvm.ceil.v3f32(
-float3 test_ceil_float3 ( float3 p0 ) {
- return ceil ( p0 );
-}
+float3 test_ceil_float3(float3 p0) { return ceil(p0); }
// CHECK: define noundef <4 x float> @
// CHECK: call <4 x float> @llvm.ceil.v4f32(
-float4 test_ceil_float4 ( float4 p0 ) {
- return ceil ( p0 );
-}
+float4 test_ceil_float4(float4 p0) { return ceil(p0); }
// CHECK: define noundef double @
// CHECK: call double @llvm.ceil.f64(
-double test_ceil_double ( double p0 ) {
- return ceil ( p0 );
-}
+double test_ceil_double(double p0) { return ceil(p0); }
// CHECK: define noundef <2 x double> @
// CHECK: call <2 x double> @llvm.ceil.v2f64(
-double2 test_ceil_double2 ( double2 p0 ) {
- return ceil ( p0 );
-}
+double2 test_ceil_double2(double2 p0) { return ceil(p0); }
// CHECK: define noundef <3 x double> @
// CHECK: call <3 x double> @llvm.ceil.v3f64(
-double3 test_ceil_double3 ( double3 p0 ) {
- return ceil ( p0 );
-}
+double3 test_ceil_double3(double3 p0) { return ceil(p0); }
// CHECK: define noundef <4 x double> @
// CHECK: call <4 x double> @llvm.ceil.v4f64(
-double4 test_ceil_double4 ( double4 p0 ) {
- return ceil ( p0 );
-}
+double4 test_ceil_double4(double4 p0) { return ceil(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/cos.hlsl b/clang/test/CodeGenHLSL/builtins/cos.hlsl
index 2fc1571949b2..fb416fcaa49d 100644
--- a/clang/test/CodeGenHLSL/builtins/cos.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/cos.hlsl
@@ -1,56 +1,41 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-// CHECK: define noundef half @
-// CHECK: call half @llvm.cos.f16(
-// NO_HALF: define noundef float @"?test_cos_half@@YA$halff@$halff@@Z"(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.cos.f16(
+// NO_HALF: define noundef float @"?test_cos_half
// NO_HALF: call float @llvm.cos.f32(
-half test_cos_half ( half p0 ) {
- return cos ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.cos.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_cos_float2@@YAT?$__vector@M$01@__clang@@T12@@Z"(
+half test_cos_half(half p0) { return cos(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.cos.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_cos_half2
// NO_HALF: call <2 x float> @llvm.cos.v2f32(
-half2 test_cos_half2 ( half2 p0 ) {
- return cos ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.cos.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_cos_float3@@YAT?$__vector@M$02@__clang@@T12@@Z"(
+half2 test_cos_half2(half2 p0) { return cos(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.cos.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_cos_half3
// NO_HALF: call <3 x float> @llvm.cos.v3f32(
-half3 test_cos_half3 ( half3 p0 ) {
- return cos ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.cos.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_cos_float4@@YAT?$__vector@M$03@__clang@@T12@@Z"(
+half3 test_cos_half3(half3 p0) { return cos(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.cos.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_cos_half4
// NO_HALF: call <4 x float> @llvm.cos.v4f32(
-half4 test_cos_half4 ( half4 p0 ) {
- return cos ( p0 );
-}
+half4 test_cos_half4(half4 p0) { return cos(p0); }
-// CHECK: define noundef float @
+// CHECK: define noundef float @"?test_cos_float
// CHECK: call float @llvm.cos.f32(
-float test_cos_float ( float p0 ) {
- return cos ( p0 );
-}
-// CHECK: define noundef <2 x float> @
+float test_cos_float(float p0) { return cos(p0); }
+// CHECK: define noundef <2 x float> @"?test_cos_float2
// CHECK: call <2 x float> @llvm.cos.v2f32
-float2 test_cos_float2 ( float2 p0 ) {
- return cos ( p0 );
-}
-// CHECK: define noundef <3 x float> @
+float2 test_cos_float2(float2 p0) { return cos(p0); }
+// CHECK: define noundef <3 x float> @"?test_cos_float3
// CHECK: call <3 x float> @llvm.cos.v3f32
-float3 test_cos_float3 ( float3 p0 ) {
- return cos ( p0 );
-}
-// CHECK: define noundef <4 x float> @
+float3 test_cos_float3(float3 p0) { return cos(p0); }
+// CHECK: define noundef <4 x float> @"?test_cos_float4
// CHECK: call <4 x float> @llvm.cos.v4f32
-float4 test_cos_float4 ( float4 p0 ) {
- return cos ( p0 );
-}
+float4 test_cos_float4(float4 p0) { return cos(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/dot.hlsl b/clang/test/CodeGenHLSL/builtins/dot.hlsl
index b2c1bae31d13..c064d118caf3 100644
--- a/clang/test/CodeGenHLSL/builtins/dot.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/dot.hlsl
@@ -9,230 +9,160 @@
#ifdef __HLSL_ENABLE_16_BIT
// NATIVE_HALF: %dx.dot = mul i16 %0, %1
// NATIVE_HALF: ret i16 %dx.dot
-int16_t test_dot_short ( int16_t p0, int16_t p1 ) {
- return dot ( p0, p1 );
-}
+int16_t test_dot_short(int16_t p0, int16_t p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.dot.v2i16(<2 x i16> %0, <2 x i16> %1)
// NATIVE_HALF: ret i16 %dx.dot
-int16_t test_dot_short2 ( int16_t2 p0, int16_t2 p1 ) {
- return dot ( p0, p1 );
-}
+int16_t test_dot_short2(int16_t2 p0, int16_t2 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.dot.v3i16(<3 x i16> %0, <3 x i16> %1)
// NATIVE_HALF: ret i16 %dx.dot
-int16_t test_dot_short3 ( int16_t3 p0, int16_t3 p1 ) {
- return dot ( p0, p1 );
-}
+int16_t test_dot_short3(int16_t3 p0, int16_t3 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.dot.v4i16(<4 x i16> %0, <4 x i16> %1)
// NATIVE_HALF: ret i16 %dx.dot
-int16_t test_dot_short4 ( int16_t4 p0, int16_t4 p1 ) {
- return dot ( p0, p1 );
-}
+int16_t test_dot_short4(int16_t4 p0, int16_t4 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = mul i16 %0, %1
// NATIVE_HALF: ret i16 %dx.dot
-uint16_t test_dot_ushort ( uint16_t p0, uint16_t p1 ) {
- return dot ( p0, p1 );
-}
+uint16_t test_dot_ushort(uint16_t p0, uint16_t p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.dot.v2i16(<2 x i16> %0, <2 x i16> %1)
// NATIVE_HALF: ret i16 %dx.dot
-uint16_t test_dot_ushort2 ( uint16_t2 p0, uint16_t2 p1 ) {
- return dot ( p0, p1 );
-}
+uint16_t test_dot_ushort2(uint16_t2 p0, uint16_t2 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.dot.v3i16(<3 x i16> %0, <3 x i16> %1)
// NATIVE_HALF: ret i16 %dx.dot
-uint16_t test_dot_ushort3 ( uint16_t3 p0, uint16_t3 p1 ) {
- return dot ( p0, p1 );
-}
+uint16_t test_dot_ushort3(uint16_t3 p0, uint16_t3 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.dot.v4i16(<4 x i16> %0, <4 x i16> %1)
// NATIVE_HALF: ret i16 %dx.dot
-uint16_t test_dot_ushort4 ( uint16_t4 p0, uint16_t4 p1 ) {
- return dot ( p0, p1 );
-}
+uint16_t test_dot_ushort4(uint16_t4 p0, uint16_t4 p1) { return dot(p0, p1); }
#endif
// CHECK: %dx.dot = mul i32 %0, %1
// CHECK: ret i32 %dx.dot
-int test_dot_int ( int p0, int p1 ) {
- return dot ( p0, p1 );
-}
+int test_dot_int(int p0, int p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i32 @llvm.dx.dot.v2i32(<2 x i32> %0, <2 x i32> %1)
// CHECK: ret i32 %dx.dot
-int test_dot_int2 ( int2 p0, int2 p1 ) {
- return dot ( p0, p1 );
-}
+int test_dot_int2(int2 p0, int2 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i32 @llvm.dx.dot.v3i32(<3 x i32> %0, <3 x i32> %1)
// CHECK: ret i32 %dx.dot
-int test_dot_int3 ( int3 p0, int3 p1 ) {
- return dot ( p0, p1 );
-}
+int test_dot_int3(int3 p0, int3 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i32 @llvm.dx.dot.v4i32(<4 x i32> %0, <4 x i32> %1)
// CHECK: ret i32 %dx.dot
-int test_dot_int4 ( int4 p0, int4 p1 ) {
- return dot ( p0, p1 );
-}
+int test_dot_int4(int4 p0, int4 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = mul i32 %0, %1
// CHECK: ret i32 %dx.dot
-uint test_dot_uint ( uint p0, uint p1 ) {
- return dot ( p0, p1 );
-}
+uint test_dot_uint(uint p0, uint p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i32 @llvm.dx.dot.v2i32(<2 x i32> %0, <2 x i32> %1)
// CHECK: ret i32 %dx.dot
-uint test_dot_uint2 ( uint2 p0, uint2 p1 ) {
- return dot ( p0, p1 );
-}
+uint test_dot_uint2(uint2 p0, uint2 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i32 @llvm.dx.dot.v3i32(<3 x i32> %0, <3 x i32> %1)
// CHECK: ret i32 %dx.dot
-uint test_dot_uint3 ( uint3 p0, uint3 p1 ) {
- return dot ( p0, p1 );
-}
+uint test_dot_uint3(uint3 p0, uint3 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i32 @llvm.dx.dot.v4i32(<4 x i32> %0, <4 x i32> %1)
// CHECK: ret i32 %dx.dot
-uint test_dot_uint4 ( uint4 p0, uint4 p1 ) {
- return dot ( p0, p1 );
-}
+uint test_dot_uint4(uint4 p0, uint4 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = mul i64 %0, %1
// CHECK: ret i64 %dx.dot
-int64_t test_dot_long ( int64_t p0, int64_t p1 ) {
- return dot ( p0, p1 );
-}
+int64_t test_dot_long(int64_t p0, int64_t p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i64 @llvm.dx.dot.v2i64(<2 x i64> %0, <2 x i64> %1)
// CHECK: ret i64 %dx.dot
-int64_t test_dot_long2 ( int64_t2 p0, int64_t2 p1 ) {
- return dot ( p0, p1 );
-}
+int64_t test_dot_long2(int64_t2 p0, int64_t2 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i64 @llvm.dx.dot.v3i64(<3 x i64> %0, <3 x i64> %1)
// CHECK: ret i64 %dx.dot
-int64_t test_dot_long3 ( int64_t3 p0, int64_t3 p1 ) {
- return dot ( p0, p1 );
-}
+int64_t test_dot_long3(int64_t3 p0, int64_t3 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i64 @llvm.dx.dot.v4i64(<4 x i64> %0, <4 x i64> %1)
// CHECK: ret i64 %dx.dot
-int64_t test_dot_long4 ( int64_t4 p0, int64_t4 p1 ) {
- return dot ( p0, p1 );
-}
+int64_t test_dot_long4(int64_t4 p0, int64_t4 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = mul i64 %0, %1
// CHECK: ret i64 %dx.dot
-uint64_t test_dot_ulong ( uint64_t p0, uint64_t p1 ) {
- return dot ( p0, p1 );
-}
+uint64_t test_dot_ulong(uint64_t p0, uint64_t p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i64 @llvm.dx.dot.v2i64(<2 x i64> %0, <2 x i64> %1)
// CHECK: ret i64 %dx.dot
-uint64_t test_dot_ulong2 ( uint64_t2 p0, uint64_t2 p1 ) {
- return dot ( p0, p1 );
-}
+uint64_t test_dot_ulong2(uint64_t2 p0, uint64_t2 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i64 @llvm.dx.dot.v3i64(<3 x i64> %0, <3 x i64> %1)
// CHECK: ret i64 %dx.dot
-uint64_t test_dot_ulong3 ( uint64_t3 p0, uint64_t3 p1 ) {
- return dot ( p0, p1 );
-}
+uint64_t test_dot_ulong3(uint64_t3 p0, uint64_t3 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call i64 @llvm.dx.dot.v4i64(<4 x i64> %0, <4 x i64> %1)
// CHECK: ret i64 %dx.dot
-uint64_t test_dot_ulong4 ( uint64_t4 p0, uint64_t4 p1 ) {
- return dot ( p0, p1 );
-}
+uint64_t test_dot_ulong4(uint64_t4 p0, uint64_t4 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = fmul half %0, %1
// NATIVE_HALF: ret half %dx.dot
// NO_HALF: %dx.dot = fmul float %0, %1
// NO_HALF: ret float %dx.dot
-half test_dot_half ( half p0, half p1 ) {
- return dot ( p0, p1 );
-}
+half test_dot_half(half p0, half p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot.v2f16(<2 x half> %0, <2 x half> %1)
// NATIVE_HALF: ret half %dx.dot
// NO_HALF: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %0, <2 x float> %1)
// NO_HALF: ret float %dx.dot
-half test_dot_half2 ( half2 p0, half2 p1 ) {
- return dot ( p0, p1 );
-}
+half test_dot_half2(half2 p0, half2 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot.v3f16(<3 x half> %0, <3 x half> %1)
// NATIVE_HALF: ret half %dx.dot
// NO_HALF: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %0, <3 x float> %1)
// NO_HALF: ret float %dx.dot
-half test_dot_half3 ( half3 p0, half3 p1 ) {
- return dot ( p0, p1 );
-}
+half test_dot_half3(half3 p0, half3 p1) { return dot(p0, p1); }
// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot.v4f16(<4 x half> %0, <4 x half> %1)
// NATIVE_HALF: ret half %dx.dot
// NO_HALF: %dx.dot = call float @llvm.dx.dot.v4f32(<4 x float> %0, <4 x float> %1)
// NO_HALF: ret float %dx.dot
-half test_dot_half4 ( half4 p0, half4 p1 ) {
- return dot ( p0, p1 );
-}
+half test_dot_half4(half4 p0, half4 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = fmul float %0, %1
// CHECK: ret float %dx.dot
-float test_dot_float ( float p0, float p1 ) {
- return dot ( p0, p1 );
-}
+float test_dot_float(float p0, float p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %0, <2 x float> %1)
// CHECK: ret float %dx.dot
-float test_dot_float2 ( float2 p0, float2 p1 ) {
- return dot ( p0, p1 );
-}
+float test_dot_float2(float2 p0, float2 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %0, <3 x float> %1)
// CHECK: ret float %dx.dot
-float test_dot_float3 ( float3 p0, float3 p1 ) {
- return dot ( p0, p1 );
-}
+float test_dot_float3(float3 p0, float3 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call float @llvm.dx.dot.v4f32(<4 x float> %0, <4 x float> %1)
// CHECK: ret float %dx.dot
-float test_dot_float4 ( float4 p0, float4 p1) {
- return dot ( p0, p1 );
-}
+float test_dot_float4(float4 p0, float4 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %splat.splat, <2 x float> %1)
// CHECK: ret float %dx.dot
-float test_dot_float2_splat ( float p0, float2 p1 ) {
- return dot( p0, p1 );
-}
+float test_dot_float2_splat(float p0, float2 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %splat.splat, <3 x float> %1)
// CHECK: ret float %dx.dot
-float test_dot_float3_splat ( float p0, float3 p1 ) {
- return dot( p0, p1 );
-}
+float test_dot_float3_splat(float p0, float3 p1) { return dot(p0, p1); }
// CHECK: %dx.dot = call float @llvm.dx.dot.v4f32(<4 x float> %splat.splat, <4 x float> %1)
// CHECK: ret float %dx.dot
-float test_dot_float4_splat ( float p0, float4 p1 ) {
- return dot( p0, p1 );
-}
+float test_dot_float4_splat(float p0, float4 p1) { return dot(p0, p1); }
// CHECK: %conv = sitofp i32 %1 to float
// CHECK: %splat.splatinsert = insertelement <2 x float> poison, float %conv, i64 0
// CHECK: %splat.splat = shufflevector <2 x float> %splat.splatinsert, <2 x float> poison, <2 x i32> zeroinitializer
// CHECK: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %0, <2 x float> %splat.splat)
// CHECK: ret float %dx.dot
-float test_builtin_dot_float2_int_splat ( float2 p0, int p1 ) {
- return dot ( p0, p1 );
+float test_builtin_dot_float2_int_splat(float2 p0, int p1) {
+ return dot(p0, p1);
}
// CHECK: %conv = sitofp i32 %1 to float
@@ -240,26 +170,24 @@ float test_builtin_dot_float2_int_splat ( float2 p0, int p1 ) {
// CHECK: %splat.splat = shufflevector <3 x float> %splat.splatinsert, <3 x float> poison, <3 x i32> zeroinitializer
// CHECK: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %0, <3 x float> %splat.splat)
// CHECK: ret float %dx.dot
-float test_builtin_dot_float3_int_splat ( float3 p0, int p1 ) {
- return dot ( p0, p1 );
+float test_builtin_dot_float3_int_splat(float3 p0, int p1) {
+ return dot(p0, p1);
}
// CHECK: %dx.dot = fmul double %0, %1
// CHECK: ret double %dx.dot
-double test_dot_double ( double p0, double p1 ) {
- return dot ( p0, p1 );
-}
+double test_dot_double(double p0, double p1) { return dot(p0, p1); }
// CHECK: %conv = zext i1 %tobool to i32
// CHECK: %dx.dot = mul i32 %conv, %1
// CHECK: ret i32 %dx.dot
-int test_dot_bool_scalar_arg0_type_promotion ( bool p0, int p1 ) {
- return dot ( p0, p1 );
+int test_dot_bool_scalar_arg0_type_promotion(bool p0, int p1) {
+ return dot(p0, p1);
}
// CHECK: %conv = zext i1 %tobool to i32
// CHECK: %dx.dot = mul i32 %0, %conv
// CHECK: ret i32 %dx.dot
-int test_dot_bool_scalar_arg1_type_promotion ( int p0, bool p1 ) {
- return dot ( p0, p1 );
+int test_dot_bool_scalar_arg1_type_promotion(int p0, bool p1) {
+ return dot(p0, p1);
}
diff --git a/clang/test/CodeGenHLSL/builtins/floor.hlsl b/clang/test/CodeGenHLSL/builtins/floor.hlsl
index 357661761b76..d2a2f6e52f1e 100644
--- a/clang/test/CodeGenHLSL/builtins/floor.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/floor.hlsl
@@ -1,79 +1,56 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
using hlsl::floor;
-// CHECK: define noundef half @
-// CHECK: call half @llvm.floor.f16(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.floor.f16(
// NO_HALF: define noundef float @"?test_floor_half@@YA$halff@$halff@@Z"(
// NO_HALF: call float @llvm.floor.f32(float %0)
-half test_floor_half ( half p0 ) {
- return floor ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.floor.v2f16(
+half test_floor_half(half p0) { return floor(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.floor.v2f16(
// NO_HALF: define noundef <2 x float> @"?test_floor_half2@@YAT?$__vector@$halff@$01@__clang@@T12@@Z"(
// NO_HALF: call <2 x float> @llvm.floor.v2f32(
-half2 test_floor_half2 ( half2 p0 ) {
- return floor ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.floor.v3f16(
+half2 test_floor_half2(half2 p0) { return floor(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.floor.v3f16(
// NO_HALF: define noundef <3 x float> @"?test_floor_half3@@YAT?$__vector@$halff@$02@__clang@@T12@@Z"(
// NO_HALF: call <3 x float> @llvm.floor.v3f32(
-half3 test_floor_half3 ( half3 p0 ) {
- return floor ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.floor.v4f16(
+half3 test_floor_half3(half3 p0) { return floor(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.floor.v4f16(
// NO_HALF: define noundef <4 x float> @"?test_floor_half4@@YAT?$__vector@$halff@$03@__clang@@T12@@Z"(
// NO_HALF: call <4 x float> @llvm.floor.v4f32(
-half4 test_floor_half4 ( half4 p0 ) {
- return floor ( p0 );
-}
+half4 test_floor_half4(half4 p0) { return floor(p0); }
// CHECK: define noundef float @
// CHECK: call float @llvm.floor.f32(
-float test_floor_float ( float p0 ) {
- return floor ( p0 );
-}
+float test_floor_float(float p0) { return floor(p0); }
// CHECK: define noundef <2 x float> @
// CHECK: call <2 x float> @llvm.floor.v2f32(
-float2 test_floor_float2 ( float2 p0 ) {
- return floor ( p0 );
-}
+float2 test_floor_float2(float2 p0) { return floor(p0); }
// CHECK: define noundef <3 x float> @
// CHECK: call <3 x float> @llvm.floor.v3f32(
-float3 test_floor_float3 ( float3 p0 ) {
- return floor ( p0 );
-}
+float3 test_floor_float3(float3 p0) { return floor(p0); }
// CHECK: define noundef <4 x float> @
// CHECK: call <4 x float> @llvm.floor.v4f32(
-float4 test_floor_float4 ( float4 p0 ) {
- return floor ( p0 );
-}
+float4 test_floor_float4(float4 p0) { return floor(p0); }
// CHECK: define noundef double @
// CHECK: call double @llvm.floor.f64(
-double test_floor_double ( double p0 ) {
- return floor ( p0 );
-}
+double test_floor_double(double p0) { return floor(p0); }
// CHECK: define noundef <2 x double> @
// CHECK: call <2 x double> @llvm.floor.v2f64(
-double2 test_floor_double2 ( double2 p0 ) {
- return floor ( p0 );
-}
+double2 test_floor_double2(double2 p0) { return floor(p0); }
// CHECK: define noundef <3 x double> @
// CHECK: call <3 x double> @llvm.floor.v3f64(
-double3 test_floor_double3 ( double3 p0 ) {
- return floor ( p0 );
-}
+double3 test_floor_double3(double3 p0) { return floor(p0); }
// CHECK: define noundef <4 x double> @
// CHECK: call <4 x double> @llvm.floor.v4f64(
-double4 test_floor_double4 ( double4 p0 ) {
- return floor ( p0 );
-}
+double4 test_floor_double4(double4 p0) { return floor(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/frac.hlsl b/clang/test/CodeGenHLSL/builtins/frac.hlsl
new file mode 100644
index 000000000000..7c4d1468e96d
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/frac.hlsl
@@ -0,0 +1,53 @@
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
+
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: %dx.frac = call half @llvm.dx.frac.f16(
+// NATIVE_HALF: ret half %dx.frac
+// NO_HALF: define noundef float @"?test_frac_half@@YA$halff@$halff@@Z"(
+// NO_HALF: %dx.frac = call float @llvm.dx.frac.f32(
+// NO_HALF: ret float %dx.frac
+half test_frac_half(half p0) { return frac(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: %dx.frac = call <2 x half> @llvm.dx.frac.v2f16
+// NATIVE_HALF: ret <2 x half> %dx.frac
+// NO_HALF: define noundef <2 x float> @
+// NO_HALF: %dx.frac = call <2 x float> @llvm.dx.frac.v2f32(
+// NO_HALF: ret <2 x float> %dx.frac
+half2 test_frac_half2(half2 p0) { return frac(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: %dx.frac = call <3 x half> @llvm.dx.frac.v3f16
+// NATIVE_HALF: ret <3 x half> %dx.frac
+// NO_HALF: define noundef <3 x float> @
+// NO_HALF: %dx.frac = call <3 x float> @llvm.dx.frac.v3f32(
+// NO_HALF: ret <3 x float> %dx.frac
+half3 test_frac_half3(half3 p0) { return frac(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: %dx.frac = call <4 x half> @llvm.dx.frac.v4f16
+// NATIVE_HALF: ret <4 x half> %dx.frac
+// NO_HALF: define noundef <4 x float> @
+// NO_HALF: %dx.frac = call <4 x float> @llvm.dx.frac.v4f32(
+// NO_HALF: ret <4 x float> %dx.frac
+half4 test_frac_half4(half4 p0) { return frac(p0); }
+
+// CHECK: define noundef float @
+// CHECK: %dx.frac = call float @llvm.dx.frac.f32(
+// CHECK: ret float %dx.frac
+float test_frac_float(float p0) { return frac(p0); }
+// CHECK: define noundef <2 x float> @
+// CHECK: %dx.frac = call <2 x float> @llvm.dx.frac.v2f32
+// CHECK: ret <2 x float> %dx.frac
+float2 test_frac_float2(float2 p0) { return frac(p0); }
+// CHECK: define noundef <3 x float> @
+// CHECK: %dx.frac = call <3 x float> @llvm.dx.frac.v3f32
+// CHECK: ret <3 x float> %dx.frac
+float3 test_frac_float3(float3 p0) { return frac(p0); }
+// CHECK: define noundef <4 x float> @
+// CHECK: %dx.frac = call <4 x float> @llvm.dx.frac.v4f32
+// CHECK: ret <4 x float> %dx.frac
+float4 test_frac_float4(float4 p0) { return frac(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/lerp-builtin.hlsl b/clang/test/CodeGenHLSL/builtins/lerp-builtin.hlsl
new file mode 100644
index 000000000000..1f16dec68212
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/lerp-builtin.hlsl
@@ -0,0 +1,37 @@
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple dxil-pc-shadermodel6.3-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+
+
+
+// CHECK-LABEL: builtin_lerp_half_scalar
+// CHECK: %3 = fsub double %conv1, %conv
+// CHECK: %4 = fmul double %conv2, %3
+// CHECK: %dx.lerp = fadd double %conv, %4
+// CHECK: %conv3 = fptrunc double %dx.lerp to half
+// CHECK: ret half %conv3
+half builtin_lerp_half_scalar (half p0) {
+ return __builtin_hlsl_lerp ( p0, p0, p0 );
+}
+
+// CHECK-LABEL: builtin_lerp_float_scalar
+// CHECK: %3 = fsub double %conv1, %conv
+// CHECK: %4 = fmul double %conv2, %3
+// CHECK: %dx.lerp = fadd double %conv, %4
+// CHECK: %conv3 = fptrunc double %dx.lerp to float
+// CHECK: ret float %conv3
+float builtin_lerp_float_scalar ( float p0) {
+ return __builtin_hlsl_lerp ( p0, p0, p0 );
+}
+
+// CHECK-LABEL: builtin_lerp_half_vector
+// CHECK: %dx.lerp = call <3 x half> @llvm.dx.lerp.v3f16(<3 x half> %0, <3 x half> %1, <3 x half> %2)
+// CHECK: ret <3 x half> %dx.lerp
+half3 builtin_lerp_half_vector (half3 p0) {
+ return __builtin_hlsl_lerp ( p0, p0, p0 );
+}
+
+// CHECK-LABEL: builtin_lerp_floar_vector
+// CHECK: %dx.lerp = call <2 x float> @llvm.dx.lerp.v2f32(<2 x float> %0, <2 x float> %1, <2 x float> %2)
+// CHECK: ret <2 x float> %dx.lerp
+float2 builtin_lerp_floar_vector ( float2 p0) {
+ return __builtin_hlsl_lerp ( p0, p0, p0 );
+}
diff --git a/clang/test/CodeGenHLSL/builtins/lerp.hlsl b/clang/test/CodeGenHLSL/builtins/lerp.hlsl
new file mode 100644
index 000000000000..a6b3d9643d67
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/lerp.hlsl
@@ -0,0 +1,83 @@
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
+
+// NATIVE_HALF: %3 = fsub half %1, %0
+// NATIVE_HALF: %4 = fmul half %2, %3
+// NATIVE_HALF: %dx.lerp = fadd half %0, %4
+// NATIVE_HALF: ret half %dx.lerp
+// NO_HALF: %3 = fsub float %1, %0
+// NO_HALF: %4 = fmul float %2, %3
+// NO_HALF: %dx.lerp = fadd float %0, %4
+// NO_HALF: ret float %dx.lerp
+half test_lerp_half(half p0) { return lerp(p0, p0, p0); }
+
+// NATIVE_HALF: %dx.lerp = call <2 x half> @llvm.dx.lerp.v2f16(<2 x half> %0, <2 x half> %1, <2 x half> %2)
+// NATIVE_HALF: ret <2 x half> %dx.lerp
+// NO_HALF: %dx.lerp = call <2 x float> @llvm.dx.lerp.v2f32(<2 x float> %0, <2 x float> %1, <2 x float> %2)
+// NO_HALF: ret <2 x float> %dx.lerp
+half2 test_lerp_half2(half2 p0, half2 p1) { return lerp(p0, p0, p0); }
+
+// NATIVE_HALF: %dx.lerp = call <3 x half> @llvm.dx.lerp.v3f16(<3 x half> %0, <3 x half> %1, <3 x half> %2)
+// NATIVE_HALF: ret <3 x half> %dx.lerp
+// NO_HALF: %dx.lerp = call <3 x float> @llvm.dx.lerp.v3f32(<3 x float> %0, <3 x float> %1, <3 x float> %2)
+// NO_HALF: ret <3 x float> %dx.lerp
+half3 test_lerp_half3(half3 p0, half3 p1) { return lerp(p0, p0, p0); }
+
+// NATIVE_HALF: %dx.lerp = call <4 x half> @llvm.dx.lerp.v4f16(<4 x half> %0, <4 x half> %1, <4 x half> %2)
+// NATIVE_HALF: ret <4 x half> %dx.lerp
+// NO_HALF: %dx.lerp = call <4 x float> @llvm.dx.lerp.v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+// NO_HALF: ret <4 x float> %dx.lerp
+half4 test_lerp_half4(half4 p0, half4 p1) { return lerp(p0, p0, p0); }
+
+// CHECK: %3 = fsub float %1, %0
+// CHECK: %4 = fmul float %2, %3
+// CHECK: %dx.lerp = fadd float %0, %4
+// CHECK: ret float %dx.lerp
+float test_lerp_float(float p0, float p1) { return lerp(p0, p0, p0); }
+
+// CHECK: %dx.lerp = call <2 x float> @llvm.dx.lerp.v2f32(<2 x float> %0, <2 x float> %1, <2 x float> %2)
+// CHECK: ret <2 x float> %dx.lerp
+float2 test_lerp_float2(float2 p0, float2 p1) { return lerp(p0, p0, p0); }
+
+// CHECK: %dx.lerp = call <3 x float> @llvm.dx.lerp.v3f32(<3 x float> %0, <3 x float> %1, <3 x float> %2)
+// CHECK: ret <3 x float> %dx.lerp
+float3 test_lerp_float3(float3 p0, float3 p1) { return lerp(p0, p0, p0); }
+
+// CHECK: %dx.lerp = call <4 x float> @llvm.dx.lerp.v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+// CHECK: ret <4 x float> %dx.lerp
+float4 test_lerp_float4(float4 p0, float4 p1) { return lerp(p0, p0, p0); }
+
+// CHECK: %dx.lerp = call <2 x float> @llvm.dx.lerp.v2f32(<2 x float> %splat.splat, <2 x float> %1, <2 x float> %2)
+// CHECK: ret <2 x float> %dx.lerp
+float2 test_lerp_float2_splat(float p0, float2 p1) { return lerp(p0, p1, p1); }
+
+// CHECK: %dx.lerp = call <3 x float> @llvm.dx.lerp.v3f32(<3 x float> %splat.splat, <3 x float> %1, <3 x float> %2)
+// CHECK: ret <3 x float> %dx.lerp
+float3 test_lerp_float3_splat(float p0, float3 p1) { return lerp(p0, p1, p1); }
+
+// CHECK: %dx.lerp = call <4 x float> @llvm.dx.lerp.v4f32(<4 x float> %splat.splat, <4 x float> %1, <4 x float> %2)
+// CHECK: ret <4 x float> %dx.lerp
+float4 test_lerp_float4_splat(float p0, float4 p1) { return lerp(p0, p1, p1); }
+
+// CHECK: %conv = sitofp i32 %2 to float
+// CHECK: %splat.splatinsert = insertelement <2 x float> poison, float %conv, i64 0
+// CHECK: %splat.splat = shufflevector <2 x float> %splat.splatinsert, <2 x float> poison, <2 x i32> zeroinitializer
+// CHECK: %dx.lerp = call <2 x float> @llvm.dx.lerp.v2f32(<2 x float> %0, <2 x float> %1, <2 x float> %splat.splat)
+// CHECK: ret <2 x float> %dx.lerp
+float2 test_lerp_float2_int_splat(float2 p0, int p1) {
+ return lerp(p0, p0, p1);
+}
+
+// CHECK: %conv = sitofp i32 %2 to float
+// CHECK: %splat.splatinsert = insertelement <3 x float> poison, float %conv, i64 0
+// CHECK: %splat.splat = shufflevector <3 x float> %splat.splatinsert, <3 x float> poison, <3 x i32> zeroinitializer
+// CHECK: %dx.lerp = call <3 x float> @llvm.dx.lerp.v3f32(<3 x float> %0, <3 x float> %1, <3 x float> %splat.splat)
+// CHECK: ret <3 x float> %dx.lerp
+float3 test_lerp_float3_int_splat(float3 p0, int p1) {
+ return lerp(p0, p0, p1);
+}
diff --git a/clang/test/CodeGenHLSL/builtins/log.hlsl b/clang/test/CodeGenHLSL/builtins/log.hlsl
index 6a8e4ac2e5f2..ecbdf1e98ac3 100644
--- a/clang/test/CodeGenHLSL/builtins/log.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/log.hlsl
@@ -1,56 +1,41 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-// CHECK: define noundef half @
-// CHECK: call half @llvm.log.f16(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.log.f16(
// NO_HALF: define noundef float @"?test_log_half@@YA$halff@$halff@@Z"(
// NO_HALF: call float @llvm.log.f32(
-half test_log_half ( half p0 ) {
- return log ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.log.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_log_float2@@YAT?$__vector@M$01@__clang@@T12@@Z"(
+half test_log_half(half p0) { return log(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.log.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_log_half2
// NO_HALF: call <2 x float> @llvm.log.v2f32(
-half2 test_log_half2 ( half2 p0 ) {
- return log ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.log.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_log_float3@@YAT?$__vector@M$02@__clang@@T12@@Z"(
+half2 test_log_half2(half2 p0) { return log(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.log.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_log_half3
// NO_HALF: call <3 x float> @llvm.log.v3f32(
-half3 test_log_half3 ( half3 p0 ) {
- return log ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.log.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_log_float4@@YAT?$__vector@M$03@__clang@@T12@@Z"(
+half3 test_log_half3(half3 p0) { return log(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.log.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_log_half4
// NO_HALF: call <4 x float> @llvm.log.v4f32(
-half4 test_log_half4 ( half4 p0 ) {
- return log ( p0 );
-}
+half4 test_log_half4(half4 p0) { return log(p0); }
-// CHECK: define noundef float @
+// CHECK: define noundef float @"?test_log_float
// CHECK: call float @llvm.log.f32(
-float test_log_float ( float p0 ) {
- return log ( p0 );
-}
-// CHECK: define noundef <2 x float> @
+float test_log_float(float p0) { return log(p0); }
+// CHECK: define noundef <2 x float> @"?test_log_float2
// CHECK: call <2 x float> @llvm.log.v2f32
-float2 test_log_float2 ( float2 p0 ) {
- return log ( p0 );
-}
-// CHECK: define noundef <3 x float> @
+float2 test_log_float2(float2 p0) { return log(p0); }
+// CHECK: define noundef <3 x float> @"?test_log_float3
// CHECK: call <3 x float> @llvm.log.v3f32
-float3 test_log_float3 ( float3 p0 ) {
- return log ( p0 );
-}
-// CHECK: define noundef <4 x float> @
+float3 test_log_float3(float3 p0) { return log(p0); }
+// CHECK: define noundef <4 x float> @"?test_log_float4
// CHECK: call <4 x float> @llvm.log.v4f32
-float4 test_log_float4 ( float4 p0 ) {
- return log ( p0 );
-}
+float4 test_log_float4(float4 p0) { return log(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/log10.hlsl b/clang/test/CodeGenHLSL/builtins/log10.hlsl
index 8ce24fd530dd..638b86e8d5ea 100644
--- a/clang/test/CodeGenHLSL/builtins/log10.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/log10.hlsl
@@ -1,56 +1,41 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-// CHECK: define noundef half @
-// CHECK: call half @llvm.log10.f16(
-// NO_HALF: define noundef float @"?test_log10_half@@YA$halff@$halff@@Z"(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.log10.f16(
+// NO_HALF: define noundef float @"?test_log10_half
// NO_HALF: call float @llvm.log10.f32(
-half test_log10_half ( half p0 ) {
- return log10 ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.log10.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_log10_float2@@YAT?$__vector@M$01@__clang@@T12@@Z"(
+half test_log10_half(half p0) { return log10(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.log10.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_log10_half2
// NO_HALF: call <2 x float> @llvm.log10.v2f32(
-half2 test_log10_half2 ( half2 p0 ) {
- return log10 ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.log10.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_log10_float3@@YAT?$__vector@M$02@__clang@@T12@@Z"(
+half2 test_log10_half2(half2 p0) { return log10(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.log10.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_log10_half3
// NO_HALF: call <3 x float> @llvm.log10.v3f32(
-half3 test_log10_half3 ( half3 p0 ) {
- return log10 ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.log10.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_log10_float4@@YAT?$__vector@M$03@__clang@@T12@@Z"(
+half3 test_log10_half3(half3 p0) { return log10(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.log10.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_log10_half4
// NO_HALF: call <4 x float> @llvm.log10.v4f32(
-half4 test_log10_half4 ( half4 p0 ) {
- return log10 ( p0 );
-}
+half4 test_log10_half4(half4 p0) { return log10(p0); }
-// CHECK: define noundef float @
+// CHECK: define noundef float @"?test_log10_float
// CHECK: call float @llvm.log10.f32(
-float test_log10_float ( float p0 ) {
- return log10 ( p0 );
-}
-// CHECK: define noundef <2 x float> @
+float test_log10_float(float p0) { return log10(p0); }
+// CHECK: define noundef <2 x float> @"?test_log10_float2
// CHECK: call <2 x float> @llvm.log10.v2f32
-float2 test_log10_float2 ( float2 p0 ) {
- return log10 ( p0 );
-}
-// CHECK: define noundef <3 x float> @
+float2 test_log10_float2(float2 p0) { return log10(p0); }
+// CHECK: define noundef <3 x float> @"?test_log10_float3
// CHECK: call <3 x float> @llvm.log10.v3f32
-float3 test_log10_float3 ( float3 p0 ) {
- return log10 ( p0 );
-}
-// CHECK: define noundef <4 x float> @
+float3 test_log10_float3(float3 p0) { return log10(p0); }
+// CHECK: define noundef <4 x float> @"?test_log10_float4
// CHECK: call <4 x float> @llvm.log10.v4f32
-float4 test_log10_float4 ( float4 p0 ) {
- return log10 ( p0 );
-}
+float4 test_log10_float4(float4 p0) { return log10(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/log2.hlsl b/clang/test/CodeGenHLSL/builtins/log2.hlsl
index f0f0a6c7c50e..9ed8185a06b0 100644
--- a/clang/test/CodeGenHLSL/builtins/log2.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/log2.hlsl
@@ -1,56 +1,41 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-// CHECK: define noundef half @
-// CHECK: call half @llvm.log2.f16(
-// NO_HALF: define noundef float @"?test_log2_half@@YA$halff@$halff@@Z"(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.log2.f16(
+// NO_HALF: define noundef float @"?test_log2_half
// NO_HALF: call float @llvm.log2.f32(
-half test_log2_half ( half p0 ) {
- return log2 ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.log2.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_log2_float2@@YAT?$__vector@M$01@__clang@@T12@@Z"(
+half test_log2_half(half p0) { return log2(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.log2.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_log2_half2
// NO_HALF: call <2 x float> @llvm.log2.v2f32(
-half2 test_log2_half2 ( half2 p0 ) {
- return log2 ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.log2.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_log2_float3@@YAT?$__vector@M$02@__clang@@T12@@Z"(
+half2 test_log2_half2(half2 p0) { return log2(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.log2.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_log2_half3
// NO_HALF: call <3 x float> @llvm.log2.v3f32(
-half3 test_log2_half3 ( half3 p0 ) {
- return log2 ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.log2.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_log2_float4@@YAT?$__vector@M$03@__clang@@T12@@Z"(
+half3 test_log2_half3(half3 p0) { return log2(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.log2.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_log2_half4
// NO_HALF: call <4 x float> @llvm.log2.v4f32(
-half4 test_log2_half4 ( half4 p0 ) {
- return log2 ( p0 );
-}
+half4 test_log2_half4(half4 p0) { return log2(p0); }
-// CHECK: define noundef float @
+// CHECK: define noundef float @"?test_log2_float
// CHECK: call float @llvm.log2.f32(
-float test_log2_float ( float p0 ) {
- return log2 ( p0 );
-}
-// CHECK: define noundef <2 x float> @
+float test_log2_float(float p0) { return log2(p0); }
+// CHECK: define noundef <2 x float> @"?test_log2_float2
// CHECK: call <2 x float> @llvm.log2.v2f32
-float2 test_log2_float2 ( float2 p0 ) {
- return log2 ( p0 );
-}
-// CHECK: define noundef <3 x float> @
+float2 test_log2_float2(float2 p0) { return log2(p0); }
+// CHECK: define noundef <3 x float> @"?test_log2_float3
// CHECK: call <3 x float> @llvm.log2.v3f32
-float3 test_log2_float3 ( float3 p0 ) {
- return log2 ( p0 );
-}
-// CHECK: define noundef <4 x float> @
+float3 test_log2_float3(float3 p0) { return log2(p0); }
+// CHECK: define noundef <4 x float> @"?test_log2_float4
// CHECK: call <4 x float> @llvm.log2.v4f32
-float4 test_log2_float4 ( float4 p0 ) {
- return log2 ( p0 );
-}
+float4 test_log2_float4(float4 p0) { return log2(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/max.hlsl b/clang/test/CodeGenHLSL/builtins/max.hlsl
index d8879c3332fb..272d1e8a10bd 100644
--- a/clang/test/CodeGenHLSL/builtins/max.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/max.hlsl
@@ -1,206 +1,134 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
#ifdef __HLSL_ENABLE_16_BIT
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.smax.i16(
-int16_t test_max_short ( int16_t p0, int16_t p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.smax.v2i16(
-int16_t2 test_max_short2 ( int16_t2 p0, int16_t2 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.smax.v3i16
-int16_t3 test_max_short3 ( int16_t3 p0, int16_t3 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.smax.v4i16
-int16_t4 test_max_short4 ( int16_t4 p0, int16_t4 p1 ) {
- return max ( p0, p1 );
-}
+// NATIVE_HALF: define noundef i16 @
+// NATIVE_HALF: call i16 @llvm.smax.i16(
+int16_t test_max_short(int16_t p0, int16_t p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <2 x i16> @
+// NATIVE_HALF: call <2 x i16> @llvm.smax.v2i16(
+int16_t2 test_max_short2(int16_t2 p0, int16_t2 p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <3 x i16> @
+// NATIVE_HALF: call <3 x i16> @llvm.smax.v3i16
+int16_t3 test_max_short3(int16_t3 p0, int16_t3 p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <4 x i16> @
+// NATIVE_HALF: call <4 x i16> @llvm.smax.v4i16
+int16_t4 test_max_short4(int16_t4 p0, int16_t4 p1) { return max(p0, p1); }
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.umax.i16(
-uint16_t test_max_ushort ( uint16_t p0, uint16_t p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.umax.v2i16
-uint16_t2 test_max_ushort2 ( uint16_t2 p0, uint16_t2 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.umax.v3i16
-uint16_t3 test_max_ushort3 ( uint16_t3 p0, uint16_t3 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.umax.v4i16
-uint16_t4 test_max_ushort4 ( uint16_t4 p0, uint16_t4 p1 ) {
- return max ( p0, p1 );
-}
+// NATIVE_HALF: define noundef i16 @
+// NATIVE_HALF: call i16 @llvm.umax.i16(
+uint16_t test_max_ushort(uint16_t p0, uint16_t p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <2 x i16> @
+// NATIVE_HALF: call <2 x i16> @llvm.umax.v2i16
+uint16_t2 test_max_ushort2(uint16_t2 p0, uint16_t2 p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <3 x i16> @
+// NATIVE_HALF: call <3 x i16> @llvm.umax.v3i16
+uint16_t3 test_max_ushort3(uint16_t3 p0, uint16_t3 p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <4 x i16> @
+// NATIVE_HALF: call <4 x i16> @llvm.umax.v4i16
+uint16_t4 test_max_ushort4(uint16_t4 p0, uint16_t4 p1) { return max(p0, p1); }
#endif
// CHECK: define noundef i32 @
// CHECK: call i32 @llvm.smax.i32(
-int test_max_int ( int p0, int p1 ) {
- return max ( p0, p1 );
-}
+int test_max_int(int p0, int p1) { return max(p0, p1); }
// CHECK: define noundef <2 x i32> @
// CHECK: call <2 x i32> @llvm.smax.v2i32
-int2 test_max_int2 ( int2 p0, int2 p1 ) {
- return max ( p0, p1 );
-}
+int2 test_max_int2(int2 p0, int2 p1) { return max(p0, p1); }
// CHECK: define noundef <3 x i32> @
// CHECK: call <3 x i32> @llvm.smax.v3i32
-int3 test_max_int3 ( int3 p0, int3 p1 ) {
- return max ( p0, p1 );
-}
+int3 test_max_int3(int3 p0, int3 p1) { return max(p0, p1); }
// CHECK: define noundef <4 x i32> @
// CHECK: call <4 x i32> @llvm.smax.v4i32
-int4 test_max_int4 ( int4 p0, int4 p1) {
- return max ( p0, p1 );
-}
+int4 test_max_int4(int4 p0, int4 p1) { return max(p0, p1); }
// CHECK: define noundef i32 @
// CHECK: call i32 @llvm.umax.i32(
-int test_max_uint ( uint p0, uint p1 ) {
- return max ( p0, p1 );
-}
+int test_max_uint(uint p0, uint p1) { return max(p0, p1); }
// CHECK: define noundef <2 x i32> @
// CHECK: call <2 x i32> @llvm.umax.v2i32
-uint2 test_max_uint2 ( uint2 p0, uint2 p1 ) {
- return max ( p0, p1 );
-}
+uint2 test_max_uint2(uint2 p0, uint2 p1) { return max(p0, p1); }
// CHECK: define noundef <3 x i32> @
// CHECK: call <3 x i32> @llvm.umax.v3i32
-uint3 test_max_uint3 ( uint3 p0, uint3 p1 ) {
- return max ( p0, p1 );
-}
+uint3 test_max_uint3(uint3 p0, uint3 p1) { return max(p0, p1); }
// CHECK: define noundef <4 x i32> @
// CHECK: call <4 x i32> @llvm.umax.v4i32
-uint4 test_max_uint4 ( uint4 p0, uint4 p1) {
- return max ( p0, p1 );
-}
+uint4 test_max_uint4(uint4 p0, uint4 p1) { return max(p0, p1); }
// CHECK: define noundef i64 @
// CHECK: call i64 @llvm.smax.i64(
-int64_t test_max_long ( int64_t p0, int64_t p1 ) {
- return max ( p0, p1 );
-}
+int64_t test_max_long(int64_t p0, int64_t p1) { return max(p0, p1); }
// CHECK: define noundef <2 x i64> @
// CHECK: call <2 x i64> @llvm.smax.v2i64
-int64_t2 test_max_long2 ( int64_t2 p0, int64_t2 p1 ) {
- return max ( p0, p1 );
-}
+int64_t2 test_max_long2(int64_t2 p0, int64_t2 p1) { return max(p0, p1); }
// CHECK: define noundef <3 x i64> @
// CHECK: call <3 x i64> @llvm.smax.v3i64
-int64_t3 test_max_long3 ( int64_t3 p0, int64_t3 p1 ) {
- return max ( p0, p1 );
-}
+int64_t3 test_max_long3(int64_t3 p0, int64_t3 p1) { return max(p0, p1); }
// CHECK: define noundef <4 x i64> @
// CHECK: call <4 x i64> @llvm.smax.v4i64
-int64_t4 test_max_long4 ( int64_t4 p0, int64_t4 p1) {
- return max ( p0, p1 );
-}
+int64_t4 test_max_long4(int64_t4 p0, int64_t4 p1) { return max(p0, p1); }
// CHECK: define noundef i64 @
// CHECK: call i64 @llvm.umax.i64(
-uint64_t test_max_long ( uint64_t p0, uint64_t p1 ) {
- return max ( p0, p1 );
-}
+uint64_t test_max_long(uint64_t p0, uint64_t p1) { return max(p0, p1); }
// CHECK: define noundef <2 x i64> @
// CHECK: call <2 x i64> @llvm.umax.v2i64
-uint64_t2 test_max_long2 ( uint64_t2 p0, uint64_t2 p1 ) {
- return max ( p0, p1 );
-}
+uint64_t2 test_max_long2(uint64_t2 p0, uint64_t2 p1) { return max(p0, p1); }
// CHECK: define noundef <3 x i64> @
// CHECK: call <3 x i64> @llvm.umax.v3i64
-uint64_t3 test_max_long3 ( uint64_t3 p0, uint64_t3 p1 ) {
- return max ( p0, p1 );
-}
+uint64_t3 test_max_long3(uint64_t3 p0, uint64_t3 p1) { return max(p0, p1); }
// CHECK: define noundef <4 x i64> @
// CHECK: call <4 x i64> @llvm.umax.v4i64
-uint64_t4 test_max_long4 ( uint64_t4 p0, uint64_t4 p1) {
- return max ( p0, p1 );
-}
+uint64_t4 test_max_long4(uint64_t4 p0, uint64_t4 p1) { return max(p0, p1); }
-
-// CHECK: define noundef half @
-// CHECK: call half @llvm.maxnum.f16(
-// NO_HALF: define noundef float @"?test_max_half@@YA$halff@$halff@0@Z"(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.maxnum.f16(
+// NO_HALF: define noundef float @"?test_max_half
// NO_HALF: call float @llvm.maxnum.f32(
-half test_max_half ( half p0, half p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.maxnum.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_max_float2@@YAT?$__vector@M$01@__clang@@T12@0@Z"(
+half test_max_half(half p0, half p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.maxnum.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_max_half2
// NO_HALF: call <2 x float> @llvm.maxnum.v2f32(
-half2 test_max_half2 ( half2 p0, half2 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.maxnum.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_max_float3@@YAT?$__vector@M$02@__clang@@T12@0@Z"(
+half2 test_max_half2(half2 p0, half2 p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.maxnum.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_max_half3
// NO_HALF: call <3 x float> @llvm.maxnum.v3f32(
-half3 test_max_half3 ( half3 p0, half3 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.maxnum.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_max_float4@@YAT?$__vector@M$03@__clang@@T12@0@Z"(
+half3 test_max_half3(half3 p0, half3 p1) { return max(p0, p1); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.maxnum.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_max_half4
// NO_HALF: call <4 x float> @llvm.maxnum.v4f32(
-half4 test_max_half4 ( half4 p0, half4 p1 ) {
- return max ( p0, p1 );
-}
+half4 test_max_half4(half4 p0, half4 p1) { return max(p0, p1); }
-// CHECK: define noundef float @
+// CHECK: define noundef float @"?test_max_float
// CHECK: call float @llvm.maxnum.f32(
-float test_max_float ( float p0, float p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <2 x float> @
+float test_max_float(float p0, float p1) { return max(p0, p1); }
+// CHECK: define noundef <2 x float> @"?test_max_float2
// CHECK: call <2 x float> @llvm.maxnum.v2f32
-float2 test_max_float2 ( float2 p0, float2 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <3 x float> @
+float2 test_max_float2(float2 p0, float2 p1) { return max(p0, p1); }
+// CHECK: define noundef <3 x float> @"?test_max_float3
// CHECK: call <3 x float> @llvm.maxnum.v3f32
-float3 test_max_float3 ( float3 p0, float3 p1 ) {
- return max ( p0, p1 );
-}
-// CHECK: define noundef <4 x float> @
+float3 test_max_float3(float3 p0, float3 p1) { return max(p0, p1); }
+// CHECK: define noundef <4 x float> @"?test_max_float4
// CHECK: call <4 x float> @llvm.maxnum.v4f32
-float4 test_max_float4 ( float4 p0, float4 p1) {
- return max ( p0, p1 );
-}
+float4 test_max_float4(float4 p0, float4 p1) { return max(p0, p1); }
// CHECK: define noundef double @
// CHECK: call double @llvm.maxnum.f64(
-double test_max_double ( double p0, double p1 ) {
- return max ( p0, p1 );
-}
+double test_max_double(double p0, double p1) { return max(p0, p1); }
// CHECK: define noundef <2 x double> @
// CHECK: call <2 x double> @llvm.maxnum.v2f64
-double2 test_max_double2 ( double2 p0, double2 p1 ) {
- return max ( p0, p1 );
-}
+double2 test_max_double2(double2 p0, double2 p1) { return max(p0, p1); }
// CHECK: define noundef <3 x double> @
// CHECK: call <3 x double> @llvm.maxnum.v3f64
-double3 test_max_double3 ( double3 p0, double3 p1 ) {
- return max ( p0, p1 );
-}
+double3 test_max_double3(double3 p0, double3 p1) { return max(p0, p1); }
// CHECK: define noundef <4 x double> @
// CHECK: call <4 x double> @llvm.maxnum.v4f64
-double4 test_max_double4 ( double4 p0, double4 p1) {
- return max ( p0, p1 );
-}
+double4 test_max_double4(double4 p0, double4 p1) { return max(p0, p1); }
diff --git a/clang/test/CodeGenHLSL/builtins/min.hlsl b/clang/test/CodeGenHLSL/builtins/min.hlsl
index 743053cbdd26..a0c233dac4d5 100644
--- a/clang/test/CodeGenHLSL/builtins/min.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/min.hlsl
@@ -1,207 +1,134 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
#ifdef __HLSL_ENABLE_16_BIT
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.smin.i16(
-int16_t test_min_short ( int16_t p0, int16_t p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.smin.v2i16(
-int16_t2 test_min_short2 ( int16_t2 p0, int16_t2 p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.smin.v3i16
-int16_t3 test_min_short3 ( int16_t3 p0, int16_t3 p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.smin.v4i16
-int16_t4 test_min_short4 ( int16_t4 p0, int16_t4 p1 ) {
- return min ( p0, p1 );
-}
+// NATIVE_HALF: define noundef i16 @
+// NATIVE_HALF: call i16 @llvm.smin.i16(
+int16_t test_min_short(int16_t p0, int16_t p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <2 x i16> @
+// NATIVE_HALF: call <2 x i16> @llvm.smin.v2i16(
+int16_t2 test_min_short2(int16_t2 p0, int16_t2 p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <3 x i16> @
+// NATIVE_HALF: call <3 x i16> @llvm.smin.v3i16
+int16_t3 test_min_short3(int16_t3 p0, int16_t3 p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <4 x i16> @
+// NATIVE_HALF: call <4 x i16> @llvm.smin.v4i16
+int16_t4 test_min_short4(int16_t4 p0, int16_t4 p1) { return min(p0, p1); }
-
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.umin.i16(
-uint16_t test_min_ushort ( uint16_t p0, uint16_t p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.umin.v2i16
-uint16_t2 test_min_ushort2 ( uint16_t2 p0, uint16_t2 p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.umin.v3i16
-uint16_t3 test_min_ushort3 ( uint16_t3 p0, uint16_t3 p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.umin.v4i16
-uint16_t4 test_min_ushort4 ( uint16_t4 p0, uint16_t4 p1 ) {
- return min ( p0, p1 );
-}
+// NATIVE_HALF: define noundef i16 @
+// NATIVE_HALF: call i16 @llvm.umin.i16(
+uint16_t test_min_ushort(uint16_t p0, uint16_t p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <2 x i16> @
+// NATIVE_HALF: call <2 x i16> @llvm.umin.v2i16
+uint16_t2 test_min_ushort2(uint16_t2 p0, uint16_t2 p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <3 x i16> @
+// NATIVE_HALF: call <3 x i16> @llvm.umin.v3i16
+uint16_t3 test_min_ushort3(uint16_t3 p0, uint16_t3 p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <4 x i16> @
+// NATIVE_HALF: call <4 x i16> @llvm.umin.v4i16
+uint16_t4 test_min_ushort4(uint16_t4 p0, uint16_t4 p1) { return min(p0, p1); }
#endif
// CHECK: define noundef i32 @
// CHECK: call i32 @llvm.smin.i32(
-int test_min_int ( int p0, int p1 ) {
- return min ( p0, p1 );
-}
+int test_min_int(int p0, int p1) { return min(p0, p1); }
// CHECK: define noundef <2 x i32> @
// CHECK: call <2 x i32> @llvm.smin.v2i32
-int2 test_min_int2 ( int2 p0, int2 p1 ) {
- return min ( p0, p1 );
-}
+int2 test_min_int2(int2 p0, int2 p1) { return min(p0, p1); }
// CHECK: define noundef <3 x i32> @
// CHECK: call <3 x i32> @llvm.smin.v3i32
-int3 test_min_int3 ( int3 p0, int3 p1 ) {
- return min ( p0, p1 );
-}
+int3 test_min_int3(int3 p0, int3 p1) { return min(p0, p1); }
// CHECK: define noundef <4 x i32> @
// CHECK: call <4 x i32> @llvm.smin.v4i32
-int4 test_min_int4 ( int4 p0, int4 p1) {
- return min ( p0, p1 );
-}
+int4 test_min_int4(int4 p0, int4 p1) { return min(p0, p1); }
// CHECK: define noundef i32 @
// CHECK: call i32 @llvm.umin.i32(
-int test_min_uint ( uint p0, uint p1 ) {
- return min ( p0, p1 );
-}
+int test_min_uint(uint p0, uint p1) { return min(p0, p1); }
// CHECK: define noundef <2 x i32> @
// CHECK: call <2 x i32> @llvm.umin.v2i32
-uint2 test_min_uint2 ( uint2 p0, uint2 p1 ) {
- return min ( p0, p1 );
-}
+uint2 test_min_uint2(uint2 p0, uint2 p1) { return min(p0, p1); }
// CHECK: define noundef <3 x i32> @
// CHECK: call <3 x i32> @llvm.umin.v3i32
-uint3 test_min_uint3 ( uint3 p0, uint3 p1 ) {
- return min ( p0, p1 );
-}
+uint3 test_min_uint3(uint3 p0, uint3 p1) { return min(p0, p1); }
// CHECK: define noundef <4 x i32> @
// CHECK: call <4 x i32> @llvm.umin.v4i32
-uint4 test_min_uint4 ( uint4 p0, uint4 p1) {
- return min ( p0, p1 );
-}
+uint4 test_min_uint4(uint4 p0, uint4 p1) { return min(p0, p1); }
// CHECK: define noundef i64 @
// CHECK: call i64 @llvm.smin.i64(
-int64_t test_min_long ( int64_t p0, int64_t p1 ) {
- return min ( p0, p1 );
-}
+int64_t test_min_long(int64_t p0, int64_t p1) { return min(p0, p1); }
// CHECK: define noundef <2 x i64> @
// CHECK: call <2 x i64> @llvm.smin.v2i64
-int64_t2 test_min_long2 ( int64_t2 p0, int64_t2 p1 ) {
- return min ( p0, p1 );
-}
+int64_t2 test_min_long2(int64_t2 p0, int64_t2 p1) { return min(p0, p1); }
// CHECK: define noundef <3 x i64> @
// CHECK: call <3 x i64> @llvm.smin.v3i64
-int64_t3 test_min_long3 ( int64_t3 p0, int64_t3 p1 ) {
- return min ( p0, p1 );
-}
+int64_t3 test_min_long3(int64_t3 p0, int64_t3 p1) { return min(p0, p1); }
// CHECK: define noundef <4 x i64> @
// CHECK: call <4 x i64> @llvm.smin.v4i64
-int64_t4 test_min_long4 ( int64_t4 p0, int64_t4 p1) {
- return min ( p0, p1 );
-}
+int64_t4 test_min_long4(int64_t4 p0, int64_t4 p1) { return min(p0, p1); }
// CHECK: define noundef i64 @
// CHECK: call i64 @llvm.umin.i64(
-uint64_t test_min_long ( uint64_t p0, uint64_t p1 ) {
- return min ( p0, p1 );
-}
+uint64_t test_min_long(uint64_t p0, uint64_t p1) { return min(p0, p1); }
// CHECK: define noundef <2 x i64> @
// CHECK: call <2 x i64> @llvm.umin.v2i64
-uint64_t2 test_min_long2 ( uint64_t2 p0, uint64_t2 p1 ) {
- return min ( p0, p1 );
-}
+uint64_t2 test_min_long2(uint64_t2 p0, uint64_t2 p1) { return min(p0, p1); }
// CHECK: define noundef <3 x i64> @
// CHECK: call <3 x i64> @llvm.umin.v3i64
-uint64_t3 test_min_long3 ( uint64_t3 p0, uint64_t3 p1 ) {
- return min ( p0, p1 );
-}
+uint64_t3 test_min_long3(uint64_t3 p0, uint64_t3 p1) { return min(p0, p1); }
// CHECK: define noundef <4 x i64> @
// CHECK: call <4 x i64> @llvm.umin.v4i64
-uint64_t4 test_min_long4 ( uint64_t4 p0, uint64_t4 p1) {
- return min ( p0, p1 );
-}
-
+uint64_t4 test_min_long4(uint64_t4 p0, uint64_t4 p1) { return min(p0, p1); }
-// CHECK: define noundef half @
-// CHECK: call half @llvm.minnum.f16(
-// NO_HALF: define noundef float @"?test_min_half@@YA$halff@$halff@0@Z"(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.minnum.f16(
+// NO_HALF: define noundef float @"?test_min_half
// NO_HALF: call float @llvm.minnum.f32(
-half test_min_half ( half p0, half p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.minnum.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_min_float2@@YAT?$__vector@M$01@__clang@@T12@0@Z"(
+half test_min_half(half p0, half p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.minnum.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_min_half2
// NO_HALF: call <2 x float> @llvm.minnum.v2f32(
-half2 test_min_half2 ( half2 p0, half2 p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.minnum.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_min_float3@@YAT?$__vector@M$02@__clang@@T12@0@Z"(
+half2 test_min_half2(half2 p0, half2 p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.minnum.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_min_half3
// NO_HALF: call <3 x float> @llvm.minnum.v3f32(
-half3 test_min_half3 ( half3 p0, half3 p1 ) {
- return min ( p0, p1 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.minnum.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_min_float4@@YAT?$__vector@M$03@__clang@@T12@0@Z"(
+half3 test_min_half3(half3 p0, half3 p1) { return min(p0, p1); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.minnum.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_min_half4
// NO_HALF: call <4 x float> @llvm.minnum.v4f32(
-half4 test_min_half4 ( half4 p0, half4 p1 ) {
- return min ( p0, p1 );
-}
+half4 test_min_half4(half4 p0, half4 p1) { return min(p0, p1); }
// CHECK: define noundef float @
// CHECK: call float @llvm.minnum.f32(
-float test_min_float ( float p0, float p1 ) {
- return min ( p0, p1 );
-}
+float test_min_float(float p0, float p1) { return min(p0, p1); }
// CHECK: define noundef <2 x float> @
// CHECK: call <2 x float> @llvm.minnum.v2f32
-float2 test_min_float2 ( float2 p0, float2 p1 ) {
- return min ( p0, p1 );
-}
+float2 test_min_float2(float2 p0, float2 p1) { return min(p0, p1); }
// CHECK: define noundef <3 x float> @
// CHECK: call <3 x float> @llvm.minnum.v3f32
-float3 test_min_float3 ( float3 p0, float3 p1 ) {
- return min ( p0, p1 );
-}
+float3 test_min_float3(float3 p0, float3 p1) { return min(p0, p1); }
// CHECK: define noundef <4 x float> @
// CHECK: call <4 x float> @llvm.minnum.v4f32
-float4 test_min_float4 ( float4 p0, float4 p1) {
- return min ( p0, p1 );
-}
+float4 test_min_float4(float4 p0, float4 p1) { return min(p0, p1); }
// CHECK: define noundef double @
// CHECK: call double @llvm.minnum.f64(
-double test_min_double ( double p0, double p1 ) {
- return min ( p0, p1 );
-}
+double test_min_double(double p0, double p1) { return min(p0, p1); }
// CHECK: define noundef <2 x double> @
// CHECK: call <2 x double> @llvm.minnum.v2f64
-double2 test_min_double2 ( double2 p0, double2 p1 ) {
- return min ( p0, p1 );
-}
+double2 test_min_double2(double2 p0, double2 p1) { return min(p0, p1); }
// CHECK: define noundef <3 x double> @
// CHECK: call <3 x double> @llvm.minnum.v3f64
-double3 test_min_double3 ( double3 p0, double3 p1 ) {
- return min ( p0, p1 );
-}
+double3 test_min_double3(double3 p0, double3 p1) { return min(p0, p1); }
// CHECK: define noundef <4 x double> @
// CHECK: call <4 x double> @llvm.minnum.v4f64
-double4 test_min_double4 ( double4 p0, double4 p1) {
- return min ( p0, p1 );
-}
+double4 test_min_double4(double4 p0, double4 p1) { return min(p0, p1); }
diff --git a/clang/test/CodeGenHLSL/builtins/pow.hlsl b/clang/test/CodeGenHLSL/builtins/pow.hlsl
index 86bfe98058a6..e996ca2f3364 100644
--- a/clang/test/CodeGenHLSL/builtins/pow.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/pow.hlsl
@@ -1,89 +1,54 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-// CHECK: define noundef half @
-// CHECK: call half @llvm.pow.f16(
-// NO_HALF: define noundef float @"?test_pow_half@@YA$halff@$halff@0@Z"(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.pow.f16(
+// NO_HALF: define noundef float @"?test_pow_half
// NO_HALF: call float @llvm.pow.f32(
-half test_pow_half(half p0, half p1)
-{
- return pow(p0, p1);
-}
-// CHECK: define noundef <2 x half> @"?test_pow_half2@@YAT?$__vector@$f16@$01@__clang@@T12@0@Z"(
-// CHECK: call <2 x half> @llvm.pow.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_pow_float2@@YAT?$__vector@M$01@__clang@@T12@0@Z"(
+half test_pow_half(half p0, half p1) { return pow(p0, p1); }
+// NATIVE_HALF: define noundef <2 x half> @"?test_pow_half2
+// NATIVE_HALF: call <2 x half> @llvm.pow.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_pow_half2
// NO_HALF: call <2 x float> @llvm.pow.v2f32(
-half2 test_pow_half2(half2 p0, half2 p1)
-{
- return pow(p0, p1);
-}
-// CHECK: define noundef <3 x half> @"?test_pow_half3@@YAT?$__vector@$f16@$02@__clang@@T12@0@Z"(
-// CHECK: call <3 x half> @llvm.pow.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_pow_float3@@YAT?$__vector@M$02@__clang@@T12@0@Z"(
+half2 test_pow_half2(half2 p0, half2 p1) { return pow(p0, p1); }
+// NATIVE_HALF: define noundef <3 x half> @"?test_pow_half3
+// NATIVE_HALF: call <3 x half> @llvm.pow.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_pow_half3
// NO_HALF: call <3 x float> @llvm.pow.v3f32(
-half3 test_pow_half3(half3 p0, half3 p1)
-{
- return pow(p0, p1);
-}
-// CHECK: define noundef <4 x half> @"?test_pow_half4@@YAT?$__vector@$f16@$03@__clang@@T12@0@Z"(
-// CHECK: call <4 x half> @llvm.pow.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_pow_float4@@YAT?$__vector@M$03@__clang@@T12@0@Z"(
+half3 test_pow_half3(half3 p0, half3 p1) { return pow(p0, p1); }
+// NATIVE_HALF: define noundef <4 x half> @"?test_pow_half4
+// NATIVE_HALF: call <4 x half> @llvm.pow.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_pow_half4
// NO_HALF: call <4 x float> @llvm.pow.v4f32(
-half4 test_pow_half4(half4 p0, half4 p1)
-{
- return pow(p0, p1);
-}
+half4 test_pow_half4(half4 p0, half4 p1) { return pow(p0, p1); }
-// CHECK: define noundef float @"?test_pow_float@@YAMMM@Z"(
+// CHECK: define noundef float @"?test_pow_float
// CHECK: call float @llvm.pow.f32(
-float test_pow_float(float p0, float p1)
-{
- return pow(p0, p1);
-}
-// CHECK: define noundef <2 x float> @"?test_pow_float2@@YAT?$__vector@M$01@__clang@@T12@0@Z"(
+float test_pow_float(float p0, float p1) { return pow(p0, p1); }
+// CHECK: define noundef <2 x float> @"?test_pow_float2
// CHECK: call <2 x float> @llvm.pow.v2f32
-float2 test_pow_float2(float2 p0, float2 p1)
-{
- return pow(p0, p1);
-}
-// CHECK: define noundef <3 x float> @"?test_pow_float3@@YAT?$__vector@M$02@__clang@@T12@0@Z"(
+float2 test_pow_float2(float2 p0, float2 p1) { return pow(p0, p1); }
+// CHECK: define noundef <3 x float> @"?test_pow_float3
// CHECK: call <3 x float> @llvm.pow.v3f32
-float3 test_pow_float3(float3 p0, float3 p1)
-{
- return pow(p0, p1);
-}
-// CHECK: define noundef <4 x float> @"?test_pow_float4@@YAT?$__vector@M$03@__clang@@T12@0@Z"(
+float3 test_pow_float3(float3 p0, float3 p1) { return pow(p0, p1); }
+// CHECK: define noundef <4 x float> @"?test_pow_float4
// CHECK: call <4 x float> @llvm.pow.v4f32
-float4 test_pow_float4(float4 p0, float4 p1)
-{
- return pow(p0, p1);
-}
+float4 test_pow_float4(float4 p0, float4 p1) { return pow(p0, p1); }
// CHECK: define noundef double @"?test_pow_double@@YANNN@Z"(
// CHECK: call double @llvm.pow.f64(
-double test_pow_double(double p0, double p1)
-{
- return pow(p0, p1);
-}
+double test_pow_double(double p0, double p1) { return pow(p0, p1); }
// CHECK: define noundef <2 x double> @"?test_pow_double2@@YAT?$__vector@N$01@__clang@@T12@0@Z"(
// CHECK: call <2 x double> @llvm.pow.v2f64
-double2 test_pow_double2(double2 p0, double2 p1)
-{
- return pow(p0, p1);
-}
+double2 test_pow_double2(double2 p0, double2 p1) { return pow(p0, p1); }
// CHECK: define noundef <3 x double> @"?test_pow_double3@@YAT?$__vector@N$02@__clang@@T12@0@Z"(
// CHECK: call <3 x double> @llvm.pow.v3f64
-double3 test_pow_double3(double3 p0, double3 p1)
-{
- return pow(p0, p1);
-}
+double3 test_pow_double3(double3 p0, double3 p1) { return pow(p0, p1); }
// CHECK: define noundef <4 x double> @"?test_pow_double4@@YAT?$__vector@N$03@__clang@@T12@0@Z"(
// CHECK: call <4 x double> @llvm.pow.v4f64
-double4 test_pow_double4(double4 p0, double4 p1)
-{
- return pow(p0, p1);
-}
+double4 test_pow_double4(double4 p0, double4 p1) { return pow(p0, p1); }
diff --git a/clang/test/CodeGenHLSL/builtins/round.hlsl b/clang/test/CodeGenHLSL/builtins/round.hlsl
new file mode 100644
index 000000000000..b9f35bd3712d
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/round.hlsl
@@ -0,0 +1,53 @@
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
+
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: %elt.round = call half @llvm.round.f16(
+// NATIVE_HALF: ret half %elt.round
+// NO_HALF: define noundef float @"?test_round_half@@YA$halff@$halff@@Z"(
+// NO_HALF: %elt.round = call float @llvm.round.f32(
+// NO_HALF: ret float %elt.round
+half test_round_half(half p0) { return round(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: %elt.round = call <2 x half> @llvm.round.v2f16
+// NATIVE_HALF: ret <2 x half> %elt.round
+// NO_HALF: define noundef <2 x float> @
+// NO_HALF: %elt.round = call <2 x float> @llvm.round.v2f32(
+// NO_HALF: ret <2 x float> %elt.round
+half2 test_round_half2(half2 p0) { return round(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: %elt.round = call <3 x half> @llvm.round.v3f16
+// NATIVE_HALF: ret <3 x half> %elt.round
+// NO_HALF: define noundef <3 x float> @
+// NO_HALF: %elt.round = call <3 x float> @llvm.round.v3f32(
+// NO_HALF: ret <3 x float> %elt.round
+half3 test_round_half3(half3 p0) { return round(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: %elt.round = call <4 x half> @llvm.round.v4f16
+// NATIVE_HALF: ret <4 x half> %elt.round
+// NO_HALF: define noundef <4 x float> @
+// NO_HALF: %elt.round = call <4 x float> @llvm.round.v4f32(
+// NO_HALF: ret <4 x float> %elt.round
+half4 test_round_half4(half4 p0) { return round(p0); }
+
+// CHECK: define noundef float @
+// CHECK: %elt.round = call float @llvm.round.f32(
+// CHECK: ret float %elt.round
+float test_round_float(float p0) { return round(p0); }
+// CHECK: define noundef <2 x float> @
+// CHECK: %elt.round = call <2 x float> @llvm.round.v2f32
+// CHECK: ret <2 x float> %elt.round
+float2 test_round_float2(float2 p0) { return round(p0); }
+// CHECK: define noundef <3 x float> @
+// CHECK: %elt.round = call <3 x float> @llvm.round.v3f32
+// CHECK: ret <3 x float> %elt.round
+float3 test_round_float3(float3 p0) { return round(p0); }
+// CHECK: define noundef <4 x float> @
+// CHECK: %elt.round = call <4 x float> @llvm.round.v4f32
+// CHECK: ret <4 x float> %elt.round
+float4 test_round_float4(float4 p0) { return round(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/sin.hlsl b/clang/test/CodeGenHLSL/builtins/sin.hlsl
index 2445e6063a70..ffb522149138 100644
--- a/clang/test/CodeGenHLSL/builtins/sin.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/sin.hlsl
@@ -1,56 +1,41 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-// CHECK: define noundef half @
-// CHECK: call half @llvm.sin.f16(
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: call half @llvm.sin.f16(
// NO_HALF: define noundef float @"?test_sin_half@@YA$halff@$halff@@Z"(
// NO_HALF: call float @llvm.sin.f32(
-half test_sin_half ( half p0 ) {
- return sin ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.sin.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_sin_float2@@YAT?$__vector@M$01@__clang@@T12@@Z"(
+half test_sin_half(half p0) { return sin(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.sin.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_sin_half2
// NO_HALF: call <2 x float> @llvm.sin.v2f32(
-half2 test_sin_half2 ( half2 p0 ) {
- return sin ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.sin.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_sin_float3@@YAT?$__vector@M$02@__clang@@T12@@Z"(
+half2 test_sin_half2(half2 p0) { return sin(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.sin.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_sin_half3
// NO_HALF: call <3 x float> @llvm.sin.v3f32(
-half3 test_sin_half3 ( half3 p0 ) {
- return sin ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.sin.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_sin_float4@@YAT?$__vector@M$03@__clang@@T12@@Z"(
+half3 test_sin_half3(half3 p0) { return sin(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.sin.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_sin_half4
// NO_HALF: call <4 x float> @llvm.sin.v4f32(
-half4 test_sin_half4 ( half4 p0 ) {
- return sin ( p0 );
-}
+half4 test_sin_half4(half4 p0) { return sin(p0); }
// CHECK: define noundef float @
// CHECK: call float @llvm.sin.f32(
-float test_sin_float ( float p0 ) {
- return sin ( p0 );
-}
+float test_sin_float(float p0) { return sin(p0); }
// CHECK: define noundef <2 x float> @
// CHECK: call <2 x float> @llvm.sin.v2f32
-float2 test_sin_float2 ( float2 p0 ) {
- return sin ( p0 );
-}
+float2 test_sin_float2(float2 p0) { return sin(p0); }
// CHECK: define noundef <3 x float> @
// CHECK: call <3 x float> @llvm.sin.v3f32
-float3 test_sin_float3 ( float3 p0 ) {
- return sin ( p0 );
-}
+float3 test_sin_float3(float3 p0) { return sin(p0); }
// CHECK: define noundef <4 x float> @
// CHECK: call <4 x float> @llvm.sin.v4f32
-float4 test_sin_float4 ( float4 p0 ) {
- return sin ( p0 );
-}
+float4 test_sin_float4(float4 p0) { return sin(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/trunc.hlsl b/clang/test/CodeGenHLSL/builtins/trunc.hlsl
index 4ae3cd20257e..6078aae5f873 100644
--- a/clang/test/CodeGenHLSL/builtins/trunc.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/trunc.hlsl
@@ -1,56 +1,47 @@
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
-// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-// CHECK: define noundef half @
-// CHECK: call half @llvm.trunc.f16(
-// NO_HALF: define noundef float @"?test_trunc_half@@YA$halff@$halff@@Z"(
+// NATIVE_HALF: define noundef half @"?test_trunc_half
+// NATIVE_HALF: call half @llvm.trunc.f16(
+// NO_HALF: define noundef float @"?test_trunc_half
// NO_HALF: call float @llvm.trunc.f32(
-half test_trunc_half ( half p0 ) {
- return trunc ( p0 );
-}
-// CHECK: define noundef <2 x half> @
-// CHECK: call <2 x half> @llvm.trunc.v2f16
-// NO_HALF: define noundef <2 x float> @"?test_trunc_float2@@YAT?$__vector@M$01@__clang@@T12@@Z"(
+half test_trunc_half(half p0) { return trunc(p0); }
+
+// NATIVE_HALF: define noundef <2 x half> @"?test_trunc_half2
+// NATIVE_HALF: call <2 x half> @llvm.trunc.v2f16
+// NO_HALF: define noundef <2 x float> @"?test_trunc_half2
// NO_HALF: call <2 x float> @llvm.trunc.v2f32(
-half2 test_trunc_half2 ( half2 p0 ) {
- return trunc ( p0 );
-}
-// CHECK: define noundef <3 x half> @
-// CHECK: call <3 x half> @llvm.trunc.v3f16
-// NO_HALF: define noundef <3 x float> @"?test_trunc_float3@@YAT?$__vector@M$02@__clang@@T12@@Z"(
+half2 test_trunc_half2(half2 p0) { return trunc(p0); }
+
+// NATIVE_HALF: define noundef <3 x half> @"?test_trunc_half3
+// NATIVE_HALF: call <3 x half> @llvm.trunc.v3f16
+// NO_HALF: define noundef <3 x float> @"?test_trunc_half3
// NO_HALF: call <3 x float> @llvm.trunc.v3f32(
-half3 test_trunc_half3 ( half3 p0 ) {
- return trunc ( p0 );
-}
-// CHECK: define noundef <4 x half> @
-// CHECK: call <4 x half> @llvm.trunc.v4f16
-// NO_HALF: define noundef <4 x float> @"?test_trunc_float4@@YAT?$__vector@M$03@__clang@@T12@@Z"(
+half3 test_trunc_half3(half3 p0) { return trunc(p0); }
+
+// NATIVE_HALF: define noundef <4 x half> @"?test_trunc_half4
+// NATIVE_HALF: call <4 x half> @llvm.trunc.v4f16
+// NO_HALF: define noundef <4 x float> @"?test_trunc_half4
// NO_HALF: call <4 x float> @llvm.trunc.v4f32(
-half4 test_trunc_half4 ( half4 p0 ) {
- return trunc ( p0 );
-}
+half4 test_trunc_half4(half4 p0) { return trunc(p0); }
-// CHECK: define noundef float @
+// CHECK: define noundef float @"?test_trunc_float
// CHECK: call float @llvm.trunc.f32(
-float test_trunc_float ( float p0 ) {
- return trunc ( p0 );
-}
-// CHECK: define noundef <2 x float> @
+float test_trunc_float(float p0) { return trunc(p0); }
+
+// CHECK: define noundef <2 x float> @"?test_trunc_float2
// CHECK: call <2 x float> @llvm.trunc.v2f32
-float2 test_trunc_float2 ( float2 p0 ) {
- return trunc ( p0 );
-}
-// CHECK: define noundef <3 x float> @
+float2 test_trunc_float2(float2 p0) { return trunc(p0); }
+
+// CHECK: define noundef <3 x float> @"?test_trunc_float3
// CHECK: call <3 x float> @llvm.trunc.v3f32
-float3 test_trunc_float3 ( float3 p0 ) {
- return trunc ( p0 );
-}
-// CHECK: define noundef <4 x float> @
+float3 test_trunc_float3(float3 p0) { return trunc(p0); }
+
+// CHECK: define noundef <4 x float> @"?test_trunc_float4
// CHECK: call <4 x float> @llvm.trunc.v4f32
-float4 test_trunc_float4 ( float4 p0 ) {
- return trunc ( p0 );
-}
+float4 test_trunc_float4(float4 p0) { return trunc(p0); }
diff --git a/clang/test/Driver/aarch64-rdm.c b/clang/test/Driver/aarch64-rdm.c
index f2542b381e7c..62e1a4def4ce 100644
--- a/clang/test/Driver/aarch64-rdm.c
+++ b/clang/test/Driver/aarch64-rdm.c
@@ -1,13 +1,16 @@
// RUN: %clang --target=aarch64-none-elf -march=armv8a+rdm -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-RDM %s
+// RUN: %clang --target=aarch64-none-elf -march=armv8a+rdma -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-RDM %s
// RUN: %clang --target=aarch64-none-elf -mcpu=generic+rdm -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-RDM %s
// RUN: %clang --target=aarch64-none-elf -mcpu=falkor -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-RDM %s
// RUN: %clang --target=aarch64-none-elf -mcpu=thunderx2t99 -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-RDM %s
// CHECK-RDM: "-target-feature" "+rdm"
// RUN: %clang --target=aarch64-none-elf -march=armv8a+nordm -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-NORDM %s
+// RUN: %clang --target=aarch64-none-elf -march=armv8a+nordma -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-NORDM %s
// RUN: %clang --target=aarch64-none-elf -mcpu=generic+nordm -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-NORDM %s
// CHECK-NORDM-NOT: "-target-feature" "+rdm"
//
// RUN: %clang --target=aarch64-none-elf -march=armv8.1a -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-RDM %s
// RUN: %clang --target=aarch64-none-elf -march=armv8.1a+nordm -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-NORDM-DEFAULT %s
+// RUN: %clang --target=aarch64-none-elf -march=armv8.1a+nordma -### -c %s 2>&1 | FileCheck --check-prefix=CHECK-NORDM-DEFAULT %s
// CHECK-NORDM-DEFAULT: "-target-feature" "-rdm"
diff --git a/clang/test/Driver/android-link.cpp b/clang/test/Driver/android-link.cpp
index fa9cbc5d0c7a..f9bdd00507d7 100644
--- a/clang/test/Driver/android-link.cpp
+++ b/clang/test/Driver/android-link.cpp
@@ -17,9 +17,10 @@
//
// RUN: %clang -target aarch64-none-linux-android \
// RUN: -### -v %s 2> %t
-// RUN: FileCheck -check-prefix=MAX-PAGE-SIZE < %t %s
+// RUN: FileCheck -check-prefix=MAX-PAGE-SIZE-AARCH64 < %t %s
//
// GENERIC-ARM: --fix-cortex-a53-843419
// CORTEX-A53: --fix-cortex-a53-843419
// CORTEX-A57-NOT: --fix-cortex-a53-843419
// MAX-PAGE-SIZE: "-z" "max-page-size=4096"
+// MAX-PAGE-SIZE-AARCH64: "-z" "max-page-size=16384"
diff --git a/clang/test/Driver/basic-block-address-map.c b/clang/test/Driver/basic-block-address-map.c
index 022f972b412d..12393e8ebfd5 100644
--- a/clang/test/Driver/basic-block-address-map.c
+++ b/clang/test/Driver/basic-block-address-map.c
@@ -1,8 +1,9 @@
-// RUN: %clang -### -target x86_64 -fbasic-block-address-map %s -S 2>&1 | FileCheck -check-prefix=CHECK-PRESENT %s
+// RUN: %clang -### --target=x86_64 -fbasic-block-address-map %s -S 2>&1 | FileCheck -check-prefix=CHECK-PRESENT %s
+// RUN: %clang -### --target=aarch64 -fbasic-block-address-map %s -S 2>&1 | FileCheck -check-prefix=CHECK-PRESENT %s
// CHECK-PRESENT: -fbasic-block-address-map
-// RUN: %clang -### -target x86_64 -fno-basic-block-address-map %s -S 2>&1 | FileCheck %s --check-prefix=CHECK-ABSENT
+// RUN: %clang -### --target=x86_64 -fno-basic-block-address-map %s -S 2>&1 | FileCheck %s --check-prefix=CHECK-ABSENT
// CHECK-ABSENT-NOT: -fbasic-block-address-map
-// RUN: not %clang -c -target x86_64-apple-darwin10 -fbasic-block-address-map %s -S 2>&1 | FileCheck -check-prefix=CHECK-TRIPLE %s
+// RUN: not %clang -c --target=x86_64-apple-darwin10 -fbasic-block-address-map %s -S 2>&1 | FileCheck -check-prefix=CHECK-TRIPLE %s
// CHECK-TRIPLE: error: unsupported option '-fbasic-block-address-map' for target
diff --git a/clang/test/Driver/darwin-header-search-libcxx.cpp b/clang/test/Driver/darwin-header-search-libcxx.cpp
index 70cc06090a99..5695f53683ba 100644
--- a/clang/test/Driver/darwin-header-search-libcxx.cpp
+++ b/clang/test/Driver/darwin-header-search-libcxx.cpp
@@ -193,7 +193,7 @@
// RUN: ln -sf %t/install/bin/clang %t/symlinked1/bin/clang
// RUN: mkdir -p %t/symlinked1/include/c++/v1
-// RUN: %t/symlinked1/bin/clang -### %s -fsyntax-only 2>&1 \
+// RUN: %t/symlinked1/bin/clang -### %s -no-canonical-prefixes -fsyntax-only 2>&1 \
// RUN: --target=x86_64-apple-darwin \
// RUN: -stdlib=libc++ \
// RUN: -isysroot %S/Inputs/basic_darwin_sdk_usr_cxx_v1 \
diff --git a/clang/test/Driver/mingw-sysroot.cpp b/clang/test/Driver/mingw-sysroot.cpp
index 50152b2ca210..5d512e666970 100644
--- a/clang/test/Driver/mingw-sysroot.cpp
+++ b/clang/test/Driver/mingw-sysroot.cpp
@@ -50,10 +50,12 @@
// CHECK_TESTROOT_GCC_EXPLICIT: "-internal-isystem" "{{[^"]+}}/testroot-gcc{{/|\\\\}}include"
-// If there's a matching sysroot next to the clang binary itself, prefer that
+// If -no-canonical-prefixes and there's a matching sysroot next to the clang binary itself, prefer that
// over a gcc in the path:
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang -target x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang --target=x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC2 %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang --target=x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s -no-canonical-prefixes 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG %s
+// CHECK_TESTROOT_GCC2: "{{[^"]+}}/testroot-gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
// CHECK_TESTROOT_CLANG: "{{[^"]+}}/testroot-clang{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
@@ -82,7 +84,7 @@
// that indicates that we did choose the right base, even if this particular directory
// actually doesn't exist here.
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang-native/bin/clang -target x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_NATIVE %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang-native/bin/clang -no-canonical-prefixes --target=x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_NATIVE %s
// CHECK_TESTROOT_CLANG_NATIVE: "{{[^"]+}}/testroot-clang-native{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
@@ -93,12 +95,12 @@
// that defaults to x86_64 mingw, but it's easier to test this in cross setups
// with symlinks, like the other tests here.)
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang --target=x86_64-w64-mingw32 -m32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_I686 %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang -no-canonical-prefixes --target=x86_64-w64-mingw32 -m32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_I686 %s
// CHECK_TESTROOT_CLANG_I686: "{{[^"]+}}/testroot-clang{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
// If the user calls clang with a custom literal triple, make sure this maps
// to sysroots with the matching spelling.
-// RUN: %T/testroot-custom-triple/bin/clang --target=x86_64-w64-mingw32foo -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CUSTOM_TRIPLE %s
+// RUN: %T/testroot-custom-triple/bin/clang -no-canonical-prefixes --target=x86_64-w64-mingw32foo -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CUSTOM_TRIPLE %s
// CHECK_TESTROOT_CUSTOM_TRIPLE: "{{[^"]+}}/testroot-custom-triple{{/|\\\\}}x86_64-w64-mingw32foo{{/|\\\\}}include"
diff --git a/clang/test/Driver/no-canonical-prefixes.c b/clang/test/Driver/no-canonical-prefixes.c
index fb54f85f959a..669e56639284 100644
--- a/clang/test/Driver/no-canonical-prefixes.c
+++ b/clang/test/Driver/no-canonical-prefixes.c
@@ -26,7 +26,7 @@
// RUN: | FileCheck --check-prefix=NON-CANONICAL %s
//
// FIXME: This should really be '.real'.
-// CANONICAL: InstalledDir: {{.*}}.fake
+// CANONICAL: InstalledDir: {{.*}}bin
// CANONICAL: {{[/|\\]*}}clang{{.*}}" -cc1
//
// NON-CANONICAL: InstalledDir: .{{$}}
diff --git a/clang/test/Driver/openmp-offload-gpu.c b/clang/test/Driver/openmp-offload-gpu.c
index 5da74a35d87a..f7b06c9ec595 100644
--- a/clang/test/Driver/openmp-offload-gpu.c
+++ b/clang/test/Driver/openmp-offload-gpu.c
@@ -101,17 +101,6 @@
/// ###########################################################################
-/// Check that the warning is thrown when the libomptarget bitcode library is not found.
-/// Libomptarget requires sm_52 or newer so an sm_52 bitcode library should never exist.
-// RUN: not %clang -### -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda \
-// RUN: -Xopenmp-target -march=sm_52 --cuda-path=%S/Inputs/CUDA_102/usr/local/cuda \
-// RUN: -fopenmp-relocatable-target -save-temps %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHK-BCLIB-WARN %s
-
-// CHK-BCLIB-WARN: no library 'libomptarget-nvptx-sm_52.bc' found in the default clang lib directory or in LIBRARY_PATH; use '--libomptarget-nvptx-bc-path' to specify nvptx bitcode library
-
-/// ###########################################################################
-
/// Check that the error is thrown when the libomptarget bitcode library does not exist.
// RUN: not %clang -### -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda \
// RUN: -Xopenmp-target -march=sm_52 --cuda-path=%S/Inputs/CUDA_102/usr/local/cuda \
diff --git a/clang/test/Driver/program-path-priority.c b/clang/test/Driver/program-path-priority.c
index ee931dd7a9a3..c940c4ced944 100644
--- a/clang/test/Driver/program-path-priority.c
+++ b/clang/test/Driver/program-path-priority.c
@@ -36,7 +36,7 @@
// RUN: touch %t/notreal-none-elf-gcc && chmod +x %t/notreal-none-elf-gcc
// RUN: env "PATH=" %t/clang -### -target notreal-none-elf %s 2>&1 | \
// RUN: FileCheck --check-prefix=PROG_PATH_NOTREAL_GCC %s
-// PROG_PATH_NOTREAL_GCC: notreal-none-elf-gcc"
+// PROG_PATH_NOTREAL_GCC: notreal-none-unknown-elf
/// <triple>-gcc on the PATH is found
// RUN: mkdir -p %t/env
@@ -57,7 +57,7 @@
// RUN: touch %t/gcc && chmod +x %t/gcc
// RUN: env "PATH=" %t/clang -### -target notreal-none-elf %s 2>&1 | \
// RUN: FileCheck --check-prefix=NOTREAL_GCC_PREFERRED %s
-// NOTREAL_GCC_PREFERRED: notreal-none-elf-gcc"
+// NOTREAL_GCC_PREFERRED: notreal-none-unknown-elf"
// NOTREAL_GCC_PREFERRED-NOT: /gcc"
/// <triple>-gcc on the PATH is preferred to gcc in program path
@@ -125,6 +125,9 @@
/// Only if there is nothing in the prefix will we search other paths
/// -f in case $DEFAULT_TRIPLE == %target_triple
// RUN: rm -f %t/prefix/$DEFAULT_TRIPLE-gcc %t/prefix/%target_triple-gcc %t/prefix/gcc
-// RUN: env "PATH=" %t/clang -### -target notreal-none-elf %s -B %t/prefix 2>&1 | \
-// RUN: FileCheck --check-prefix=EMPTY_PREFIX_DIR %s
-// EMPTY_PREFIX_DIR: notreal-none-elf-gcc"
+// RUN: env "PATH=" %t/clang -### -canonical-prefixes --target=notreal-none-elf %s -B %t/prefix 2>&1 | \
+// RUN: FileCheck --check-prefix=EMPTY_PREFIX_DIR1 %s
+// EMPTY_PREFIX_DIR1: gcc"
+// RUN: env "PATH=" %t/clang -### -no-canonical-prefixes --target=notreal-none-elf %s -B %t/prefix 2>&1 | \
+// RUN: FileCheck --check-prefix=EMPTY_PREFIX_DIR2 %s
+// EMPTY_PREFIX_DIR2: notreal-none-elf-gcc"
diff --git a/clang/test/Driver/rocm-detect.hip b/clang/test/Driver/rocm-detect.hip
index 0db994af556f..8b15c322e3fb 100644
--- a/clang/test/Driver/rocm-detect.hip
+++ b/clang/test/Driver/rocm-detect.hip
@@ -102,7 +102,7 @@
// RUN: rm -rf %t/rocm-spack
// RUN: cp -r %S/Inputs/rocm-spack %t
// RUN: ln -fs %clang %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang
-// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -v \
+// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -no-canonical-prefixes -v \
// RUN: -resource-dir=%t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/lib/clang \
// RUN: -target x86_64-linux-gnu --cuda-gpu-arch=gfx900 --print-rocm-search-dirs %s 2>&1 \
// RUN: | FileCheck -check-prefixes=SPACK %s
@@ -111,7 +111,7 @@
// ROCm release. --hip-path and --rocm-device-lib-path can be used to specify them.
// RUN: cp -r %t/rocm-spack/hip-* %t/rocm-spack/hip-4.0.0-abcd
-// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -v \
+// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -no-canonical-prefixes -v \
// RUN: -target x86_64-linux-gnu --cuda-gpu-arch=gfx900 \
// RUN: --hip-path=%t/rocm-spack/hip-4.0.0-abcd \
// RUN: %s 2>&1 | FileCheck -check-prefixes=SPACK-SET %s
diff --git a/clang/test/Driver/split-debug.c b/clang/test/Driver/split-debug.c
index a2a3dc023545..968f33b4cc03 100644
--- a/clang/test/Driver/split-debug.c
+++ b/clang/test/Driver/split-debug.c
@@ -124,8 +124,3 @@
// G1_NOSPLIT: "-debug-info-kind=line-tables-only"
// G1_NOSPLIT-NOT: "-split-dwarf-file"
// G1_NOSPLIT-NOT: "-split-dwarf-output"
-
-/// Do not generate -ggnu-pubnames for -glldb
-// RUN: %clang -### -c -target x86_64 -gsplit-dwarf -g -glldb %s 2>&1 | FileCheck %s --check-prefixes=GLLDBSPLIT
-
-// GLLDBSPLIT-NOT: "-ggnu-pubnames"
diff --git a/clang/test/InstallAPI/basic.test b/clang/test/InstallAPI/basic.test
index 22b04792ca2c..5b41ccd517b0 100644
--- a/clang/test/InstallAPI/basic.test
+++ b/clang/test/InstallAPI/basic.test
@@ -16,6 +16,11 @@
// CHECK-NOT: warning:
//--- basic_inputs.json
+{
+ "headers": [
+ ],
+ "version": "3"
+}
//--- expected.tbd
{
diff --git a/clang/test/InstallAPI/objcclasses.test b/clang/test/InstallAPI/objcclasses.test
new file mode 100644
index 000000000000..d32291c64c47
--- /dev/null
+++ b/clang/test/InstallAPI/objcclasses.test
@@ -0,0 +1,85 @@
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+// RUN: sed -e "s|DSTROOT|%/t|g" %t/inputs.json.in > %t/inputs.json
+
+// RUN: clang-installapi -target arm64-apple-macos13.1 \
+// RUN: -F%t -install_name /System/Library/Frameworks/Foo.framework/Foo \
+// RUN: %t/inputs.json -o %t/outputs.tbd -v 2>&1 | FileCheck %s --check-prefix=VERBOSE
+// RUN: llvm-readtapi -compare %t/outputs.tbd %t/expected.tbd 2>&1 | FileCheck %s --allow-empty
+
+// VERBOSE: Public Headers:
+// VERBOSE-NEXT: #import <Foo/Foo.h>
+// CHECK-NOT: error:
+// CHECK-NOT: warning:
+
+//--- Foo.framework/Headers/Foo.h
+// Ignore forward declaration.
+@class NSObject;
+
+@interface Visible
+@end
+
+__attribute__((visibility("hidden")))
+@interface Hidden
+@end
+
+__attribute__((objc_exception))
+@interface Exception
+@end
+
+//--- inputs.json.in
+{
+ "headers": [ {
+ "path" : "DSTROOT/Foo.framework/Headers/Foo.h",
+ "type" : "public"
+ }],
+ "version": "3"
+}
+
+//--- expected.tbd
+{
+ "main_library": {
+ "compatibility_versions": [
+ {
+ "version": "0"
+ }
+ ],
+ "current_versions": [
+ {
+ "version": "0"
+ }
+ ],
+ "exported_symbols": [
+ {
+ "data": {
+ "objc_class": [
+ "Exception",
+ "Visible"
+ ],
+ "objc_eh_type": [
+ "Exception"
+ ]
+ }
+ }
+ ],
+ "flags": [
+ {
+ "attributes": [
+ "not_app_extension_safe"
+ ]
+ }
+ ],
+ "install_names": [
+ {
+ "name": "/System/Library/Frameworks/Foo.framework/Foo"
+ }
+ ],
+ "target_info": [
+ {
+ "min_deployment": "13.1",
+ "target": "arm64-macos"
+ }
+ ]
+ },
+ "tapi_tbd_version": 5
+}
diff --git a/clang/test/InstallAPI/variables.test b/clang/test/InstallAPI/variables.test
new file mode 100644
index 000000000000..6272867911f1
--- /dev/null
+++ b/clang/test/InstallAPI/variables.test
@@ -0,0 +1,63 @@
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+// RUN: sed -e "s|SRC_DIR|%/t|g" %t/vars_inputs.json.in > %t/vars_inputs.json
+
+/// Check multiple targets are captured.
+// RUN: clang-installapi -target arm64-apple-macos13.1 -target arm64e-apple-macos13.1 \
+// RUN: -fapplication-extension -install_name /usr/lib/vars.dylib \
+// RUN: %t/vars_inputs.json -o %t/vars.tbd 2>&1 | FileCheck %s --allow-empty
+// RUN: llvm-readtapi -compare %t/vars.tbd %t/expected.tbd 2>&1 | FileCheck %s --allow-empty
+
+// CHECK-NOT: error:
+// CHECK-NOT: warning:
+
+//--- vars.h
+extern int foo;
+
+//--- vars_inputs.json.in
+{
+ "headers": [ {
+ "path" : "SRC_DIR/vars.h",
+ "type" : "public"
+ }],
+ "version": "3"
+}
+
+//--- expected.tbd
+{
+ "main_library": {
+ "compatibility_versions": [
+ {
+ "version": "0"
+ }],
+ "current_versions": [
+ {
+ "version": "0"
+ }],
+ "install_names": [
+ {
+ "name": "/usr/lib/vars.dylib"
+ }
+ ],
+ "exported_symbols": [
+ {
+ "data": {
+ "global": [
+ "_foo"
+ ]
+ }
+ }
+ ],
+ "target_info": [
+ {
+ "min_deployment": "13.1",
+ "target": "arm64-macos"
+ },
+ {
+ "min_deployment": "13.1",
+ "target": "arm64e-macos"
+ }
+ ]
+ },
+ "tapi_tbd_version": 5
+}
diff --git a/clang/test/Misc/warning-flags.c b/clang/test/Misc/warning-flags.c
index 9d4cac9e39b4..bb3c7d816d2f 100644
--- a/clang/test/Misc/warning-flags.c
+++ b/clang/test/Misc/warning-flags.c
@@ -18,7 +18,7 @@ This test serves two purposes:
The list of warnings below should NEVER grow. It should gradually shrink to 0.
-CHECK: Warnings without flags (66):
+CHECK: Warnings without flags (67):
CHECK-NEXT: ext_expected_semi_decl_list
CHECK-NEXT: ext_explicit_specialization_storage_class
@@ -58,6 +58,7 @@ CHECK-NEXT: warn_ignoring_ftabstop_value
CHECK-NEXT: warn_implements_nscopying
CHECK-NEXT: warn_incompatible_qualified_id
CHECK-NEXT: warn_invalid_asm_cast_lvalue
+CHECK-NEXT: warn_invalid_cpu_supports
CHECK-NEXT: warn_maynot_respond
CHECK-NEXT: warn_method_param_redefinition
CHECK-NEXT: warn_missing_case_for_condition
diff --git a/clang/test/OpenMP/interop_codegen.cpp b/clang/test/OpenMP/interop_codegen.cpp
new file mode 100644
index 000000000000..31df2f1ba58c
--- /dev/null
+++ b/clang/test/OpenMP/interop_codegen.cpp
@@ -0,0 +1,45 @@
+// expected-no-diagnostics
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s
+
+#ifndef HEADER
+#define HEADER
+
+typedef void *omp_interop_t;
+#define omp_interop_none 0
+#define omp_ipr_fr_id -1
+typedef long omp_intptr_t;
+#define NULL 0
+
+extern omp_intptr_t omp_get_interop_int(const omp_interop_t, int, int *);
+
+int main() {
+ omp_interop_t obj1 = omp_interop_none;
+ omp_interop_t obj2 = omp_interop_none;
+ omp_interop_t i1 = omp_interop_none;
+ omp_interop_t i2 = omp_interop_none;
+ omp_interop_t i3 = omp_interop_none;
+ omp_interop_t i4 = omp_interop_none;
+ omp_interop_t i5 = omp_interop_none;
+
+ #pragma omp interop init(targetsync: obj1) init(targetsync: obj2)
+ int id = (int )omp_get_interop_int(obj1, omp_ipr_fr_id, NULL);
+ int id1 = (int )omp_get_interop_int(obj2, omp_ipr_fr_id, NULL);
+
+ #pragma omp interop init(target,targetsync: i1) use(i2) use(i3) destroy(i4) destroy(i5)
+ int id2 = (int )omp_get_interop_int(i1, omp_ipr_fr_id, NULL);
+ int id3 = (int )omp_get_interop_int(i2, omp_ipr_fr_id, NULL);
+
+
+}
+#endif
+
+// CHECK-LABEL: define {{.+}}main{{.+}}
+// CHECK: call {{.+}}__tgt_interop_init({{.+}}obj1{{.*}})
+// CHECK: call {{.+}}__tgt_interop_init({{.+}}obj2{{.*}})
+// CHECK: call {{.+}}__tgt_interop_init({{.+}}i1{{.*}})
+// CHECK: call {{.+}}__tgt_interop_destroy({{.+}}i4{{.*}})
+// CHECK: call {{.+}}__tgt_interop_destroy({{.+}}i5{{.*}})
+// CHECK: call {{.+}}__tgt_interop_use({{.+}}i2{{.*}})
+// CHECK: call {{.+}}__tgt_interop_use({{.+}}i3{{.*}})
diff --git a/clang/test/OpenMP/scan_ast_print.cpp b/clang/test/OpenMP/scan_ast_print.cpp
index 3bbd3b60c3e8..82cb13eb6e70 100644
--- a/clang/test/OpenMP/scan_ast_print.cpp
+++ b/clang/test/OpenMP/scan_ast_print.cpp
@@ -19,21 +19,39 @@ T tmain(T argc) {
for (int i = 0; i < 10; ++i) {
#pragma omp scan inclusive(a)
}
+#pragma omp parallel for reduction(inscan, +:a)
+ for (int i = 0; i < 10; ++i) {
+#pragma omp scan inclusive(a)
+ }
return a + argc;
}
// CHECK: static T a;
// CHECK-NEXT: #pragma omp for reduction(inscan, +: a)
// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
// CHECK-NEXT: #pragma omp scan inclusive(a){{$}}
+
+// CHECK: #pragma omp parallel for reduction(inscan, +: a)
+// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
+// CHECK-NEXT: #pragma omp scan inclusive(a){{$}}
+
// CHECK: static int a;
// CHECK-NEXT: #pragma omp for reduction(inscan, +: a)
// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
// CHECK-NEXT: #pragma omp scan inclusive(a)
+
+// CHECK: #pragma omp parallel for reduction(inscan, +: a)
+// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
+// CHECK-NEXT: #pragma omp scan inclusive(a)
+
// CHECK: static char a;
// CHECK-NEXT: #pragma omp for reduction(inscan, +: a)
// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
// CHECK-NEXT: #pragma omp scan inclusive(a)
+// CHECK: #pragma omp parallel for reduction(inscan, +: a)
+// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
+// CHECK-NEXT: #pragma omp scan inclusive(a)
+
int main(int argc, char **argv) {
static int a;
// CHECK: static int a;
diff --git a/clang/test/Preprocessor/has_attribute.cpp b/clang/test/Preprocessor/has_attribute.cpp
index 33546dbb175f..00ec57615c84 100644
--- a/clang/test/Preprocessor/has_attribute.cpp
+++ b/clang/test/Preprocessor/has_attribute.cpp
@@ -1,4 +1,6 @@
+// RUN: %clang_cc1 -triple i386-unknown-unknown -fms-compatibility -std=c++03 -E -P %s -o - | FileCheck %s --check-prefixes=CHECK,ITANIUM --implicit-check-not=:
// RUN: %clang_cc1 -triple i386-unknown-unknown -fms-compatibility -std=c++11 -E -P %s -o - | FileCheck %s --check-prefixes=CHECK,ITANIUM --implicit-check-not=:
+// RUN: %clang_cc1 -triple i386-windows -fms-compatibility -std=c++03 -E -P %s -o - | FileCheck %s --check-prefixes=CHECK,WINDOWS --implicit-check-not=:
// RUN: %clang_cc1 -triple i386-windows -fms-compatibility -std=c++11 -E -P %s -o - | FileCheck %s --check-prefixes=CHECK,WINDOWS --implicit-check-not=:
#define CXX11(x) x: __has_cpp_attribute(x)
@@ -65,7 +67,7 @@ CXX11(unlikely)
// CHECK: likely: 201803L
// CHECK: maybe_unused: 201603L
// ITANIUM: no_unique_address: 201803L
-// WINDOWS: no_unique_address: 0
+// WINDOWS: no_unique_address: 0
// ITANIUM: msvc::no_unique_address: 0
// WINDOWS: msvc::no_unique_address: 201803L
// CHECK: nodiscard: 201907L
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index ea81c6620443..664279cb1239 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -74,6 +74,7 @@
// CHECK-NOT: __riscv_xventanacondops {{.*$}}
// CHECK-NOT: __riscv_za128rs {{.*$}}
// CHECK-NOT: __riscv_za64rs {{.*$}}
+// CHECK-NOT: __riscv_zacas {{.*$}}
// CHECK-NOT: __riscv_zawrs {{.*$}}
// CHECK-NOT: __riscv_zba {{.*$}}
// CHECK-NOT: __riscv_zbb {{.*$}}
@@ -166,7 +167,6 @@
// CHECK-NOT: __riscv_ssqosid{{.*$}}
// CHECK-NOT: __riscv_supm{{.*$}}
// CHECK-NOT: __riscv_zaamo {{.*$}}
-// CHECK-NOT: __riscv_zacas {{.*$}}
// CHECK-NOT: __riscv_zalasr {{.*$}}
// CHECK-NOT: __riscv_zalrsc {{.*$}}
// CHECK-NOT: __riscv_zcmop {{.*$}}
@@ -660,6 +660,14 @@
// RUN: -o - | FileCheck --check-prefix=CHECK-ZA64RS-EXT %s
// CHECK-ZA64RS-EXT: __riscv_za64rs 1000000{{$}}
+// RUN: %clang --target=riscv32 \
+// RUN: -march=rv32i_zacas1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
+// RUN: %clang --target=riscv64 \
+// RUN: -march=rv64i_zacas1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
+// CHECK-ZACAS-EXT: __riscv_zacas 1000000{{$}}
+
// RUN: %clang --target=riscv32-unknown-linux-gnu \
// RUN: -march=rv32izawrs -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZAWRS-EXT %s
@@ -1486,14 +1494,6 @@
// CHECK-ZAAMO-EXT: __riscv_zaamo 2000{{$}}
// RUN: %clang --target=riscv32 -menable-experimental-extensions \
-// RUN: -march=rv32i_zacas1p0 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
-// RUN: %clang --target=riscv64 -menable-experimental-extensions \
-// RUN: -march=rv64i_zacas1p0 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
-// CHECK-ZACAS-EXT: __riscv_zacas 1000000{{$}}
-
-// RUN: %clang --target=riscv32 -menable-experimental-extensions \
// RUN: -march=rv32i_zalasr0p1 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZALASR-EXT %s
// RUN: %clang --target=riscv64 -menable-experimental-extensions \
diff --git a/clang/test/Sema/aarch64-cpu-supports.c b/clang/test/Sema/aarch64-cpu-supports.c
index 24aae9542dbc..ddeed7c5bc9e 100644
--- a/clang/test/Sema/aarch64-cpu-supports.c
+++ b/clang/test/Sema/aarch64-cpu-supports.c
@@ -5,19 +5,19 @@ int test_aarch64_features(void) {
// expected-error@+1 {{expression is not a string literal}}
if (__builtin_cpu_supports(ssbs2))
return 1;
- // expected-error@+1 {{invalid cpu feature string}}
+ // expected-warning@+1 {{invalid cpu feature string}}
if (__builtin_cpu_supports(""))
return 2;
- // expected-error@+1 {{invalid cpu feature string}}
+ // expected-warning@+1 {{invalid cpu feature string}}
if (__builtin_cpu_supports("pmull128"))
return 3;
- // expected-error@+1 {{invalid cpu feature string}}
+ // expected-warning@+1 {{invalid cpu feature string}}
if (__builtin_cpu_supports("sve2,rpres"))
return 4;
- // expected-error@+1 {{invalid cpu feature string}}
+ // expected-warning@+1 {{invalid cpu feature string}}
if (__builtin_cpu_supports("dgh+sve2-pmull"))
return 5;
- // expected-error@+1 {{invalid cpu feature string}}
+ // expected-warning@+1 {{invalid cpu feature string}}
if (__builtin_cpu_supports("default"))
return 6;
if (__builtin_cpu_supports(" ssbs + bti "))
diff --git a/clang/test/Sema/aix-attr-tls_model.c b/clang/test/Sema/aix-attr-tls_model.c
index 9c22d6cceed8..7c2047bced93 100644
--- a/clang/test/Sema/aix-attr-tls_model.c
+++ b/clang/test/Sema/aix-attr-tls_model.c
@@ -6,6 +6,6 @@
#endif
static __thread int y __attribute((tls_model("global-dynamic"))); // no-warning
-static __thread int y __attribute((tls_model("local-dynamic"))); // expected-error {{TLS model 'local-dynamic' is not yet supported on AIX}}
+static __thread int y __attribute((tls_model("local-dynamic"))); // expected-no-diagnostics
static __thread int y __attribute((tls_model("initial-exec"))); // no-warning
static __thread int y __attribute((tls_model("local-exec"))); // no-warning
diff --git a/clang/test/Sema/attr-target-clones-aarch64.c b/clang/test/Sema/attr-target-clones-aarch64.c
index 4054b7c837ec..0ce277f41884 100644
--- a/clang/test/Sema/attr-target-clones-aarch64.c
+++ b/clang/test/Sema/attr-target-clones-aarch64.c
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -triple aarch64-linux-gnu -fsyntax-only -verify %s
-void __attribute__((target_clones("fp16+sve2-aes", "sb+sve2-sha3+rcpc3+mops"))) no_def(void);
+void __attribute__((target_clones("fp16+sve2-aes", "sb+sve2-sha3+rcpc3+mops", "rdma"))) no_def(void);
// expected-warning@+1 {{unsupported 'default' in the 'target_clones' attribute string; 'target_clones' attribute ignored}}
void __attribute__((target_clones("default+sha3"))) warn1(void);
diff --git a/clang/test/Sema/builtin-cpu-supports.c b/clang/test/Sema/builtin-cpu-supports.c
index 733d797f3ff8..51ee9661807f 100644
--- a/clang/test/Sema/builtin-cpu-supports.c
+++ b/clang/test/Sema/builtin-cpu-supports.c
@@ -7,7 +7,7 @@ extern const char *str;
int main(void) {
#ifdef __x86_64__
- if (__builtin_cpu_supports("ss")) // expected-error {{invalid cpu feature string}}
+ if (__builtin_cpu_supports("ss")) // expected-warning {{invalid cpu feature string}}
a("sse4.2");
if (__builtin_cpu_supports(str)) // expected-error {{expression is not a string literal}}
@@ -25,9 +25,9 @@ int main(void) {
(void)__builtin_cpu_supports("x86-64-v2");
(void)__builtin_cpu_supports("x86-64-v3");
(void)__builtin_cpu_supports("x86-64-v4");
- (void)__builtin_cpu_supports("x86-64-v5"); // expected-error {{invalid cpu feature string for builtin}}
+ (void)__builtin_cpu_supports("x86-64-v5"); // expected-warning {{invalid cpu feature string for builtin}}
#else
- if (__builtin_cpu_supports("neon")) // expected-error {{invalid cpu feature string for builtin}}
+ if (__builtin_cpu_supports("neon")) // expected-warning {{invalid cpu feature string for builtin}}
a("vsx");
if (__builtin_cpu_is("cortex-x3")) // expected-error {{builtin is not supported on this target}}
diff --git a/clang/test/Sema/builtin-popcountg.c b/clang/test/Sema/builtin-popcountg.c
index e18b910046ff..9d095927d24e 100644
--- a/clang/test/Sema/builtin-popcountg.c
+++ b/clang/test/Sema/builtin-popcountg.c
@@ -1,14 +1,23 @@
-// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -fsyntax-only -verify -Wpedantic %s
+// RUN: %clang_cc1 -std=c23 -triple=x86_64-pc-linux-gnu -fsyntax-only -verify -Wpedantic %s
typedef int int2 __attribute__((ext_vector_type(2)));
-void test_builtin_popcountg(int i, double d, int2 i2) {
+void test_builtin_popcountg(short s, int i, __int128 i128, _BitInt(128) bi128,
+ double d, int2 i2) {
__builtin_popcountg();
// expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
__builtin_popcountg(i, i);
// expected-error@-1 {{too many arguments to function call, expected 1, have 2}}
+ __builtin_popcountg(s);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'short')}}
+ __builtin_popcountg(i);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'int')}}
+ __builtin_popcountg(i128);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was '__int128')}}
+ __builtin_popcountg(bi128);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was '_BitInt(128)')}}
__builtin_popcountg(d);
- // expected-error@-1 {{1st argument must be a type of integer (was 'double')}}
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'double')}}
__builtin_popcountg(i2);
- // expected-error@-1 {{1st argument must be a type of integer (was 'int2' (vector of 2 'int' values))}}
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'int2' (vector of 2 'int' values))}}
}
diff --git a/clang/test/SemaCXX/GH83461.cpp b/clang/test/SemaCXX/GH83461.cpp
new file mode 100644
index 000000000000..509535e883e6
--- /dev/null
+++ b/clang/test/SemaCXX/GH83461.cpp
@@ -0,0 +1,60 @@
+// RUN: %clang_cc1 -std=c++20 -fsyntax-only -verify %s
+
+struct S {
+ template<typename Ty = int>
+ friend void foo(auto){}
+
+ template<typename Ty = int, typename Tz>
+ friend void foo2(){}
+};
+
+template<typename T>
+struct TemplS {
+ template<typename Ty = int>
+ friend void foo3(auto){}
+
+ template<typename Ty = int, typename Tz>
+ friend void foo4(){}
+};
+
+void Inst() {
+ TemplS<int>();
+}
+// expected-error@+2{{template parameter missing a default argument}}
+// expected-note@+1{{previous default template argument defined here}}
+template<typename T = int, typename U>
+struct ClassTempl{};
+
+struct HasFriendClassTempl {
+ // expected-error@+1{{default template argument not permitted on a friend template}}
+ template<typename T = int, typename U>
+ friend struct Friend;
+
+ // expected-error@+3{{cannot define a type in a friend declaration}}
+ // expected-error@+1{{default template argument not permitted on a friend template}}
+ template<typename T = int, typename U>
+ friend struct Friend2{};
+};
+
+template<typename Ty>
+struct HasFriendClassTempl2 {
+ // expected-error@+3{{template parameter missing a default argument}}
+ // expected-note@+2{{previous default template argument defined here}}
+ // expected-note@#INST2{{in instantiation of template class}}
+ template<typename T = int, typename U>
+ friend struct Friend;
+};
+
+void Inst2() {
+ HasFriendClassTempl2<int>(); // #INST2
+}
+
+// expected-error@+2{{template parameter missing a default argument}}
+// expected-note@+1{{previous default template argument defined here}}
+template<typename T = int, typename U>
+static constexpr U VarTempl;
+
+// expected-error@+2{{template parameter missing a default argument}}
+// expected-note@+1{{previous default template argument defined here}}
+template<typename T = int, typename U>
+using TypeAlias = U;
diff --git a/clang/test/SemaCXX/attr-declspec-ignored.cpp b/clang/test/SemaCXX/attr-declspec-ignored.cpp
index dfea8cc4d47c..98e0ffd1a1af 100644
--- a/clang/test/SemaCXX/attr-declspec-ignored.cpp
+++ b/clang/test/SemaCXX/attr-declspec-ignored.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -verify -fsyntax-only
+// RUN: %clang_cc1 %s -std=c++03 -Wno-c++11-extensions -verify -fsyntax-only
namespace test1 {
__attribute__((visibility("hidden"))) __attribute__((aligned)) class A; // expected-warning{{attribute 'visibility' is ignored, place it after "class" to apply attribute to type declaration}} \
@@ -28,7 +29,7 @@ namespace test1 {
// expected-warning{{attribute 'aligned' is ignored, place it after "enum class" to apply attribute to type declaration}}
__attribute__((visibility("hidden"))) __attribute__((aligned)) enum struct ES {}; // expected-warning{{attribute 'visibility' is ignored, place it after "enum struct" to apply attribute to type declaration}} \
// expected-warning{{attribute 'aligned' is ignored, place it after "enum struct" to apply attribute to type declaration}}
-
+
// Also test [[]] attribute syntax. (On a non-nested declaration, these
// generate a hard "misplaced attributes" error, which we test for
// elsewhere.)
diff --git a/clang/test/SemaCXX/attr-gnu.cpp b/clang/test/SemaCXX/attr-gnu.cpp
index c257c2b02912..941d01a2e611 100644
--- a/clang/test/SemaCXX/attr-gnu.cpp
+++ b/clang/test/SemaCXX/attr-gnu.cpp
@@ -1,7 +1,8 @@
-// RUN: %clang_cc1 -std=gnu++17 -fsyntax-only -fms-compatibility -verify %s
-
-void f() {
- // GNU-style attributes are prohibited in this position.
+// RUN: %clang_cc1 -std=gnu++03 -fsyntax-only -fms-compatibility -Wno-c++11-extensions -Wno-c++17-extensions -verify %s
+// RUN: %clang_cc1 -std=gnu++17 -fsyntax-only -fms-compatibility -verify %s
+
+void f() {
+ // GNU-style attributes are prohibited in this position.
auto P = new int * __attribute__((vector_size(8))); // expected-error {{an attribute list cannot appear here}} \
// expected-error {{invalid vector element type 'int *'}}
@@ -47,13 +48,13 @@ void tuTest1(Tu<int> u); // expected-note {{candidate function not viable: no kn
void tuTest2(Tu3 u); // expected-note {{candidate function not viable: no known conversion from 'int' to 'Tu3' for 1st argument}}
void tu() {
int x = 2;
- tuTest1(x); // expected-error {{no matching function for call to 'tuTest1'}}
- tuTest2(x); // expected-error {{no matching function for call to 'tuTest2'}}
-}
-
-[[gnu::__const__]] int f2() { return 12; }
-[[__gnu__::__const__]] int f3() { return 12; }
-[[using __gnu__ : __const__]] int f4() { return 12; }
-
-static_assert(__has_cpp_attribute(gnu::__const__));
-static_assert(__has_cpp_attribute(__gnu__::__const__));
+ tuTest1(x); // expected-error {{no matching function for call to 'tuTest1'}}
+ tuTest2(x); // expected-error {{no matching function for call to 'tuTest2'}}
+}
+
+[[gnu::__const__]] int f2() { return 12; }
+[[__gnu__::__const__]] int f3() { return 12; }
+[[using __gnu__ : __const__]] int f4() { return 12; }
+
+static_assert(__has_cpp_attribute(gnu::__const__));
+static_assert(__has_cpp_attribute(__gnu__::__const__));
diff --git a/clang/test/SemaCXX/attr-target-version.cpp b/clang/test/SemaCXX/attr-target-version.cpp
index 5c542ad2e2dc..0bd710c4e282 100644
--- a/clang/test/SemaCXX/attr-target-version.cpp
+++ b/clang/test/SemaCXX/attr-target-version.cpp
@@ -7,6 +7,7 @@ void __attribute__((target_version("dotprod"))) no_def(void);
void __attribute__((target_version("rdm+fp"))) no_def(void);
void __attribute__((target_version("rcpc3"))) no_def(void);
void __attribute__((target_version("mops"))) no_def(void);
+void __attribute__((target_version("rdma"))) no_def(void);
// expected-error@+1 {{no matching function for call to 'no_def'}}
void foo(void) { no_def(); }
diff --git a/clang/test/SemaCXX/bool.cpp b/clang/test/SemaCXX/bool.cpp
index 33e22c8f6d36..57cdba1b1a83 100644
--- a/clang/test/SemaCXX/bool.cpp
+++ b/clang/test/SemaCXX/bool.cpp
@@ -2,6 +2,11 @@
// RUN: %clang_cc1 %std_cxx98-14 -fsyntax-only -verify=expected,precxx17 -Wno-constant-conversion -Wno-deprecated -Wdeprecated-increment-bool %s
// RUN: %clang_cc1 %std_cxx17- -fsyntax-only -verify=expected,cxx17 -Wno-constant-conversion -Wno-deprecated -Wdeprecated-increment-bool %s
+// RUN: %clang_cc1 %std_cxx98-14 -fsyntax-only -verify=expected,precxx17 -Wno-constant-conversion %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 %std_cxx98-14 -fsyntax-only -verify=expected,precxx17 -Wno-constant-conversion -Wno-deprecated -Wdeprecated-increment-bool %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 %std_cxx17- -fsyntax-only -verify=expected,cxx17 -Wno-constant-conversion -Wno-deprecated -Wdeprecated-increment-bool %s -fexperimental-new-constant-interpreter
+
+
// Bool literals can be enum values.
enum {
ReadWrite = false,
diff --git a/clang/test/SemaCXX/cxx03-cxx11-attr.cpp b/clang/test/SemaCXX/cxx03-cxx11-attr.cpp
new file mode 100644
index 000000000000..5a273c8fe253
--- /dev/null
+++ b/clang/test/SemaCXX/cxx03-cxx11-attr.cpp
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -std=c++03 -fsyntax-only %s
+
+// Ensure that __has_cpp_attribute and argument parsing work in C++03
+
+#if !__has_cpp_attribute(nodiscard)
+# error
+#endif
+
+[[gnu::assume_aligned(4)]] void* g() { return __nullptr; }
diff --git a/clang/test/SemaCXX/restrict-this.cpp b/clang/test/SemaCXX/restrict-this.cpp
new file mode 100644
index 000000000000..e78c8e0d56e2
--- /dev/null
+++ b/clang/test/SemaCXX/restrict-this.cpp
@@ -0,0 +1,69 @@
+// RUN: %clang_cc1 -verify -fsyntax-only %s
+// expected-no-diagnostics
+
+struct C {
+ void f() __restrict {
+ static_assert(__is_same(decltype(this), C *__restrict));
+ (void) [this]() {
+ static_assert(__is_same(decltype(this), C *__restrict));
+ (void) [this]() { static_assert(__is_same(decltype(this), C *__restrict)); };
+
+ // By-value capture means 'this' is now a different object; do not
+ // make it __restrict.
+ (void) [*this]() { static_assert(__is_same(decltype(this), const C *)); };
+ (void) [*this]() mutable { static_assert(__is_same(decltype(this), C *)); };
+ };
+ }
+};
+
+template <typename T> struct TC {
+ void f() __restrict {
+ static_assert(__is_same(decltype(this), TC<int> *__restrict));
+ (void) [this]() {
+ static_assert(__is_same(decltype(this), TC<int> *__restrict));
+ (void) [this]() { static_assert(__is_same(decltype(this), TC<int> *__restrict)); };
+
+ // By-value capture means 'this' is now a different object; do not
+ // make it __restrict.
+ (void) [*this]() { static_assert(__is_same(decltype(this), const TC<int> *)); };
+ (void) [*this]() mutable { static_assert(__is_same(decltype(this), TC<int> *)); };
+ };
+ }
+};
+
+void f() {
+ TC<int>{}.f();
+}
+
+namespace gh18121 {
+struct Foo {
+ void member() __restrict {
+ Foo *__restrict This = this;
+ }
+};
+}
+
+namespace gh42411 {
+struct foo {
+ int v;
+ void f() const __restrict {
+ static_assert(__is_same(decltype((v)), const int&));
+ (void) [this]() { static_assert(__is_same(decltype((v)), const int&)); };
+ }
+};
+}
+
+namespace gh82941 {
+void f(int& x) {
+ (void)x;
+}
+
+class C {
+ int x;
+ void g() __restrict;
+};
+
+void C::g() __restrict {
+ f(this->x);
+}
+}
diff --git a/clang/test/SemaCXX/undefined-internal.cpp b/clang/test/SemaCXX/undefined-internal.cpp
index 790c96c9fe45..054e71b92f93 100644
--- a/clang/test/SemaCXX/undefined-internal.cpp
+++ b/clang/test/SemaCXX/undefined-internal.cpp
@@ -2,11 +2,21 @@
// RUN: %clang_cc1 -fsyntax-only -verify -Wbind-to-temporary-copy -std=c++98 %s
// RUN: %clang_cc1 -fsyntax-only -verify -Wbind-to-temporary-copy -std=c++11 %s
+// RUN: %clang_cc1 -fsyntax-only -verify -Wbind-to-temporary-copy %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -fsyntax-only -verify -Wbind-to-temporary-copy -std=c++98 %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -fsyntax-only -verify -Wbind-to-temporary-copy -std=c++11 %s -fexperimental-new-constant-interpreter
+
+
// Make sure we don't produce invalid IR.
// RUN: %clang_cc1 -emit-llvm-only %s
// RUN: %clang_cc1 -emit-llvm-only -std=c++98 %s
// RUN: %clang_cc1 -emit-llvm-only -std=c++11 %s
+// RUN: %clang_cc1 -emit-llvm-only %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -emit-llvm-only -std=c++98 %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -emit-llvm-only -std=c++11 %s -fexperimental-new-constant-interpreter
+
+
namespace test1 {
static void foo(); // expected-warning {{function 'test1::foo' has internal linkage but is not defined}}
template <class T> static void bar(); // expected-warning {{function 'test1::bar<int>' has internal linkage but is not defined}}
diff --git a/clang/test/SemaCXX/warn-bool-conversion.cpp b/clang/test/SemaCXX/warn-bool-conversion.cpp
index c81d52d864f2..18c35776b17b 100644
--- a/clang/test/SemaCXX/warn-bool-conversion.cpp
+++ b/clang/test/SemaCXX/warn-bool-conversion.cpp
@@ -81,6 +81,33 @@ struct S2 {
bool f5();
bool f6(int);
+#if __cplusplus >= 201103L
+auto f7 = []{};
+auto f8 = [](){};
+
+void foo() {
+ bool b;
+ b = f7; // expected-warning {{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+ b = f8; // expected-warning {{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+ bool is_true = [](){ return true; };
+ // expected-warning@-1{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+}
+
+template <typename... Ts>
+static bool IsFalse(const Ts&...) { return false; }
+template <typename T>
+static bool IsFalse(const T& p) {
+ bool b;
+ b = f7; // expected-warning {{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+ // Intentionally not warned on because p could be a lambda type in one
+ // instantiation, but a pointer type in another.
+ return p ? false : true;
+}
+
+bool use_instantiation() {
+ return IsFalse([]() { return 0; });
+}
+#endif
void bar() {
bool b;
diff --git a/clang/test/SemaCXX/warn-unused-variables.cpp b/clang/test/SemaCXX/warn-unused-variables.cpp
index b649c7d80893..29e8d08d37d8 100644
--- a/clang/test/SemaCXX/warn-unused-variables.cpp
+++ b/clang/test/SemaCXX/warn-unused-variables.cpp
@@ -1,5 +1,7 @@
// RUN: %clang_cc1 -fsyntax-only -Wunused-variable -Wunused-label -Wno-c++1y-extensions -verify %s
-// RUN: %clang_cc1 -fsyntax-only -Wunused-variable -Wunused-label -Wno-c++1y-extensions -verify -std=gnu++11 %s
+// RUN: %clang_cc1 -fsyntax-only -Wunused-variable -Wunused-label -Wno-c++1y-extensions -verify=expected,cxx98-14 -std=gnu++11 %s
+// RUN: %clang_cc1 -fsyntax-only -Wunused-variable -Wunused-label -Wno-c++1y-extensions -verify=expected,cxx98-14 -std=gnu++14 %s
+// RUN: %clang_cc1 -fsyntax-only -Wunused-variable -Wunused-label -Wno-c++1y-extensions -verify -std=gnu++17 %s
template<typename T> void f() {
T t;
t = 17;
@@ -183,7 +185,8 @@ void foo(int size) {
NonTriviallyDestructible array[2]; // no warning
NonTriviallyDestructible nestedArray[2][2]; // no warning
- Foo fooScalar = 1; // expected-warning {{unused variable 'fooScalar'}}
+ // Copy initialzation gives warning before C++17
+ Foo fooScalar = 1; // cxx98-14-warning {{unused variable 'fooScalar'}}
Foo fooArray[] = {1,2}; // expected-warning {{unused variable 'fooArray'}}
Foo fooNested[2][2] = { {1,2}, {3,4} }; // expected-warning {{unused variable 'fooNested'}}
}
@@ -297,3 +300,29 @@ void RAIIWrapperTest() {
}
} // namespace gh54489
+
+// Ensure that -Wunused-variable does not emit warning
+// on copy constructors with side effects (C++17 and later)
+#if __cplusplus >= 201703L
+namespace gh79518 {
+
+struct S {
+ S(int);
+};
+
+// With an initializer list
+struct A {
+ int x;
+ A(int x) : x(x) {}
+};
+
+void foo() {
+ S s(0); // no warning
+ S s2 = 0; // no warning
+ S s3{0}; // no warning
+
+ A a = 1; // no warning
+}
+
+} // namespace gh79518
+#endif
diff --git a/clang/test/SemaHLSL/BuiltIns/dot-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/dot-errors.hlsl
index 54d093aa7ce3..59eb9482b9ef 100644
--- a/clang/test/SemaHLSL/BuiltIns/dot-errors.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/dot-errors.hlsl
@@ -1,109 +1,110 @@
// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -verify-ignore-unexpected
-float test_no_second_arg ( float2 p0) {
- return __builtin_hlsl_dot ( p0 );
+float test_no_second_arg(float2 p0) {
+ return __builtin_hlsl_dot(p0);
// expected-error@-1 {{too few arguments to function call, expected 2, have 1}}
}
-float test_too_many_arg ( float2 p0) {
- return __builtin_hlsl_dot ( p0, p0, p0 );
+float test_too_many_arg(float2 p0) {
+ return __builtin_hlsl_dot(p0, p0, p0);
// expected-error@-1 {{too many arguments to function call, expected 2, have 3}}
}
-float test_dot_no_second_arg ( float2 p0) {
- return dot ( p0 );
+float test_dot_no_second_arg(float2 p0) {
+ return dot(p0);
// expected-error@-1 {{no matching function for call to 'dot'}}
}
-float test_dot_vector_size_mismatch ( float3 p0, float2 p1 ) {
- return dot ( p0, p1 );
+float test_dot_vector_size_mismatch(float3 p0, float2 p1) {
+ return dot(p0, p1);
// expected-warning@-1 {{implicit conversion truncates vector: 'float3' (aka 'vector<float, 3>') to 'float __attribute__((ext_vector_type(2)))' (vector of 2 'float' values)}}
}
-float test_dot_builtin_vector_size_mismatch ( float3 p0, float2 p1 ) {
- return __builtin_hlsl_dot ( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+float test_dot_builtin_vector_size_mismatch(float3 p0, float2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
-float test_dot_scalar_mismatch ( float p0, int p1 ) {
- return dot ( p0, p1 );
+float test_dot_scalar_mismatch(float p0, int p1) {
+ return dot(p0, p1);
// expected-error@-1 {{call to 'dot' is ambiguous}}
}
-float test_dot_element_type_mismatch ( int2 p0, float2 p1 ) {
- return dot ( p0, p1 );
+float test_dot_element_type_mismatch(int2 p0, float2 p1) {
+ return dot(p0, p1);
// expected-error@-1 {{call to 'dot' is ambiguous}}
}
//NOTE: for all the *_promotion we are intentionally not handling type promotion in builtins
-float test_builtin_dot_vec_int_to_float_promotion ( int2 p0, float2 p1 ) {
- return __builtin_hlsl_dot ( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+float test_builtin_dot_vec_int_to_float_promotion(int2 p0, float2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
-int64_t test_builtin_dot_vec_int_to_int64_promotion( int64_t2 p0, int2 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+int64_t test_builtin_dot_vec_int_to_int64_promotion(int64_t2 p0, int2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
-float test_builtin_dot_vec_half_to_float_promotion( float2 p0, half2 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+float test_builtin_dot_vec_half_to_float_promotion(float2 p0, half2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
#ifdef __HLSL_ENABLE_16_BIT
-float test_builtin_dot_vec_int16_to_float_promotion( float2 p0, int16_t2 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+float test_builtin_dot_vec_int16_to_float_promotion(float2 p0, int16_t2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
-half test_builtin_dot_vec_int16_to_half_promotion( half2 p0, int16_t2 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+half test_builtin_dot_vec_int16_to_half_promotion(half2 p0, int16_t2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
-int test_builtin_dot_vec_int16_to_int_promotion( int2 p0, int16_t2 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+int test_builtin_dot_vec_int16_to_int_promotion(int2 p0, int16_t2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
-int64_t test_builtin_dot_vec_int16_to_int64_promotion( int64_t2 p0, int16_t2 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must have the same type}}
+int64_t test_builtin_dot_vec_int16_to_int64_promotion(int64_t2 p0,
+ int16_t2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must have the same type}}
}
#endif
-float test_builtin_dot_float2_splat ( float p0, float2 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must be vectors}}
+float test_builtin_dot_float2_splat(float p0, float2 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must be vectors}}
}
-float test_builtin_dot_float3_splat ( float p0, float3 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must be vectors}}
+float test_builtin_dot_float3_splat(float p0, float3 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must be vectors}}
}
-float test_builtin_dot_float4_splat ( float p0, float4 p1 ) {
- return __builtin_hlsl_dot( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must be vectors}}
+float test_builtin_dot_float4_splat(float p0, float4 p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must be vectors}}
}
-float test_dot_float2_int_splat ( float2 p0, int p1 ) {
- return __builtin_hlsl_dot ( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must be vectors}}
+float test_dot_float2_int_splat(float2 p0, int p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must be vectors}}
}
-float test_dot_float3_int_splat ( float3 p0, int p1 ) {
- return __builtin_hlsl_dot ( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must be vectors}}
+float test_dot_float3_int_splat(float3 p0, int p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must be vectors}}
}
-float test_builtin_dot_int_vect_to_float_vec_promotion ( int2 p0, float p1 ) {
- return __builtin_hlsl_dot ( p0, p1 );
- // expected-error@-1 {{first two arguments to '__builtin_hlsl_dot' must be vectors}}
+float test_builtin_dot_int_vect_to_float_vec_promotion(int2 p0, float p1) {
+ return __builtin_hlsl_dot(p0, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_dot' must be vectors}}
}
-int test_builtin_dot_bool_type_promotion ( bool p0, bool p1 ) {
- return __builtin_hlsl_dot ( p0, p1 );
+int test_builtin_dot_bool_type_promotion(bool p0, bool p1) {
+ return __builtin_hlsl_dot(p0, p1);
// expected-error@-1 {{1st argument must be a vector, integer or floating point type (was 'bool')}}
}
diff --git a/clang/test/SemaHLSL/BuiltIns/frac-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/frac-errors.hlsl
new file mode 100644
index 000000000000..06dbdf0a68df
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/frac-errors.hlsl
@@ -0,0 +1,27 @@
+
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -verify-ignore-unexpected
+
+float test_too_few_arg() {
+ return __builtin_hlsl_elementwise_frac();
+ // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
+}
+
+float2 test_too_many_arg(float2 p0) {
+ return __builtin_hlsl_elementwise_frac(p0, p0);
+ // expected-error@-1 {{too many arguments to function call, expected 1, have 2}}
+}
+
+float builtin_bool_to_float_type_promotion(bool p1) {
+ return __builtin_hlsl_elementwise_frac(p1);
+ // expected-error@-1 {{1st argument must be a vector, integer or floating point type (was 'bool')}}
+}
+
+float builtin_frac_int_to_float_promotion(int p1) {
+ return __builtin_hlsl_elementwise_frac(p1);
+ // expected-error@-1 {{passing 'int' to parameter of incompatible type 'float'}}
+}
+
+float2 builtin_frac_int2_to_float2_promotion(int2 p1) {
+ return __builtin_hlsl_elementwise_frac(p1);
+ // expected-error@-1 {{passing 'int2' (aka 'vector<int, 2>') to parameter of incompatible type '__attribute__((__vector_size__(2 * sizeof(float)))) float' (vector of 2 'float' values)}}
+}
diff --git a/clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl
new file mode 100644
index 000000000000..f6ce87e7c33e
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/lerp-errors.hlsl
@@ -0,0 +1,96 @@
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -verify-ignore-unexpected
+
+float2 test_no_second_arg(float2 p0) {
+ return __builtin_hlsl_lerp(p0);
+ // expected-error@-1 {{too few arguments to function call, expected 3, have 1}}
+}
+
+float2 test_no_third_arg(float2 p0) {
+ return __builtin_hlsl_lerp(p0, p0);
+ // expected-error@-1 {{too few arguments to function call, expected 3, have 2}}
+}
+
+float2 test_too_many_arg(float2 p0) {
+ return __builtin_hlsl_lerp(p0, p0, p0, p0);
+ // expected-error@-1 {{too many arguments to function call, expected 3, have 4}}
+}
+
+float2 test_lerp_no_second_arg(float2 p0) {
+ return lerp(p0);
+ // expected-error@-1 {{no matching function for call to 'lerp'}}
+}
+
+float2 test_lerp_vector_size_mismatch(float3 p0, float2 p1) {
+ return lerp(p0, p0, p1);
+ // expected-warning@-1 {{implicit conversion truncates vector: 'float3' (aka 'vector<float, 3>') to 'float __attribute__((ext_vector_type(2)))' (vector of 2 'float' values)}}
+}
+
+float2 test_lerp_builtin_vector_size_mismatch(float3 p0, float2 p1) {
+ return __builtin_hlsl_lerp(p0, p1, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must have the same type}}
+}
+
+float test_lerp_scalar_mismatch(float p0, half p1) {
+ return lerp(p1, p0, p1);
+ // expected-error@-1 {{call to 'lerp' is ambiguous}}
+}
+
+float2 test_lerp_element_type_mismatch(half2 p0, float2 p1) {
+ return lerp(p1, p0, p1);
+ // expected-error@-1 {{call to 'lerp' is ambiguous}}
+}
+
+float2 test_builtin_lerp_float2_splat(float p0, float2 p1) {
+ return __builtin_hlsl_lerp(p0, p1, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+}
+
+float3 test_builtin_lerp_float3_splat(float p0, float3 p1) {
+ return __builtin_hlsl_lerp(p0, p1, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+}
+
+float4 test_builtin_lerp_float4_splat(float p0, float4 p1) {
+ return __builtin_hlsl_lerp(p0, p1, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+}
+
+float2 test_lerp_float2_int_splat(float2 p0, int p1) {
+ return __builtin_hlsl_lerp(p0, p1, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+}
+
+float3 test_lerp_float3_int_splat(float3 p0, int p1) {
+ return __builtin_hlsl_lerp(p0, p1, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+}
+
+float2 test_builtin_lerp_int_vect_to_float_vec_promotion(int2 p0, float p1) {
+ return __builtin_hlsl_lerp(p0, p1, p1);
+ // expected-error@-1 {{all arguments to '__builtin_hlsl_lerp' must be vectors}}
+}
+
+float test_builtin_lerp_bool_type_promotion(bool p0) {
+ return __builtin_hlsl_lerp(p0, p0, p0);
+ // expected-error@-1 {{1st argument must be a floating point type (was 'bool')}}
+}
+
+float builtin_bool_to_float_type_promotion(float p0, bool p1) {
+ return __builtin_hlsl_lerp(p0, p0, p1);
+ // expected-error@-1 {{3rd argument must be a floating point type (was 'bool')}}
+}
+
+float builtin_bool_to_float_type_promotion2(bool p0, float p1) {
+ return __builtin_hlsl_lerp(p1, p0, p1);
+ // expected-error@-1 {{2nd argument must be a floating point type (was 'bool')}}
+}
+
+float builtin_lerp_int_to_float_promotion(float p0, int p1) {
+ return __builtin_hlsl_lerp(p0, p0, p1);
+ // expected-error@-1 {{3rd argument must be a floating point type (was 'int')}}
+}
+
+float4 test_lerp_int4(int4 p0, int4 p1, int4 p2) {
+ return __builtin_hlsl_lerp(p0, p1, p2);
+ // expected-error@-1 {{1st argument must be a floating point type (was 'int4' (aka 'vector<int, 4>'))}}
+} \ No newline at end of file
diff --git a/clang/test/SemaHLSL/BuiltIns/round-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/round-errors.hlsl
new file mode 100644
index 000000000000..fed4573063ac
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/round-errors.hlsl
@@ -0,0 +1,27 @@
+
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -verify-ignore-unexpected
+
+float test_too_few_arg() {
+ return __builtin_elementwise_round();
+ // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
+}
+
+float2 test_too_many_arg(float2 p0) {
+ return __builtin_elementwise_round(p0, p0);
+ // expected-error@-1 {{too many arguments to function call, expected 1, have 2}}
+}
+
+float builtin_bool_to_float_type_promotion(bool p1) {
+ return __builtin_elementwise_round(p1);
+ // expected-error@-1 {{1st argument must be a vector, integer or floating point type (was 'bool')}}
+}
+
+float builtin_round_int_to_float_promotion(int p1) {
+ return __builtin_elementwise_round(p1);
+ // expected-error@-1 {{1st argument must be a floating point type (was 'int')}}
+}
+
+float2 builtin_round_int2_to_float2_promotion(int2 p1) {
+ return __builtin_elementwise_round(p1);
+ // expected-error@-1 {{1st argument must be a floating point type (was 'int2' (aka 'vector<int, 2>'))}}
+}
diff --git a/clang/test/SemaHLSL/OverloadResolutionBugs.hlsl b/clang/test/SemaHLSL/OverloadResolutionBugs.hlsl
index 8464f1c1a7c2..c13cb299127a 100644
--- a/clang/test/SemaHLSL/OverloadResolutionBugs.hlsl
+++ b/clang/test/SemaHLSL/OverloadResolutionBugs.hlsl
@@ -7,73 +7,67 @@
void Fn3(double2 D);
void Fn3(float2 F);
-void Call3(half2 H) {
- Fn3(H);
-}
+void Call3(half2 H) { Fn3(H); }
void Fn5(double2 D);
-void Call5(half2 H) {
- Fn5(H);
-}
+void Call5(half2 H) { Fn5(H); }
void Fn4(int64_t2 L);
void Fn4(int2 I);
-void Call4(int16_t H) {
- Fn4(H);
-}
+void Call4(int16_t H) { Fn4(H); }
-int test_builtin_dot_bool_type_promotion ( bool p0, bool p1 ) {
- return dot ( p0, p1 );
+int test_builtin_dot_bool_type_promotion(bool p0, bool p1) {
+ return dot(p0, p1);
}
-float test_dot_scalar_mismatch ( float p0, int p1 ) {
- return dot ( p0, p1 );
-}
+float test_dot_scalar_mismatch(float p0, int p1) { return dot(p0, p1); }
-float test_dot_element_type_mismatch ( int2 p0, float2 p1 ) {
- return dot ( p0, p1 );
-}
+float test_dot_element_type_mismatch(int2 p0, float2 p1) { return dot(p0, p1); }
-float test_builtin_dot_vec_int_to_float_promotion ( int2 p0, float2 p1 ) {
- return dot ( p0, p1 );
+float test_builtin_dot_vec_int_to_float_promotion(int2 p0, float2 p1) {
+ return dot(p0, p1);
}
-int64_t test_builtin_dot_vec_int_to_int64_promotion( int64_t2 p0, int2 p1 ) {
- return dot ( p0, p1 );
+int64_t test_builtin_dot_vec_int_to_int64_promotion(int64_t2 p0, int2 p1) {
+ return dot(p0, p1);
}
-float test_builtin_dot_vec_half_to_float_promotion( float2 p0, half2 p1 ) {
- return dot( p0, p1 );
+float test_builtin_dot_vec_half_to_float_promotion(float2 p0, half2 p1) {
+ return dot(p0, p1);
}
-float test_builtin_dot_vec_int16_to_float_promotion( float2 p0, int16_t2 p1 ) {
- return dot( p0, p1 );
+float test_builtin_dot_vec_int16_to_float_promotion(float2 p0, int16_t2 p1) {
+ return dot(p0, p1);
}
-half test_builtin_dot_vec_int16_to_half_promotion( half2 p0, int16_t2 p1 ) {
- return dot( p0, p1 );
+half test_builtin_dot_vec_int16_to_half_promotion(half2 p0, int16_t2 p1) {
+ return dot(p0, p1);
}
-int test_builtin_dot_vec_int16_to_int_promotion( int2 p0, int16_t2 p1 ) {
- return dot( p0, p1 );
+int test_builtin_dot_vec_int16_to_int_promotion(int2 p0, int16_t2 p1) {
+ return dot(p0, p1);
}
-int64_t test_builtin_dot_vec_int16_to_int64_promotion( int64_t2 p0, int16_t2 p1 ) {
- return dot( p0, p1 );
+int64_t test_builtin_dot_vec_int16_to_int64_promotion(int64_t2 p0,
+ int16_t2 p1) {
+ return dot(p0, p1);
}
+float4 test_frac_int4(int4 p0) { return frac(p0); }
+
+float test_frac_int(int p0) { return frac(p0); }
+
+float test_frac_bool(bool p0) { return frac(p0); }
+
// https://github.com/llvm/llvm-project/issues/81049
// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.2-library %s -emit-llvm -disable-llvm-passes \
// RUN: -o - | FileCheck %s --check-prefix=NO_HALF
-half sqrt_h(half x)
-{
- return sqrt(x);
-}
+half sqrt_h(half x) { return sqrt(x); }
// NO_HALF: define noundef float @"?sqrt_h@@YA$halff@$halff@@Z"(
// NO_HALF: call float @llvm.sqrt.f32(float %0)
diff --git a/clang/test/SemaHLSL/VectorOverloadResolution.hlsl b/clang/test/SemaHLSL/VectorOverloadResolution.hlsl
index 81fedc2de315..2ea7d14e80ee 100644
--- a/clang/test/SemaHLSL/VectorOverloadResolution.hlsl
+++ b/clang/test/SemaHLSL/VectorOverloadResolution.hlsl
@@ -40,7 +40,7 @@ void Fn3( int64_t2 p0);
// CHECK-NEXT: ImplicitCastExpr {{.*}} 'half2':'half __attribute__((ext_vector_type(2)))' <LValueToRValue>
// CHECK-NEXT: DeclRefExpr {{.*}} 'half2':'half __attribute__((ext_vector_type(2)))' lvalue ParmVar {{.*}} 'p0' 'half2':'half __attribute__((ext_vector_type(2)))'
// CHECKIR-LABEL: Call3
-// CHECKIR: %conv = fptosi <2 x half> {{.*}} to <2 x i64>
+// CHECKIR: {{.*}} = fptosi <2 x half> {{.*}} to <2 x i64>
void Call3(half2 p0) {
Fn3(p0);
}
diff --git a/clang/test/SemaOpenACC/no-branch-in-out.c b/clang/test/SemaOpenACC/no-branch-in-out.c
index f8fb40a1ca8f..eccc64324500 100644
--- a/clang/test/SemaOpenACC/no-branch-in-out.c
+++ b/clang/test/SemaOpenACC/no-branch-in-out.c
@@ -113,3 +113,227 @@ void Return() {
}
}
}
+
+void Goto() {
+ int j;
+#pragma acc parallel // expected-note{{invalid branch out of OpenACC Compute Construct}}
+ while(j) {
+ if (j <3)
+ goto LABEL; // expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+LABEL:
+ {}
+
+ goto LABEL_IN; // expected-error{{cannot jump from this goto statement to its label}}
+
+#pragma acc parallel // expected-note{{invalid branch into OpenACC Compute Construct}}
+ for(int i = 0; i < 5; ++i) {
+LABEL_IN:
+ {}
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+LABEL_NOT_CALLED:
+ {}
+ }
+
+#pragma acc parallel
+ {
+ goto ANOTHER_LOOP; // expected-error{{cannot jump from this goto statement to its label}}
+
+ }
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+
+ {
+ANOTHER_LOOP:
+ {}
+ }
+
+#pragma acc parallel
+ {
+ while (j) {
+ --j;
+ if (j < 3)
+ goto LABEL2;
+
+ if (j > 4)
+ break;
+ }
+LABEL2:
+ {}
+ }
+
+#pragma acc parallel
+ do {
+ if (j < 3)
+ goto LABEL3;
+
+ if (j > 4)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+
+LABEL3:
+ {}
+ } while (j);
+
+LABEL4:
+ {}
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ goto LABEL4;// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+
+ {
+LABEL5:
+ {}
+ }
+
+ {
+ goto LABEL5;// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+#pragma acc parallel
+ {
+LABEL6:
+ {}
+ goto LABEL6;
+
+ }
+
+#pragma acc parallel
+ goto LABEL7; // expected-error{{cannot jump from this goto statement to its label}}
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+ {
+LABEL7:{}
+ }
+
+#pragma acc parallel
+ LABEL8:{}
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ goto LABEL8;// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+ {
+LABEL9:{}
+ }
+
+ ({goto LABEL9;});// expected-error{{cannot jump from this goto statement to its label}}
+
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ ({goto LABEL10;});// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+LABEL10:{}
+
+ ({goto LABEL11;});// expected-error{{cannot jump from this goto statement to its label}}
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+ {
+LABEL11:{}
+ }
+
+LABEL12:{}
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ ({goto LABEL12;});// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+#pragma acc parallel
+ {
+ ({goto LABEL13;});
+LABEL13:{}
+ }
+
+#pragma acc parallel
+ {
+ LABEL14:{}
+ ({goto LABEL14;});
+ }
+}
+
+void IndirectGoto1() {
+ void* ptr;
+#pragma acc parallel
+ {
+LABEL1:{}
+ ptr = &&LABEL1;
+
+ goto *ptr;
+
+ }
+}
+
+void IndirectGoto2() {
+ void* ptr;
+LABEL2:{} // #GOTOLBL2
+ ptr = &&LABEL2;
+#pragma acc parallel // #GOTOPAR2
+ {
+// expected-error@+3{{cannot jump from this indirect goto statement to one of its possible targets}}
+// expected-note@#GOTOLBL2{{possible target of indirect goto statement}}
+// expected-note@#GOTOPAR2{{invalid branch out of OpenACC Compute Construct}}
+ goto *ptr;
+ }
+}
+
+void IndirectGoto3() {
+ void* ptr;
+#pragma acc parallel // #GOTOPAR3
+ {
+LABEL3:{} // #GOTOLBL3
+ ptr = &&LABEL3;
+ }
+// expected-error@+3{{cannot jump from this indirect goto statement to one of its possible targets}}
+// expected-note@#GOTOLBL3{{possible target of indirect goto statement}}
+// expected-note@#GOTOPAR3{{invalid branch into OpenACC Compute Construct}}
+ goto *ptr;
+}
+
+void IndirectGoto4() {
+ void* ptr;
+#pragma acc parallel // #GOTOPAR4
+ {
+LABEL4:{}
+ ptr = &&LABEL4;
+// expected-error@+3{{cannot jump from this indirect goto statement to one of its possible targets}}
+// expected-note@#GOTOLBL5{{possible target of indirect goto statement}}
+// expected-note@#GOTOPAR4{{invalid branch out of OpenACC Compute Construct}}
+ goto *ptr;
+ }
+LABEL5:// #GOTOLBL5
+
+ ptr=&&LABEL5;
+}
+
+void DuffsDevice() {
+ int j;
+ switch (j) {
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ case 0: // expected-error{{invalid branch into OpenACC Compute Construct}}
+ {}
+ }
+ }
+
+ switch (j) {
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ default: // expected-error{{invalid branch into OpenACC Compute Construct}}
+ {}
+ }
+ }
+
+ switch (j) {
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ case 'a' ... 'z': // expected-error{{invalid branch into OpenACC Compute Construct}}
+ {}
+ }
+ }
+}
diff --git a/clang/test/SemaOpenACC/no-branch-in-out.cpp b/clang/test/SemaOpenACC/no-branch-in-out.cpp
index 232e372cedd3..6ee4553cd303 100644
--- a/clang/test/SemaOpenACC/no-branch-in-out.cpp
+++ b/clang/test/SemaOpenACC/no-branch-in-out.cpp
@@ -15,3 +15,188 @@ void ReturnTest() {
}
}
}
+
+template<typename T>
+void BreakContinue() {
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ switch(i) {
+ case 0:
+ break; // leaves switch, not 'for'.
+ default:
+ i +=2;
+ break;
+ }
+ if (i == 2)
+ continue;
+
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+
+ int j;
+ switch(j) {
+ case 0:
+#pragma acc parallel
+ {
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+ case 1:
+#pragma acc parallel
+ {
+ }
+ break;
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ if (i > 1)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+
+#pragma acc parallel
+ switch(j) {
+ case 1:
+ break;
+ }
+
+#pragma acc parallel
+ {
+ for(int i = 1; i < 100; i++) {
+ if (i > 4)
+ break;
+ }
+ }
+
+ for (int i =0; i < 5; ++i) {
+#pragma acc parallel
+ {
+ continue; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+ }
+
+#pragma acc parallel
+ for (int i =0; i < 5; ++i) {
+ continue;
+ }
+
+#pragma acc parallel
+ for (int i =0; i < 5; ++i) {
+ {
+ continue;
+ }
+ }
+
+ for (int i =0; i < 5; ++i) {
+#pragma acc parallel
+ {
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+ }
+
+#pragma acc parallel
+ while (j) {
+ --j;
+ if (j > 4)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+
+#pragma acc parallel
+ do {
+ --j;
+ if (j > 4)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ } while (j );
+}
+
+template<typename T>
+void DuffsDevice() {
+ int j;
+ switch (j) {
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ case 0: // expected-error{{invalid branch into OpenACC Compute Construct}}
+ {}
+ }
+ }
+
+ switch (j) {
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ default: // expected-error{{invalid branch into OpenACC Compute Construct}}
+ {}
+ }
+ }
+
+ switch (j) {
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ case 'a' ... 'z': // expected-error{{invalid branch into OpenACC Compute Construct}}
+ {}
+ }
+ }
+}
+
+void Exceptions() {
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ throw 5; // expected-error{{invalid throw out of OpenACC Compute Construct}}
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ throw; // expected-error{{invalid throw out of OpenACC Compute Construct}}
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ try {
+ throw 5;
+ } catch(float f) {
+ }
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ try {
+ throw 5;
+ } catch(int f) {
+ }
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ try {
+ throw 5;
+ } catch(...) {
+ }
+ }
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ try {
+ throw;
+ } catch(...) {
+ }
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ try {
+ throw;
+ } catch(...) {
+ throw; // expected-error{{invalid throw out of OpenACC Compute Construct}}
+ }
+ }
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ try {
+ throw;
+ } catch(int f) {
+ throw; // expected-error{{invalid throw out of OpenACC Compute Construct}}
+ }
+ }
+}
+
+void Instantiate() {
+ BreakContinue<int>();
+ DuffsDevice<int>();
+}
diff --git a/clang/tools/clang-installapi/CMakeLists.txt b/clang/tools/clang-installapi/CMakeLists.txt
index b8384c92c104..e05f4eac3ad1 100644
--- a/clang/tools/clang-installapi/CMakeLists.txt
+++ b/clang/tools/clang-installapi/CMakeLists.txt
@@ -14,6 +14,7 @@ add_clang_tool(clang-installapi
clang_target_link_libraries(clang-installapi
PRIVATE
+ clangAST
clangInstallAPI
clangBasic
clangDriver
diff --git a/clang/tools/clang-installapi/ClangInstallAPI.cpp b/clang/tools/clang-installapi/ClangInstallAPI.cpp
index fc23ffd7ae6b..c6da1c80a673 100644
--- a/clang/tools/clang-installapi/ClangInstallAPI.cpp
+++ b/clang/tools/clang-installapi/ClangInstallAPI.cpp
@@ -12,12 +12,14 @@
//===----------------------------------------------------------------------===//
#include "Options.h"
-#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Driver/Tool.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
-#include "clang/InstallAPI/Context.h"
+#include "clang/InstallAPI/Frontend.h"
+#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/CommandLine.h"
@@ -27,7 +29,9 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Signals.h"
#include "llvm/TargetParser/Host.h"
+#include "llvm/TextAPI/RecordVisitor.h"
#include "llvm/TextAPI/TextAPIWriter.h"
+#include <memory>
using namespace clang;
using namespace clang::installapi;
@@ -35,6 +39,36 @@ using namespace clang::driver::options;
using namespace llvm::opt;
using namespace llvm::MachO;
+static bool runFrontend(StringRef ProgName, bool Verbose,
+ InstallAPIContext &Ctx,
+ llvm::vfs::InMemoryFileSystem *FS,
+ const ArrayRef<std::string> InitialArgs) {
+
+ std::unique_ptr<llvm::MemoryBuffer> ProcessedInput = createInputBuffer(Ctx);
+ // Skip invoking cc1 when there are no header inputs.
+ if (!ProcessedInput)
+ return true;
+
+ if (Verbose)
+ llvm::errs() << getName(Ctx.Type) << " Headers:\n"
+ << ProcessedInput->getBuffer() << "\n\n";
+
+ std::string InputFile = ProcessedInput->getBufferIdentifier().str();
+ FS->addFile(InputFile, /*ModTime=*/0, std::move(ProcessedInput));
+ // Reconstruct arguments with unique values like target triple or input
+ // headers.
+ std::vector<std::string> Args = {ProgName.data(), "-target",
+ Ctx.Slice->getTriple().str().c_str()};
+ llvm::copy(InitialArgs, std::back_inserter(Args));
+ Args.push_back(InputFile);
+
+ // Create & run invocation.
+ clang::tooling::ToolInvocation Invocation(
+ std::move(Args), std::make_unique<InstallAPIAction>(Ctx), Ctx.FM);
+
+ return Invocation.run();
+}
+
static bool run(ArrayRef<const char *> Args, const char *ProgName) {
// Setup Diagnostics engine.
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
@@ -48,15 +82,20 @@ static bool run(ArrayRef<const char *> Args, const char *ProgName) {
new clang::DiagnosticIDs(), DiagOpts.get(),
new clang::TextDiagnosticPrinter(llvm::errs(), DiagOpts.get()));
- // Create file manager for all file operations.
+ // Create file manager for all file operations and holding in-memory generated
+ // inputs.
+ llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
+ new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
IntrusiveRefCntPtr<clang::FileManager> FM(
- new FileManager(clang::FileSystemOptions()));
+ new FileManager(clang::FileSystemOptions(), OverlayFileSystem));
// Set up driver to parse input arguments.
auto DriverArgs = llvm::ArrayRef(Args).slice(1);
clang::driver::Driver Driver(ProgName, llvm::sys::getDefaultTargetTriple(),
*Diag, "clang installapi tool");
- Driver.setInstalledDir(llvm::sys::path::parent_path(ProgName));
auto TargetAndMode =
clang::driver::ToolChain::getTargetAndModeFromProgramName(ProgName);
Driver.setTargetAndMode(TargetAndMode);
@@ -71,7 +110,10 @@ static bool run(ArrayRef<const char *> Args, const char *ProgName) {
Options Opts(*Diag, FM.get(), ArgList);
if (Diag->hasErrorOccurred())
return EXIT_FAILURE;
+
InstallAPIContext Ctx = Opts.createContext();
+ if (Diag->hasErrorOccurred())
+ return EXIT_FAILURE;
// Set up compilation.
std::unique_ptr<CompilerInstance> CI(new CompilerInstance());
@@ -80,6 +122,23 @@ static bool run(ArrayRef<const char *> Args, const char *ProgName) {
if (!CI->hasDiagnostics())
return EXIT_FAILURE;
+ // Execute and gather AST results.
+ // An invocation is ran for each unique target triple and for each header
+ // access level.
+ llvm::MachO::Records FrontendResults;
+ for (const auto &[Targ, Trip] : Opts.DriverOpts.Targets) {
+ for (const HeaderType Type :
+ {HeaderType::Public, HeaderType::Private, HeaderType::Project}) {
+ Ctx.Slice = std::make_shared<FrontendRecordsSlice>(Trip);
+ Ctx.Type = Type;
+ if (!runFrontend(ProgName, Opts.DriverOpts.Verbose, Ctx,
+ InMemoryFileSystem.get(), Opts.getClangFrontendArgs()))
+ return EXIT_FAILURE;
+ FrontendResults.emplace_back(std::move(Ctx.Slice));
+ }
+ }
+
+ // After symbols have been collected, prepare to write output.
auto Out = CI->createOutputFile(Ctx.OutputLoc, /*Binary=*/false,
/*RemoveFileOnSignal=*/false,
/*UseTemporary=*/false,
@@ -88,7 +147,13 @@ static bool run(ArrayRef<const char *> Args, const char *ProgName) {
return EXIT_FAILURE;
// Assign attributes for serialization.
- InterfaceFile IF;
+ auto Symbols = std::make_unique<SymbolSet>();
+ for (const auto &FR : FrontendResults) {
+ SymbolConverter Converter(Symbols.get(), FR->getTarget());
+ FR->visit(Converter);
+ }
+
+ InterfaceFile IF(std::move(Symbols));
for (const auto &TargetInfo : Opts.DriverOpts.Targets) {
IF.addTarget(TargetInfo.first);
IF.setFromBinaryAttrs(Ctx.BA, TargetInfo.first);
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
index 562a643edfcf..7d45e999448d 100644
--- a/clang/tools/clang-installapi/Options.cpp
+++ b/clang/tools/clang-installapi/Options.cpp
@@ -9,6 +9,7 @@
#include "Options.h"
#include "clang/Driver/Driver.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/InstallAPI/FileList.h"
#include "llvm/Support/Program.h"
#include "llvm/TargetParser/Host.h"
@@ -68,6 +69,8 @@ bool Options::processDriverOptions(InputArgList &Args) {
}
}
+ DriverOpts.Verbose = Args.hasArgNoClaim(OPT_v);
+
return true;
}
@@ -104,10 +107,21 @@ Options::Options(DiagnosticsEngine &Diag, FileManager *FM,
if (!processLinkerOptions(ArgList))
return;
+
+ /// Any remaining arguments should be handled by invoking the clang frontend.
+ for (const Arg *A : ArgList) {
+ if (A->isClaimed())
+ continue;
+ FrontendArgs.emplace_back(A->getAsString(ArgList));
+ }
+ FrontendArgs.push_back("-fsyntax-only");
}
InstallAPIContext Options::createContext() {
InstallAPIContext Ctx;
+ Ctx.FM = FM;
+ Ctx.Diags = Diags;
+
// InstallAPI requires two level namespacing.
Ctx.BA.TwoLevelNamespace = true;
@@ -116,6 +130,21 @@ InstallAPIContext Options::createContext() {
Ctx.BA.AppExtensionSafe = LinkerOpts.AppExtensionSafe;
Ctx.FT = DriverOpts.OutFT;
Ctx.OutputLoc = DriverOpts.OutputPath;
+
+ // Process inputs.
+ for (const std::string &ListPath : DriverOpts.FileLists) {
+ auto Buffer = FM->getBufferForFile(ListPath);
+ if (auto Err = Buffer.getError()) {
+ Diags->Report(diag::err_cannot_open_file) << ListPath;
+ return Ctx;
+ }
+ if (auto Err = FileListReader::loadHeaders(std::move(Buffer.get()),
+ Ctx.InputHeaders)) {
+ Diags->Report(diag::err_cannot_open_file) << ListPath;
+ return Ctx;
+ }
+ }
+
return Ctx;
}
diff --git a/clang/tools/clang-installapi/Options.h b/clang/tools/clang-installapi/Options.h
index 4a84166a6c91..f68addf19728 100644
--- a/clang/tools/clang-installapi/Options.h
+++ b/clang/tools/clang-installapi/Options.h
@@ -43,6 +43,9 @@ struct DriverOptions {
/// \brief File encoding to print.
llvm::MachO::FileType OutFT = llvm::MachO::FileType::TBD_V5;
+
+ /// \brief Print verbose output.
+ bool Verbose = false;
};
struct LinkerOptions {
@@ -78,9 +81,14 @@ public:
Options(clang::DiagnosticsEngine &Diag, FileManager *FM,
llvm::opt::InputArgList &Args);
+ /// \brief Get CC1 arguments after extracting out the irrelevant
+ /// ones.
+ std::vector<std::string> &getClangFrontendArgs() { return FrontendArgs; }
+
private:
DiagnosticsEngine *Diags;
FileManager *FM;
+ std::vector<std::string> FrontendArgs;
};
} // namespace installapi
diff --git a/clang/tools/driver/driver.cpp b/clang/tools/driver/driver.cpp
index 0dfb512adb0c..376025e3605b 100644
--- a/clang/tools/driver/driver.cpp
+++ b/clang/tools/driver/driver.cpp
@@ -323,28 +323,6 @@ static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient,
DiagClient->setPrefix(std::string(ExeBasename));
}
-static void SetInstallDir(SmallVectorImpl<const char *> &argv,
- Driver &TheDriver, bool CanonicalPrefixes) {
- // Attempt to find the original path used to invoke the driver, to determine
- // the installed path. We do this manually, because we want to support that
- // path being a symlink.
- SmallString<128> InstalledPath(argv[0]);
-
- // Do a PATH lookup, if there are no directory components.
- if (llvm::sys::path::filename(InstalledPath) == InstalledPath)
- if (llvm::ErrorOr<std::string> Tmp = llvm::sys::findProgramByName(
- llvm::sys::path::filename(InstalledPath.str())))
- InstalledPath = *Tmp;
-
- // FIXME: We don't actually canonicalize this, we just make it absolute.
- if (CanonicalPrefixes)
- llvm::sys::fs::make_absolute(InstalledPath);
-
- StringRef InstalledPathParent(llvm::sys::path::parent_path(InstalledPath));
- if (llvm::sys::fs::exists(InstalledPathParent))
- TheDriver.setInstalledDir(InstalledPathParent);
-}
-
static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV,
const llvm::ToolContext &ToolContext) {
// If we call the cc1 tool from the clangDriver library (through
@@ -484,7 +462,6 @@ int clang_main(int Argc, char **Argv, const llvm::ToolContext &ToolContext) {
ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false);
Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags);
- SetInstallDir(Args, TheDriver, CanonicalPrefixes);
auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(ProgName);
TheDriver.setTargetAndMode(TargetAndMode);
// If -canonical-prefixes is set, GetExecutablePath will have resolved Path
diff --git a/clang/unittests/Analysis/FlowSensitive/TestingSupport.h b/clang/unittests/Analysis/FlowSensitive/TestingSupport.h
index 0d36d2802897..b7cf6cc966ed 100644
--- a/clang/unittests/Analysis/FlowSensitive/TestingSupport.h
+++ b/clang/unittests/Analysis/FlowSensitive/TestingSupport.h
@@ -432,6 +432,8 @@ llvm::Error checkDataflowWithNoopAnalysis(
{});
/// Returns the `ValueDecl` for the given identifier.
+/// The returned pointer is guaranteed to be non-null; the function asserts if
+/// no `ValueDecl` with the given name is found.
///
/// Requirements:
///
@@ -475,6 +477,15 @@ ValueT &getValueForDecl(ASTContext &ASTCtx, const Environment &Env,
return *cast<ValueT>(Env.getValue(*VD));
}
+/// Returns the storage location for the field called `Name` of `Loc`.
+/// Optionally casts the field storage location to `T`.
+template <typename T = StorageLocation>
+std::enable_if_t<std::is_base_of_v<StorageLocation, T>, T &>
+getFieldLoc(const RecordStorageLocation &Loc, llvm::StringRef Name,
+ ASTContext &ASTCtx) {
+ return *cast<T>(Loc.getChild(*findValueDecl(ASTCtx, Name)));
+}
+
/// Returns the value of a `Field` on the record referenced by `Loc.`
/// Returns null if `Loc` is null.
inline Value *getFieldValue(const RecordStorageLocation *Loc,
@@ -487,6 +498,14 @@ inline Value *getFieldValue(const RecordStorageLocation *Loc,
return Env.getValue(*FieldLoc);
}
+/// Returns the value of a `Field` on the record referenced by `Loc.`
+/// Returns null if `Loc` is null.
+inline Value *getFieldValue(const RecordStorageLocation *Loc,
+ llvm::StringRef Name, ASTContext &ASTCtx,
+ const Environment &Env) {
+ return getFieldValue(Loc, *findValueDecl(ASTCtx, Name), Env);
+}
+
/// Creates and owns constraints which are boolean values.
class ConstraintContext {
unsigned NextAtom = 0;
diff --git a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
index 2be899f5b6da..f534ccb12547 100644
--- a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
@@ -2392,14 +2392,92 @@ TEST(TransferTest, InitListExprAsUnion) {
} F;
public:
- constexpr target() : F{nullptr} {}
+ constexpr target() : F{nullptr} {
+ int *null = nullptr;
+ F.b; // Make sure we reference 'b' so it is modeled.
+ // [[p]]
+ }
};
)cc";
runDataflow(
Code,
[](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
ASTContext &ASTCtx) {
- // Just verify that it doesn't crash.
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto &FLoc = getFieldLoc<RecordStorageLocation>(
+ *Env.getThisPointeeStorageLocation(), "F", ASTCtx);
+ auto *AVal = cast<PointerValue>(getFieldValue(&FLoc, "a", ASTCtx, Env));
+ EXPECT_EQ(AVal, &getValueForDecl<PointerValue>(ASTCtx, Env, "null"));
+ EXPECT_EQ(getFieldValue(&FLoc, "b", ASTCtx, Env), nullptr);
+ });
+}
+
+TEST(TransferTest, EmptyInitListExprForUnion) {
+ // This is a crash repro.
+ std::string Code = R"cc(
+ class target {
+ union {
+ int *a;
+ bool *b;
+ } F;
+
+ public:
+ // Empty initializer list means that `F` is aggregate-initialized.
+ // For a union, this has the effect that the first member of the union
+ // is copy-initialized from an empty initializer list; in this specific
+ // case, this has the effect of initializing `a` with null.
+ constexpr target() : F{} {
+ int *null = nullptr;
+ F.b; // Make sure we reference 'b' so it is modeled.
+ // [[p]]
+ }
+ };
+ )cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto &FLoc = getFieldLoc<RecordStorageLocation>(
+ *Env.getThisPointeeStorageLocation(), "F", ASTCtx);
+ auto *AVal = cast<PointerValue>(getFieldValue(&FLoc, "a", ASTCtx, Env));
+ EXPECT_EQ(AVal, &getValueForDecl<PointerValue>(ASTCtx, Env, "null"));
+ EXPECT_EQ(getFieldValue(&FLoc, "b", ASTCtx, Env), nullptr);
+ });
+}
+
+TEST(TransferTest, EmptyInitListExprForStruct) {
+ std::string Code = R"cc(
+ class target {
+ struct {
+ int *a;
+ bool *b;
+ } F;
+
+ public:
+ constexpr target() : F{} {
+ int *NullIntPtr = nullptr;
+ bool *NullBoolPtr = nullptr;
+ // [[p]]
+ }
+ };
+ )cc";
+ runDataflow(
+ Code,
+ [](const llvm::StringMap<DataflowAnalysisState<NoopLattice>> &Results,
+ ASTContext &ASTCtx) {
+ const Environment &Env = getEnvironmentAtAnnotation(Results, "p");
+
+ auto &FLoc = getFieldLoc<RecordStorageLocation>(
+ *Env.getThisPointeeStorageLocation(), "F", ASTCtx);
+ auto *AVal = cast<PointerValue>(getFieldValue(&FLoc, "a", ASTCtx, Env));
+ EXPECT_EQ(AVal,
+ &getValueForDecl<PointerValue>(ASTCtx, Env, "NullIntPtr"));
+ auto *BVal = cast<PointerValue>(getFieldValue(&FLoc, "b", ASTCtx, Env));
+ EXPECT_EQ(BVal,
+ &getValueForDecl<PointerValue>(ASTCtx, Env, "NullBoolPtr"));
});
}
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index d9752c73e34e..fc367a7a5a89 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -11159,10 +11159,8 @@ TEST_F(FormatTest, UnderstandsOverloadedOperators) {
verifyFormat("void f() { a.operator*(b & b); }");
verifyFormat("void f() { a->operator&(a * b); }");
verifyFormat("void f() { NS::a.operator+(*b * *b); }");
- // TODO: Calling an operator as a non-member function is hard to distinguish.
- // https://llvm.org/PR50629
- // verifyFormat("void f() { operator*(a & a); }");
- // verifyFormat("void f() { operator&(a, b * b); }");
+ verifyFormat("void f() { operator*(a & a); }");
+ verifyFormat("void f() { operator&(a, b * b); }");
verifyFormat("void f() { return operator()(x) * b; }");
verifyFormat("void f() { return operator[](x) * b; }");
@@ -16551,9 +16549,8 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeParens) {
verifyFormat("static_assert (sizeof (char) == 1, \"Impossible!\");", Space);
verifyFormat("int f () throw (Deprecated);", Space);
verifyFormat("typedef void (*cb) (int);", Space);
- // FIXME these tests regressed behaviour.
- // verifyFormat("T A::operator() ();", Space);
- // verifyFormat("X A::operator++ (T);", Space);
+ verifyFormat("T A::operator() ();", Space);
+ verifyFormat("X A::operator++ (T);", Space);
verifyFormat("auto lambda = [] () { return 0; };", Space);
verifyFormat("int x = int (y);", Space);
verifyFormat("#define F(...) __VA_OPT__ (__VA_ARGS__)", Space);
@@ -16612,8 +16609,7 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeParens) {
verifyFormat("int f() throw (Deprecated);", SomeSpace);
verifyFormat("typedef void (*cb) (int);", SomeSpace);
verifyFormat("T A::operator()();", SomeSpace);
- // FIXME these tests regressed behaviour.
- // verifyFormat("X A::operator++ (T);", SomeSpace);
+ verifyFormat("X A::operator++ (T);", SomeSpace);
verifyFormat("int x = int (y);", SomeSpace);
verifyFormat("auto lambda = []() { return 0; };", SomeSpace);
@@ -16671,9 +16667,8 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeParens) {
SpaceFuncDecl);
verifyFormat("int f () throw(Deprecated);", SpaceFuncDecl);
verifyFormat("typedef void (*cb)(int);", SpaceFuncDecl);
- // FIXME these tests regressed behaviour.
- // verifyFormat("T A::operator() ();", SpaceFuncDecl);
- // verifyFormat("X A::operator++ (T);", SpaceFuncDecl);
+ verifyFormat("T A::operator()();", SpaceFuncDecl);
+ verifyFormat("X A::operator++(T);", SpaceFuncDecl);
verifyFormat("T A::operator()() {}", SpaceFuncDecl);
verifyFormat("auto lambda = []() { return 0; };", SpaceFuncDecl);
verifyFormat("int x = int(y);", SpaceFuncDecl);
@@ -16710,7 +16705,7 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeParens) {
verifyFormat("typedef void (*cb)(int);", SpaceFuncDef);
verifyFormat("T A::operator()();", SpaceFuncDef);
verifyFormat("X A::operator++(T);", SpaceFuncDef);
- // verifyFormat("T A::operator() () {}", SpaceFuncDef);
+ verifyFormat("T A::operator()() {}", SpaceFuncDef);
verifyFormat("auto lambda = [] () { return 0; };", SpaceFuncDef);
verifyFormat("int x = int(y);", SpaceFuncDef);
verifyFormat("M(std::size_t R, std::size_t C) : C(C), data(R) {}",
@@ -16797,7 +16792,7 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeParens) {
verifyFormat("int f() throw (Deprecated);", SomeSpace2);
verifyFormat("typedef void (*cb) (int);", SomeSpace2);
verifyFormat("T A::operator()();", SomeSpace2);
- // verifyFormat("X A::operator++ (T);", SomeSpace2);
+ verifyFormat("X A::operator++ (T);", SomeSpace2);
verifyFormat("int x = int (y);", SomeSpace2);
verifyFormat("auto lambda = []() { return 0; };", SomeSpace2);
diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp
index 935b9846990e..eb5c34d15693 100644
--- a/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -3576,10 +3576,6 @@ static void GenerateHasAttrSpellingStringSwitch(
const Record *R = Attr->getValueAsDef("Target");
std::vector<StringRef> Arches = R->getValueAsListOfStrings("Arches");
GenerateTargetSpecificAttrChecks(R, Arches, Test, nullptr);
-
- // If this is the C++11 variety, also add in the LangOpts test.
- if (Variety == "CXX11")
- Test += " && LangOpts.CPlusPlus11";
} else if (!Attr->getValueAsListOfDefs("TargetSpecificSpellings").empty()) {
// Add target checks if this spelling is target-specific.
const std::vector<Record *> TargetSpellings =
@@ -3597,13 +3593,7 @@ static void GenerateHasAttrSpellingStringSwitch(
}
}
}
-
- if (Variety == "CXX11")
- Test += " && LangOpts.CPlusPlus11";
- } else if (Variety == "CXX11")
- // C++11 mode should be checked against LangOpts, which is presumed to be
- // present in the caller.
- Test = "LangOpts.CPlusPlus11";
+ }
std::string TestStr = !Test.empty()
? Test + " ? " + llvm::itostr(Version) + " : 0"
diff --git a/compiler-rt/cmake/Modules/CompilerRTCompile.cmake b/compiler-rt/cmake/Modules/CompilerRTCompile.cmake
index 64e7acb9afd8..3d7528ad2e52 100644
--- a/compiler-rt/cmake/Modules/CompilerRTCompile.cmake
+++ b/compiler-rt/cmake/Modules/CompilerRTCompile.cmake
@@ -46,7 +46,7 @@ function(sanitizer_test_compile obj_list source arch)
# Write out architecture-specific flags into TARGET_CFLAGS variable.
get_target_flags_for_arch(${arch} TARGET_CFLAGS)
set(COMPILE_DEPS ${TEST_COMPILE_DEPS})
- if(NOT COMPILER_RT_STANDALONE_BUILD)
+ if(NOT COMPILER_RT_STANDALONE_BUILD OR COMPILER_RT_TEST_STANDALONE_BUILD_LIBS)
list(APPEND COMPILE_DEPS ${TEST_DEPS})
endif()
clang_compile(${output_obj} ${source}
@@ -70,9 +70,14 @@ function(clang_compile object_file source)
if (TARGET CompilerRTUnitTestCheckCxx)
list(APPEND SOURCE_DEPS CompilerRTUnitTestCheckCxx)
endif()
+ string(REGEX MATCH "[.](cc|cpp)$" is_cxx ${source_rpath})
+ if (is_cxx)
+ set(compiler ${COMPILER_RT_TEST_COMPILER})
+ else()
+ set(compiler ${COMPILER_RT_TEST_CXX_COMPILER})
+ endif()
if(COMPILER_RT_STANDALONE_BUILD)
# Only add global flags in standalone build.
- string(REGEX MATCH "[.](cc|cpp)$" is_cxx ${source_rpath})
if(is_cxx)
string(REPLACE " " ";" global_flags "${CMAKE_CXX_FLAGS}")
else()
@@ -102,7 +107,7 @@ function(clang_compile object_file source)
add_custom_command(
OUTPUT ${object_file}
- COMMAND ${COMPILER_RT_TEST_COMPILER} ${compile_flags} -c
+ COMMAND ${compiler} ${compile_flags} -c
-o "${object_file}"
${source_rpath}
MAIN_DEPENDENCY ${source}
diff --git a/compiler-rt/lib/asan/tests/CMakeLists.txt b/compiler-rt/lib/asan/tests/CMakeLists.txt
index 6ee2fb01c0df..bda47bd7fd6a 100644
--- a/compiler-rt/lib/asan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/asan/tests/CMakeLists.txt
@@ -172,7 +172,7 @@ function(add_asan_tests arch test_runtime)
function(generate_asan_tests test_objects test_suite testname)
generate_compiler_rt_tests(${test_objects} ${test_suite} ${testname} ${arch}
COMPILE_DEPS ${ASAN_UNITTEST_HEADERS} ${ASAN_IGNORELIST_FILE}
- DEPS llvm_gtest asan
+ DEPS asan
KIND ${TEST_KIND}
${ARGN}
)
diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt
index 28ded8766f25..83f7697a4a2b 100644
--- a/compiler-rt/lib/builtins/CMakeLists.txt
+++ b/compiler-rt/lib/builtins/CMakeLists.txt
@@ -916,7 +916,7 @@ cmake_dependent_option(COMPILER_RT_BUILD_CRT "Build crtbegin.o/crtend.o" ON "COM
if (COMPILER_RT_BUILD_CRT)
add_compiler_rt_component(crt)
- option(COMPILER_RT_CRT_USE_EH_FRAME_REGISTRY "Use eh_frame in crtbegin.o/crtend.o" ON)
+ option(COMPILER_RT_CRT_USE_EH_FRAME_REGISTRY "Use eh_frame in crtbegin.o/crtend.o" OFF)
include(CheckSectionExists)
check_section_exists(".init_array" COMPILER_RT_HAS_INITFINI_ARRAY
diff --git a/compiler-rt/lib/fuzzer/tests/CMakeLists.txt b/compiler-rt/lib/fuzzer/tests/CMakeLists.txt
index dd82c492e83a..8f5707c687ac 100644
--- a/compiler-rt/lib/fuzzer/tests/CMakeLists.txt
+++ b/compiler-rt/lib/fuzzer/tests/CMakeLists.txt
@@ -74,7 +74,7 @@ if(COMPILER_RT_DEFAULT_TARGET_ARCH IN_LIST FUZZER_SUPPORTED_ARCH)
FuzzerUnitTests "Fuzzer-${arch}-Test" ${arch}
SOURCES FuzzerUnittest.cpp ${COMPILER_RT_GTEST_SOURCE}
RUNTIME ${LIBFUZZER_TEST_RUNTIME}
- DEPS llvm_gtest ${LIBFUZZER_TEST_RUNTIME_DEPS}
+ DEPS ${LIBFUZZER_TEST_RUNTIME_DEPS}
CFLAGS ${LIBFUZZER_UNITTEST_CFLAGS} ${LIBFUZZER_TEST_RUNTIME_CFLAGS}
LINK_FLAGS ${LIBFUZZER_UNITTEST_LINK_FLAGS} ${LIBFUZZER_TEST_RUNTIME_LINK_FLAGS})
set_target_properties(FuzzerUnitTests PROPERTIES
@@ -84,7 +84,7 @@ if(COMPILER_RT_DEFAULT_TARGET_ARCH IN_LIST FUZZER_SUPPORTED_ARCH)
generate_compiler_rt_tests(FuzzedDataProviderTestObjects
FuzzedDataProviderUnitTests "FuzzerUtils-${arch}-Test" ${arch}
SOURCES FuzzedDataProviderUnittest.cpp ${COMPILER_RT_GTEST_SOURCE}
- DEPS llvm_gtest ${LIBFUZZER_TEST_RUNTIME_DEPS} ${COMPILER_RT_SOURCE_DIR}/include/fuzzer/FuzzedDataProvider.h
+ DEPS ${LIBFUZZER_TEST_RUNTIME_DEPS} ${COMPILER_RT_SOURCE_DIR}/include/fuzzer/FuzzedDataProvider.h
CFLAGS ${LIBFUZZER_UNITTEST_CFLAGS} ${LIBFUZZER_TEST_RUNTIME_CFLAGS}
LINK_FLAGS ${LIBFUZZER_UNITTEST_LINK_FLAGS} ${LIBFUZZER_TEST_RUNTIME_LINK_FLAGS})
set_target_properties(FuzzedDataProviderUnitTests PROPERTIES
diff --git a/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt b/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt
index 4915c83d49ca..2ec332ea74c1 100644
--- a/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt
@@ -74,7 +74,7 @@ if(COMPILER_RT_DEFAULT_TARGET_ARCH IN_LIST GWP_ASAN_SUPPORTED_ARCH)
GwpAsanUnitTests "GwpAsan-${arch}-Test" ${arch}
SOURCES ${GWP_ASAN_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE}
RUNTIME ${GWP_ASAN_TEST_RUNTIME}
- DEPS llvm_gtest ${GWP_ASAN_UNIT_TEST_HEADERS}
+ DEPS ${GWP_ASAN_UNIT_TEST_HEADERS}
CFLAGS ${GWP_ASAN_UNITTEST_CFLAGS}
LINK_FLAGS ${GWP_ASAN_UNITTEST_LINK_FLAGS})
set_target_properties(GwpAsanUnitTests PROPERTIES
diff --git a/compiler-rt/lib/interception/tests/CMakeLists.txt b/compiler-rt/lib/interception/tests/CMakeLists.txt
index 644a57664cc4..0a235c662af3 100644
--- a/compiler-rt/lib/interception/tests/CMakeLists.txt
+++ b/compiler-rt/lib/interception/tests/CMakeLists.txt
@@ -107,7 +107,6 @@ macro(add_interception_tests_for_arch arch)
RUNTIME ${INTERCEPTION_COMMON_LIB}
SOURCES ${INTERCEPTION_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE}
COMPILE_DEPS ${INTERCEPTION_TEST_HEADERS}
- DEPS llvm_gtest
CFLAGS ${INTERCEPTION_TEST_CFLAGS_COMMON}
LINK_FLAGS ${INTERCEPTION_TEST_LINK_FLAGS_COMMON})
endmacro()
diff --git a/compiler-rt/lib/msan/tests/CMakeLists.txt b/compiler-rt/lib/msan/tests/CMakeLists.txt
index 6ef63ff82166..1cb03d8323f6 100644
--- a/compiler-rt/lib/msan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/msan/tests/CMakeLists.txt
@@ -69,7 +69,7 @@ macro(msan_compile obj_list source arch kind cflags)
${obj_list} ${source} ${arch}
KIND ${kind}
COMPILE_DEPS ${MSAN_UNITTEST_HEADERS}
- DEPS llvm_gtest msan
+ DEPS msan
CFLAGS -isystem ${CMAKE_CURRENT_BINARY_DIR}/../libcxx_msan_${arch}/include/c++/v1
${MSAN_UNITTEST_INSTRUMENTED_CFLAGS} ${cflags}
)
diff --git a/compiler-rt/lib/orc/tests/CMakeLists.txt b/compiler-rt/lib/orc/tests/CMakeLists.txt
index 2f1cb7657c28..e8f4c95b8a65 100644
--- a/compiler-rt/lib/orc/tests/CMakeLists.txt
+++ b/compiler-rt/lib/orc/tests/CMakeLists.txt
@@ -73,7 +73,7 @@ macro(add_orc_unittest testname)
SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
RUNTIME "${ORC_RUNTIME_LIBS}"
COMPILE_DEPS ${TEST_HEADERS} ${ORC_HEADERS}
- DEPS llvm_gtest ${ORC_DEPS}
+ DEPS ${ORC_DEPS}
CFLAGS ${ORC_UNITTEST_CFLAGS} ${COMPILER_RT_GTEST_CFLAGS}
LINK_FLAGS ${ORC_UNITTEST_LINK_FLAGS})
endif()
diff --git a/compiler-rt/lib/profile/InstrProfilingBuffer.c b/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 7c5c26f4d113..1c451d7ec756 100644
--- a/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -61,19 +61,12 @@ uint64_t __llvm_profile_get_size_for_buffer(void) {
NamesBegin, NamesEnd, VTableBegin, VTableEnd, VNamesBegin, VNamesEnd);
}
+// NOTE: Caller should guarantee that `Begin` and `End` specifies a half-open
+// interval [Begin, End). Namely, `End` is one-byte past the end of the array.
COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_num_data(const __llvm_profile_data *Begin,
const __llvm_profile_data *End) {
intptr_t BeginI = (intptr_t)Begin, EndI = (intptr_t)End;
- // `sizeof(__llvm_profile_data) - 1` is required in the numerator when
- // [Begin, End] represents an inclusive range.
- // For ELF, [Begin, End) represents the address of linker-inserted
- // symbols `__start__<elf-section>` and `__stop_<elf-section>`.
- // Thereby, `End` is one byte past the inclusive range, and
- // `sizeof(__llvm_profile_data) - 1` is not necessary in the numerator to get
- // the correct number of profile data.
- // FIXME: Consider removing `sizeof(__llvm_profile_data) - 1` if this is true
- // across platforms.
return ((EndI + sizeof(__llvm_profile_data) - 1) - BeginI) /
sizeof(__llvm_profile_data);
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
index 38f968d533b1..4940062eeae4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -475,6 +475,8 @@ CHECK_TYPE_SIZE(nfds_t);
CHECK_TYPE_SIZE(sigset_t);
COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+COMPILER_CHECK(sizeof(__sanitizer_siginfo) == sizeof(siginfo_t));
+CHECK_SIZE_AND_OFFSET(siginfo_t, si_value);
// Can't write checks for sa_handler and sa_sigaction due to them being
// preprocessor macros.
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
index 43b8a38f39be..8ce73f206fd8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
@@ -301,11 +301,29 @@ struct __sanitizer_sigset_t {
typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+union __sanitizer_sigval {
+ int sival_int;
+ void *sival_ptr;
+};
+
struct __sanitizer_siginfo {
- // The size is determined by looking at sizeof of real siginfo_t on linux.
- u64 opaque[128 / sizeof(u64)];
+ int si_signo;
+ int si_errno;
+ int si_code;
+ pid_t si_pid;
+ u32 si_uid;
+ int si_status;
+ void *si_addr;
+ union __sanitizer_sigval si_value;
+# if SANITIZER_WORDSIZE == 64
+ char data[40];
+# else
+ char data[32];
+# endif
};
+typedef __sanitizer_siginfo __sanitizer_siginfo_t;
+
using __sanitizer_sighandler_ptr = void (*)(int sig);
using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
__sanitizer_siginfo *siginfo,
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
index dcfd94fe3225..7c2d8e6f1731 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
@@ -30,17 +30,17 @@ namespace __sanitizer {
#if SANITIZER_FREEBSD
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
- const int Mib[] = {
- CTL_KERN,
- KERN_PROC,
- KERN_PROC_PID,
- getpid()
- };
-
- struct kinfo_proc InfoProc;
- uptr Len = sizeof(InfoProc);
- CHECK_EQ(internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)&InfoProc, &Len, 0), 0);
- cb(0, InfoProc.ki_rssize * GetPageSizeCached(), false, stats);
+ const int Mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()};
+
+ struct kinfo_proc *InfoProc;
+ uptr Len = sizeof(*InfoProc);
+ uptr Size = Len;
+ InfoProc = (struct kinfo_proc *)MmapOrDie(Size, "GetMemoryProfile()");
+ CHECK_EQ(
+ internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)InfoProc, &Len, 0),
+ 0);
+ cb(0, InfoProc->ki_rssize * GetPageSizeCached(), false, stats);
+ UnmapOrDie(InfoProc, Size, true);
}
#endif
diff --git a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt
index 3c709e411e48..a3efe6871508 100644
--- a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt
@@ -176,7 +176,6 @@ macro(add_sanitizer_tests_for_arch arch)
RUNTIME "${SANITIZER_COMMON_LIB}"
SOURCES ${SANITIZER_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE} ${COMPILER_RT_GMOCK_SOURCE}
COMPILE_DEPS ${SANITIZER_TEST_HEADERS}
- DEPS llvm_gtest
CFLAGS ${SANITIZER_TEST_CFLAGS_COMMON} ${extra_flags}
LINK_FLAGS ${SANITIZER_TEST_LINK_FLAGS_COMMON} ${TARGET_LINK_FLAGS} ${extra_flags})
diff --git a/compiler-rt/lib/scudo/standalone/allocator_common.h b/compiler-rt/lib/scudo/standalone/allocator_common.h
index 95f4776ac596..2b77516ad11c 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_common.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_common.h
@@ -40,6 +40,7 @@ template <class SizeClassAllocator> struct TransferBatch {
B->Count = static_cast<u16>(B->Count - N);
}
void clear() { Count = 0; }
+ bool empty() { return Count == 0; }
void add(CompactPtrT P) {
DCHECK_LT(Count, MaxNumCached);
Batch[Count++] = P;
@@ -48,6 +49,12 @@ template <class SizeClassAllocator> struct TransferBatch {
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
clear();
}
+
+ void moveNToArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, Count);
+ memcpy(Array, Batch + Count - N, sizeof(Batch[0]) * N);
+ Count = static_cast<u16>(Count - N);
+ }
u16 getCount() const { return Count; }
bool isEmpty() const { return Count == 0U; }
CompactPtrT get(u16 I) const {
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 4d03b282d000..c86e75b8fd66 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -191,38 +191,21 @@ public:
return BlockSize > PageSize;
}
- // Note that the `MaxBlockCount` will be used when we support arbitrary blocks
- // count. Now it's the same as the number of blocks stored in the
- // `TransferBatch`.
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
- UNUSED const u16 MaxBlockCount) {
- TransferBatchT *B = popBatch(C, ClassId);
- if (!B)
- return 0;
-
- const u16 Count = B->getCount();
- DCHECK_GT(Count, 0U);
- B->moveToArray(ToArray);
-
- if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, B);
-
- return Count;
- }
-
- TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
+ const u16 MaxBlockCount) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
- TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
- if (UNLIKELY(!B)) {
+
+ u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+ if (UNLIKELY(PopCount == 0)) {
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
- return nullptr;
- B = popBatchImpl(C, ClassId, Sci);
- // if `populateFreeList` succeeded, we are supposed to get free blocks.
- DCHECK_NE(B, nullptr);
+ return 0U;
+ PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+ DCHECK_NE(PopCount, 0U);
}
- return B;
+
+ return PopCount;
}
// Push the array of free blocks to the designated batch group.
@@ -510,7 +493,7 @@ private:
// by TransferBatch is also free for use. We don't need to recycle the
// TransferBatch. Note that the correctness is maintained by the invariant,
//
- // The unit of each popBatch() request is entire TransferBatch. Return
+ // Each popBlocks() request returns the entire TransferBatch. Returning
// part of the blocks in a TransferBatch is invalid.
//
// This ensures that TransferBatch won't leak the address itself while it's
@@ -634,7 +617,7 @@ private:
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
+ BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
return BG;
};
@@ -726,14 +709,11 @@ private:
InsertBlocks(Cur, Array + Size - Count, Count);
}
- // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
- // group id will be considered first.
- //
- // The region mutex needs to be held while calling this method.
- TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
+ CompactPtrT *ToArray, const u16 MaxBlockCount)
REQUIRES(Sci->Mutex) {
if (Sci->FreeListInfo.BlockList.empty())
- return nullptr;
+ return 0U;
SinglyLinkedList<TransferBatchT> &Batches =
Sci->FreeListInfo.BlockList.front()->Batches;
@@ -746,33 +726,57 @@ private:
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
// `TransferBatch` with single block.
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
- TB->clear();
- TB->add(
- compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ ToArray[0] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
Sci->FreeListInfo.PoppedBlocks += 1;
- return TB;
+ return 1U;
}
+ // So far, instead of always filling the blocks to `MaxBlockCount`, we only
+ // examine single `TransferBatch` to minimize the time spent on the primary
+ // allocator. Besides, the sizes of `TransferBatch` and
+ // `CacheT::getMaxCached()` may also impact the time spent on accessing the
+ // primary allocator.
+ // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
+ // blocks and/or adjust the size of `TransferBatch` according to
+ // `CacheT::getMaxCached()`.
TransferBatchT *B = Batches.front();
- Batches.pop_front();
DCHECK_NE(B, nullptr);
DCHECK_GT(B->getCount(), 0U);
- if (Batches.empty()) {
- BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
- Sci->FreeListInfo.BlockList.pop_front();
-
- // We don't keep BatchGroup with zero blocks to avoid empty-checking while
- // allocating. Note that block used by constructing BatchGroup is recorded
- // as free blocks in the last element of BatchGroup::Batches. Which means,
- // once we pop the last TransferBatch, the block is implicitly
- // deallocated.
+ // BatchClassId should always take all blocks in the TransferBatch. Read the
+ // comment in `pushBatchClassBlocks()` for more details.
+ const u16 PopCount = ClassId == SizeClassMap::BatchClassId
+ ? B->getCount()
+ : Min(MaxBlockCount, B->getCount());
+ B->moveNToArray(ToArray, PopCount);
+
+ // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
+ // done without holding `Mutex`.
+ if (B->empty()) {
+ Batches.pop_front();
+ // `TransferBatch` of BatchClassId is self-contained, no need to
+ // deallocate. Read the comment in `pushBatchClassBlocks()` for more
+ // details.
if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, BG);
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
+
+ // We don't keep BatchGroup with zero blocks to avoid empty-checking
+ // while allocating. Note that block used for constructing BatchGroup is
+ // recorded as free blocks in the last element of BatchGroup::Batches.
+ // Which means, once we pop the last TransferBatch, the block is
+ // implicitly deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
}
- Sci->FreeListInfo.PoppedBlocks += B->getCount();
- return B;
+ Sci->FreeListInfo.PoppedBlocks += PopCount;
+ return PopCount;
}
NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 9a642d23620e..d89a2e6a4e5c 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -12,6 +12,7 @@
#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
+#include "condition_variable.h"
#include "list.h"
#include "local_cache.h"
#include "mem_map.h"
@@ -22,8 +23,6 @@
#include "string_utils.h"
#include "thread_annotations.h"
-#include "condition_variable.h"
-
namespace scudo {
// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
@@ -221,41 +220,24 @@ public:
DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
}
- // Note that the `MaxBlockCount` will be used when we support arbitrary blocks
- // count. Now it's the same as the number of blocks stored in the
- // `TransferBatch`.
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
- UNUSED const u16 MaxBlockCount) {
- TransferBatchT *B = popBatch(C, ClassId);
- if (!B)
- return 0;
-
- const u16 Count = B->getCount();
- DCHECK_GT(Count, 0U);
- B->moveToArray(ToArray);
-
- if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, B);
-
- return Count;
- }
-
- TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
+ const u16 MaxBlockCount) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
+ u16 PopCount = 0;
{
ScopedLock L(Region->FLLock);
- TransferBatchT *B = popBatchImpl(C, ClassId, Region);
- if (LIKELY(B))
- return B;
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ return PopCount;
}
bool ReportRegionExhausted = false;
- TransferBatchT *B = nullptr;
if (conditionVariableEnabled()) {
- B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
+ PopCount = popBlocksWithCV(C, ClassId, Region, ToArray, MaxBlockCount,
+ ReportRegionExhausted);
} else {
while (true) {
// When two threads compete for `Region->MMLock`, we only want one of
@@ -264,13 +246,15 @@ public:
ScopedLock ML(Region->MMLock);
{
ScopedLock FL(Region->FLLock);
- if ((B = popBatchImpl(C, ClassId, Region)))
- break;
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ return PopCount;
}
const bool RegionIsExhausted = Region->Exhausted;
if (!RegionIsExhausted)
- B = populateFreeListAndPopBatch(C, ClassId, Region);
+ PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
+ MaxBlockCount);
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
break;
}
@@ -286,7 +270,7 @@ public:
reportOutOfBatchClass();
}
- return B;
+ return PopCount;
}
// Push the array of free blocks to the designated batch group.
@@ -640,7 +624,7 @@ private:
// by TransferBatch is also free for use. We don't need to recycle the
// TransferBatch. Note that the correctness is maintained by the invariant,
//
- // The unit of each popBatch() request is entire TransferBatch. Return
+ // Each popBlocks() request returns the entire TransferBatch. Returning
// part of the blocks in a TransferBatch is invalid.
//
// This ensures that TransferBatch won't leak the address itself while it's
@@ -763,7 +747,7 @@ private:
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
+ BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
return BG;
};
@@ -855,9 +839,10 @@ private:
InsertBlocks(Cur, Array + Size - Count, Count);
}
- TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
- bool &ReportRegionExhausted) {
- TransferBatchT *B = nullptr;
+ u16 popBlocksWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *ToArray, const u16 MaxBlockCount,
+ bool &ReportRegionExhausted) {
+ u16 PopCount = 0;
while (true) {
// We only expect one thread doing the freelist refillment and other
@@ -878,7 +863,8 @@ private:
const bool RegionIsExhausted = Region->Exhausted;
if (!RegionIsExhausted)
- B = populateFreeListAndPopBatch(C, ClassId, Region);
+ PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
+ MaxBlockCount);
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
{
@@ -905,7 +891,8 @@ private:
// blocks were used up right after the refillment. Therefore, we have to
// check if someone is still populating the freelist.
ScopedLock FL(Region->FLLock);
- if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
break;
if (!Region->isPopulatingFreeList)
@@ -918,21 +905,19 @@ private:
// `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
Region->FLLockCV.wait(Region->FLLock);
- if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
break;
}
- return B;
+ return PopCount;
}
- // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
- // group id will be considered first.
- //
- // The region mutex needs to be held while calling this method.
- TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
+ u16 popBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *ToArray, const u16 MaxBlockCount)
REQUIRES(Region->FLLock) {
if (Region->FreeListInfo.BlockList.empty())
- return nullptr;
+ return 0U;
SinglyLinkedList<TransferBatchT> &Batches =
Region->FreeListInfo.BlockList.front()->Batches;
@@ -945,39 +930,64 @@ private:
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
// `TransferBatch` with single block.
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
- TB->clear();
- TB->add(
- compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ ToArray[0] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
Region->FreeListInfo.PoppedBlocks += 1;
- return TB;
+ return 1U;
}
+ // So far, instead of always filling blocks to `MaxBlockCount`, we only
+ // examine single `TransferBatch` to minimize the time spent in the primary
+ // allocator. Besides, the sizes of `TransferBatch` and
+ // `CacheT::getMaxCached()` may also impact the time spent on accessing the
+ // primary allocator.
+ // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
+ // blocks and/or adjust the size of `TransferBatch` according to
+ // `CacheT::getMaxCached()`.
TransferBatchT *B = Batches.front();
- Batches.pop_front();
DCHECK_NE(B, nullptr);
DCHECK_GT(B->getCount(), 0U);
- if (Batches.empty()) {
- BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
- Region->FreeListInfo.BlockList.pop_front();
-
- // We don't keep BatchGroup with zero blocks to avoid empty-checking while
- // allocating. Note that block used by constructing BatchGroup is recorded
- // as free blocks in the last element of BatchGroup::Batches. Which means,
- // once we pop the last TransferBatch, the block is implicitly
- // deallocated.
+ // BatchClassId should always take all blocks in the TransferBatch. Read the
+ // comment in `pushBatchClassBlocks()` for more details.
+ const u16 PopCount = ClassId == SizeClassMap::BatchClassId
+ ? B->getCount()
+ : Min(MaxBlockCount, B->getCount());
+ B->moveNToArray(ToArray, PopCount);
+
+ // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
+ // done without holding `FLLock`.
+ if (B->empty()) {
+ Batches.pop_front();
+ // `TransferBatch` of BatchClassId is self-contained, no need to
+ // deallocate. Read the comment in `pushBatchClassBlocks()` for more
+ // details.
if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, BG);
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
+
+ // We don't keep BatchGroup with zero blocks to avoid empty-checking
+ // while allocating. Note that block used for constructing BatchGroup is
+ // recorded as free blocks in the last element of BatchGroup::Batches.
+ // Which means, once we pop the last TransferBatch, the block is
+ // implicitly deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
}
- Region->FreeListInfo.PoppedBlocks += B->getCount();
+ Region->FreeListInfo.PoppedBlocks += PopCount;
- return B;
+ return PopCount;
}
- // Refill the freelist and return one batch.
- NOINLINE TransferBatchT *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
- RegionInfo *Region)
+ NOINLINE u16 populateFreeListAndPopBlocks(CacheT *C, uptr ClassId,
+ RegionInfo *Region,
+ CompactPtrT *ToArray,
+ const u16 MaxBlockCount)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = CacheT::getMaxCached(Size);
@@ -994,7 +1004,7 @@ private:
const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
Region->Exhausted = true;
- return nullptr;
+ return 0U;
}
if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
@@ -1002,7 +1012,7 @@ private:
MAP_ALLOWNOMEM | MAP_RESIZABLE |
(useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
: 0)))) {
- return nullptr;
+ return 0U;
}
Region->MemMapInfo.MappedUser += MapSize;
C->getStats().add(StatMapped, MapSize);
@@ -1049,8 +1059,9 @@ private:
pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
}
- TransferBatchT *B = popBatchImpl(C, ClassId, Region);
- DCHECK_NE(B, nullptr);
+ const u16 PopCount =
+ popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ DCHECK_NE(PopCount, 0U);
// Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
// the requests from `PushBlocks` and `PopBatch` which are external
@@ -1062,7 +1073,7 @@ private:
C->getStats().add(StatFree, AllocatedUser);
Region->MemMapInfo.AllocatedUser += AllocatedUser;
- return B;
+ return PopCount;
}
void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
@@ -1186,7 +1197,7 @@ private:
}
// Note that we have extracted the `GroupsToRelease` from region freelist.
- // It's safe to let pushBlocks()/popBatches() access the remaining region
+ // It's safe to let pushBlocks()/popBlocks() access the remaining region
// freelist. In the steps 3 and 4, we will temporarily release the FLLock
// and lock it again before step 5.
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f52a4188bcf3..732fd307ed2f 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -122,7 +122,29 @@ bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
Flags |= MAP_RESIZABLE;
Flags |= MAP_ALLOWNOMEM;
- const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
+ const uptr PageSize = getPageSizeCached();
+ if (SCUDO_TRUSTY) {
+ /*
+ * On Trusty we need AllocPos to be usable for shared memory, which cannot
+ * cross multiple mappings. This means we need to split around AllocPos
+ * and not over it. We can only do this if the address is page-aligned.
+ */
+ const uptr TaggedSize = AllocPos - CommitBase;
+ if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
+ DCHECK_GT(TaggedSize, 0);
+ return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
+ MAP_MEMTAG | Flags) &&
+ MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
+ Flags);
+ } else {
+ const uptr RemapFlags =
+ (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
+ return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
+ RemapFlags);
+ }
+ }
+
+ const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
diff --git a/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
index c6b6a1cb57ce..ac92805872f9 100644
--- a/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
+++ b/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
@@ -81,7 +81,7 @@ macro(add_scudo_unittest testname)
"${testname}-${arch}-Test" ${arch}
SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
COMPILE_DEPS ${SCUDO_TEST_HEADERS}
- DEPS llvm_gtest scudo_standalone
+ DEPS scudo_standalone
RUNTIME ${RUNTIME}
CFLAGS ${SCUDO_UNITTEST_CFLAGS}
LINK_FLAGS ${SCUDO_UNITTEST_LINK_FLAGS})
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index 18171511758a..f64a5143b30d 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -237,7 +237,6 @@ struct SmallRegionsConfig {
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
TEST(ScudoPrimaryTest, Primary64OOM) {
using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
- using TransferBatch = Primary::TransferBatchT;
Primary Allocator;
Allocator.init(/*ReleaseToOsInterval=*/-1);
typename Primary::CacheT Cache;
@@ -245,29 +244,26 @@ TEST(ScudoPrimaryTest, Primary64OOM) {
Stats.init();
Cache.init(&Stats, &Allocator);
bool AllocationFailed = false;
- std::vector<TransferBatch *> Batches;
+ std::vector<void *> Blocks;
const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
- typename Primary::CacheT::CompactPtrT Blocks[TransferBatch::MaxNumCached];
+ const scudo::u16 MaxCachedBlockCount = Primary::CacheT::getMaxCached(Size);
for (scudo::uptr I = 0; I < 10000U; I++) {
- TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
- if (!B) {
- AllocationFailed = true;
- break;
+ for (scudo::uptr J = 0; J < MaxCachedBlockCount; ++J) {
+ void *Ptr = Cache.allocate(ClassId);
+ if (Ptr == nullptr) {
+ AllocationFailed = true;
+ break;
+ }
+ memset(Ptr, 'B', Size);
+ Blocks.push_back(Ptr);
}
- for (scudo::u16 J = 0; J < B->getCount(); J++)
- memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
- Batches.push_back(B);
- }
- while (!Batches.empty()) {
- TransferBatch *B = Batches.back();
- Batches.pop_back();
- const scudo::u16 Count = B->getCount();
- B->moveToArray(Blocks);
- Allocator.pushBlocks(&Cache, ClassId, Blocks, Count);
- Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
}
+
+ for (auto *Ptr : Blocks)
+ Cache.deallocate(ClassId, Ptr);
+
Cache.destroy(nullptr);
Allocator.releaseToOS(scudo::ReleaseToOS::Force);
scudo::ScopedString Str;
@@ -342,7 +338,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
V.push_back(std::make_pair(ClassId, P));
}
- // Try to interleave pushBlocks(), popBatch() and releaseToOS().
+ // Try to interleave pushBlocks(), popBlocks() and releaseToOS().
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
while (!V.empty()) {
diff --git a/compiler-rt/lib/tsan/tests/CMakeLists.txt b/compiler-rt/lib/tsan/tests/CMakeLists.txt
index c02c2279583b..1bc08bbf7450 100644
--- a/compiler-rt/lib/tsan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/tests/CMakeLists.txt
@@ -64,10 +64,10 @@ foreach (header ${TSAN_HEADERS})
list(APPEND TSAN_RTL_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
endforeach()
-set(TSAN_DEPS llvm_gtest tsan)
+set(TSAN_DEPS tsan)
# TSan uses C++ standard library headers.
if (TARGET cxx-headers OR HAVE_LIBCXX)
- set(TSAN_DEPS cxx-headers)
+ list(APPEND TSAN_DEPS cxx-headers)
endif()
# add_tsan_unittest(<name>
diff --git a/compiler-rt/lib/xray/tests/CMakeLists.txt b/compiler-rt/lib/xray/tests/CMakeLists.txt
index 732f982c932f..0a428b9a30b1 100644
--- a/compiler-rt/lib/xray/tests/CMakeLists.txt
+++ b/compiler-rt/lib/xray/tests/CMakeLists.txt
@@ -109,7 +109,7 @@ macro(add_xray_unittest testname)
${XRAY_HEADERS} ${XRAY_ALL_SOURCE_FILES_ABS_PATHS}
"test_helpers.h"
RUNTIME "${XRAY_RUNTIME_LIBS}"
- DEPS llvm_gtest xray llvm-xray LLVMXRay LLVMTestingSupport
+ DEPS xray llvm-xray LLVMXRay LLVMTestingSupport
CFLAGS ${XRAY_UNITTEST_CFLAGS}
LINK_FLAGS ${TARGET_LINK_FLAGS} ${XRAY_UNITTEST_LINK_FLAGS}
)
diff --git a/compiler-rt/test/builtins/Unit/ctor_dtor.c b/compiler-rt/test/builtins/Unit/ctor_dtor.c
index 47560722a9f7..3d5f895a0a1c 100644
--- a/compiler-rt/test/builtins/Unit/ctor_dtor.c
+++ b/compiler-rt/test/builtins/Unit/ctor_dtor.c
@@ -9,23 +9,13 @@
// Ensure the various startup functions are called in the proper order.
-// CHECK: __register_frame_info()
/// ctor() is here if ld.so/libc supports DT_INIT/DT_FINI
// CHECK: main()
/// dtor() is here if ld.so/libc supports DT_INIT/DT_FINI
-// CHECK: __deregister_frame_info()
struct object;
static int counter;
-void __register_frame_info(const void *fi, struct object *obj) {
- printf("__register_frame_info()\n");
-}
-
-void __deregister_frame_info(const void *fi) {
- printf("__deregister_frame_info()\n");
-}
-
void __attribute__((constructor)) ctor() {
printf("ctor()\n");
++counter;
diff --git a/compiler-rt/test/dfsan/reaches_function.c b/compiler-rt/test/dfsan/reaches_function.c
index 9e2bcee935b2..a00654e7ae13 100644
--- a/compiler-rt/test/dfsan/reaches_function.c
+++ b/compiler-rt/test/dfsan/reaches_function.c
@@ -32,11 +32,11 @@ void my_dfsan_reaches_function_callback(dfsan_label label, dfsan_origin origin,
__attribute__((noinline)) uint64_t add(uint64_t *a, uint64_t *b) {
return *a + *b;
- // CHECK: {{.*}}compiler-rt/test/dfsan/reaches_function.c:[[# @LINE - 1]] add.dfsan
+ // CHECK: reaches_function.c:[[# @LINE - 1]] add.dfsan
// CHECK-ORIGIN-TRACKING: Origin value: 0x10000002, Taint value was stored to memory at
- // CHECK-ORIGIN-TRACKING: #0 {{.*}} in add.dfsan {{.*}}compiler-rt/test/dfsan/reaches_function.c:[[# @LINE - 3]]:{{.*}}
+ // CHECK-ORIGIN-TRACKING: #0 {{.*}} in add.dfsan {{.*}}reaches_function.c:[[# @LINE - 3]]:{{.*}}
// CHECK-ORIGIN-TRACKING: Origin value: 0x1, Taint value was created at
- // CHECK-ORIGIN-TRACKING: #0 {{.*}} in main {{.*}}compiler-rt/test/dfsan/reaches_function.c:{{.*}}
+ // CHECK-ORIGIN-TRACKING: #0 {{.*}} in main {{.*}}reaches_function.c:{{.*}}
}
extern void my_dfsan_reaches_function_callback(dfsan_label label,
@@ -54,11 +54,11 @@ int main(int argc, char *argv[]) {
dfsan_set_label(8, &a, sizeof(a));
uint64_t c = add(&a, &b);
- // CHECK: {{.*}}compiler-rt/test/dfsan/reaches_function.c:[[# @LINE - 1]] main
+ // CHECK: reaches_function.c:[[# @LINE - 1]] main
// CHECK-ORIGIN-TRACKING: Origin value: 0x10000002, Taint value was stored to memory at
- // CHECK-ORIGIN-TRACKING: #0 {{.*}} in add.dfsan {{.*}}compiler-rt/test/dfsan/reaches_function.c:{{.*}}
+ // CHECK-ORIGIN-TRACKING: #0 {{.*}} in add.dfsan {{.*}}reaches_function.c:{{.*}}
// CHECK-ORIGIN-TRACKING: Origin value: 0x1, Taint value was created at
- // CHECK-ORIGIN-TRACKING: #0 {{.*}} in main {{.*}}compiler-rt/test/dfsan/reaches_function.c:[[# @LINE - 6]]:{{.*}}
+ // CHECK-ORIGIN-TRACKING: #0 {{.*}} in main {{.*}}reaches_function.c:[[# @LINE - 6]]:{{.*}}
return c;
}
diff --git a/compiler-rt/test/hwasan/TestCases/longjmp-out-of-range.c b/compiler-rt/test/hwasan/TestCases/longjmp-out-of-range.c
new file mode 100644
index 000000000000..2d7ed2ab5ee2
--- /dev/null
+++ b/compiler-rt/test/hwasan/TestCases/longjmp-out-of-range.c
@@ -0,0 +1,20 @@
+// RUN: %clang_hwasan -O0 %s -o %t && %run %t 2>&1 | FileCheck %s
+
+// REQUIRES: pointer-tagging
+#include <assert.h>
+#include <sanitizer/hwasan_interface.h>
+#include <stdlib.h>
+
+__attribute__((noinline)) int f(void *caller_frame) {
+ int z = 0;
+ int *volatile p = &z;
+ // Tag of local is never zero.
+ assert(__hwasan_tag_pointer(p, 0) != p);
+ __hwasan_handle_longjmp(NULL);
+ return p[0];
+}
+
+int main() {
+ return f(__builtin_frame_address(0));
+ // CHECK: HWASan is ignoring requested __hwasan_handle_longjmp:
+}
diff --git a/compiler-rt/test/profile/instrprof-basic.c b/compiler-rt/test/profile/instrprof-basic.c
index de66e1b27468..702f521ba4ed 100644
--- a/compiler-rt/test/profile/instrprof-basic.c
+++ b/compiler-rt/test/profile/instrprof-basic.c
@@ -1,6 +1,7 @@
// RUN: %clang_profgen -o %t -O3 %s
// RUN: env LLVM_PROFILE_FILE=%t.profraw %run %t
// RUN: llvm-profdata merge -o %t.profdata %t.profraw
+// RUN: llvm-profdata show --all-functions %t.profdata | FileCheck %s --check-prefix=PROFCNT
// RUN: %clang_profuse=%t.profdata -o - -S -emit-llvm %s | FileCheck %s --check-prefix=COMMON --check-prefix=ORIG
//
// RUN: rm -fr %t.dir1
@@ -8,6 +9,7 @@
// RUN: env LLVM_PROFILE_FILE=%t.dir1/profraw_e_%1m %run %t
// RUN: env LLVM_PROFILE_FILE=%t.dir1/profraw_e_%1m %run %t
// RUN: llvm-profdata merge -o %t.em.profdata %t.dir1
+// RUN: llvm-profdata show --all-functions %t.em.profdata | FileCheck %s --check-prefix=PROFCNT
// RUN: %clang_profuse=%t.em.profdata -o - -S -emit-llvm %s | FileCheck %s --check-prefix=COMMON --check-prefix=MERGE
//
// RUN: rm -fr %t.dir2
@@ -16,6 +18,7 @@
// RUN: %run %t.merge
// RUN: %run %t.merge
// RUN: llvm-profdata merge -o %t.m.profdata %t.dir2/
+// RUN: llvm-profdata show --all-functions %t.m.profdata | FileCheck %s --check-prefix=PROFCNT
// RUN: %clang_profuse=%t.m.profdata -o - -S -emit-llvm %s | FileCheck %s --check-prefix=COMMON --check-prefix=MERGE
//
// Test that merging is enabled by default with -fprofile-generate=
@@ -27,6 +30,7 @@
// RUN: %run %t.merge3
// RUN: %run %t.merge3
// RUN: llvm-profdata merge -o %t.m3.profdata %t.dir3/
+// RUN: llvm-profdata show --all-functions %t.m3.profdata | FileCheck %s --check-prefix=PROFCNT
// RUN: %clang_profuse=%t.m3.profdata -O0 -o - -S -emit-llvm %s | FileCheck %s --check-prefix=COMMON --check-prefix=PGOMERGE
//
// Test that merging is enabled by default with -fprofile-generate
@@ -40,6 +44,7 @@
// RUN: %run %t.dir4/merge4
// RUN: rm -f %t.dir4/merge4*
// RUN: llvm-profdata merge -o %t.m4.profdata ./
+// RUN: llvm-profdata show --all-functions %t.m4.profdata | FileCheck %s --check-prefix=PROFCNT
// RUN: %clang_profuse=%t.m4.profdata -O0 -o - -S -emit-llvm %s | FileCheck %s --check-prefix=COMMON --check-prefix=PGOMERGE
/// Test that the merge pool size can be larger than 10.
@@ -49,6 +54,13 @@
// RUN: not ls %t.dir5/e_%20m.profraw
// RUN: ls %t.dir5/e_*.profraw | count 1
+// Test that all three functions have counters in the profile.
+// PROFCNT-DAG: begin
+// PROFCNT-DAG: end
+// PROFCNT-DAG: main
+// PROFCNT: Functions shown: 3
+// PROFCNT: Total functions: 3
+
int begin(int i) {
// COMMON: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}, !prof ![[PD1:[0-9]+]]
if (i)
diff --git a/flang/docs/Directives.md b/flang/docs/Directives.md
index 134de36f884d..fe08b4f855f2 100644
--- a/flang/docs/Directives.md
+++ b/flang/docs/Directives.md
@@ -30,3 +30,9 @@ A list of non-standard directives supported by Flang
end
end interface
```
+* `!dir$ assume_aligned designator:alignment`, where designator is a variable,
+ maybe with array indices, and alignment is what the compiler should assume the
+ alignment to be. E.g A:64 or B(1,1,1):128. The alignment should be a power of 2,
+ and is limited to 256.
+ [This directive is currently recognised by the parser, but not
+ handled by the other parts of the compiler].
diff --git a/flang/docs/Extensions.md b/flang/docs/Extensions.md
index 46ef8f07b4a8..baecfd7c48fd 100644
--- a/flang/docs/Extensions.md
+++ b/flang/docs/Extensions.md
@@ -481,6 +481,11 @@ end
* Many compilers disallow a `VALUE` assumed-length character dummy
argument, which has been standard since F'2008.
We accept this usage with an optional portability warning.
+* The `ASYNCHRONOUS` attribute can be implied by usage in data
+ transfer I/O statements. Only one other compiler supports this
+ correctly. This compiler does, apart from objects in asynchronous
+ NAMELIST I/O, for which an actual asynchronous runtime implementation
+ seems unlikely.
## Behavior in cases where the standard is ambiguous or indefinite
@@ -687,6 +692,20 @@ end
essentially ignored unless there are some unmasked array entries and
*all* of them are NaNs.
+* When `INDEX` is used as an unrestricted specific intrinsic function
+ in the context of an actual procedure, as the explicit interface in
+ a `PROCEDURE` declaration statement, or as the target of a procedure
+ pointer assignment, its interface has exactly two dummy arguments
+ (`STRING=` and `SUBSTRING=`), and includes neither `BACK=` nor
+ `KIND=`.
+ This is how `INDEX` as an unrestricted specific intrinsic function was
+ documented in FORTRAN '77 and Fortran '90; later revisions of the
+ standard deleted the argument information from the section on
+ unrestricted specific intrinsic functions.
+ At least one other compiler (XLF) seems to expect that the interface for
+ `INDEX` include an optional `BACK=` argument, but it doesn't actually
+ work.
+
## De Facto Standard Features
* `EXTENDS_TYPE_OF()` returns `.TRUE.` if both of its arguments have the
diff --git a/flang/include/flang/Common/float128.h b/flang/include/flang/Common/float128.h
index 3443aa06437b..2e76bc0a162e 100644
--- a/flang/include/flang/Common/float128.h
+++ b/flang/include/flang/Common/float128.h
@@ -20,6 +20,8 @@
#ifndef FORTRAN_COMMON_FLOAT128_H_
#define FORTRAN_COMMON_FLOAT128_H_
+#include <float.h>
+
#ifdef __cplusplus
/*
* libc++ does not fully support __float128 right now, e.g.
@@ -49,4 +51,25 @@
#endif /* (defined(__FLOAT128__) || defined(__SIZEOF_FLOAT128__)) && \
!defined(_LIBCPP_VERSION) && !defined(__CUDA_ARCH__) */
+/* Define pure C CFloat128Type and CFloat128ComplexType. */
+#if LDBL_MANT_DIG == 113
+typedef long double CFloat128Type;
+#ifndef __cplusplus
+typedef long double _Complex CFloat128ComplexType;
+#endif
+#elif HAS_FLOAT128
+typedef __float128 CFloat128Type;
+
+#ifndef __cplusplus
+/*
+ * Use mode() attribute supported by GCC and Clang.
+ * Adjust it for other compilers as needed.
+ */
+#if !defined(_ARCH_PPC) || defined(__LONG_DOUBLE_IEEE128__)
+typedef _Complex float __attribute__((mode(TC))) CFloat128ComplexType;
+#else
+typedef _Complex float __attribute__((mode(KC))) CFloat128ComplexType;
+#endif
+#endif // __cplusplus
+#endif
#endif /* FORTRAN_COMMON_FLOAT128_H_ */
diff --git a/flang/include/flang/Evaluate/characteristics.h b/flang/include/flang/Evaluate/characteristics.h
index 04a0d71e1ade..f2f37866ecde 100644
--- a/flang/include/flang/Evaluate/characteristics.h
+++ b/flang/include/flang/Evaluate/characteristics.h
@@ -381,8 +381,8 @@ struct Procedure {
int FindPassIndex(std::optional<parser::CharBlock>) const;
bool CanBeCalledViaImplicitInterface(std::string *whyNot = nullptr) const;
bool CanOverride(const Procedure &, std::optional<int> passIndex) const;
- bool IsCompatibleWith(const Procedure &, std::string *whyNot = nullptr,
- const SpecificIntrinsic * = nullptr,
+ bool IsCompatibleWith(const Procedure &, bool ignoreImplicitVsExplicit,
+ std::string *whyNot = nullptr, const SpecificIntrinsic * = nullptr,
std::optional<std::string> *warning = nullptr) const;
llvm::raw_ostream &Dump(llvm::raw_ostream &) const;
diff --git a/flang/include/flang/Evaluate/tools.h b/flang/include/flang/Evaluate/tools.h
index d257da1a7096..53896072675a 100644
--- a/flang/include/flang/Evaluate/tools.h
+++ b/flang/include/flang/Evaluate/tools.h
@@ -1094,7 +1094,7 @@ std::optional<parser::MessageFixedText> CheckProcCompatibility(bool isCall,
const std::optional<characteristics::Procedure> &lhsProcedure,
const characteristics::Procedure *rhsProcedure,
const SpecificIntrinsic *specificIntrinsic, std::string &whyNotCompatible,
- std::optional<std::string> &warning);
+ std::optional<std::string> &warning, bool ignoreImplicitVsExplicit);
// Scalar constant expansion
class ScalarConstantExpander {
diff --git a/flang/include/flang/Lower/AbstractConverter.h b/flang/include/flang/Lower/AbstractConverter.h
index e2af59e0aaa1..32e7a5e2b040 100644
--- a/flang/include/flang/Lower/AbstractConverter.h
+++ b/flang/include/flang/Lower/AbstractConverter.h
@@ -53,6 +53,7 @@ class DerivedTypeSpec;
namespace lower {
class SymMap;
+struct SymbolBox;
namespace pft {
struct Variable;
}
@@ -299,6 +300,11 @@ public:
return loweringOptions;
}
+ /// Find the symbol in one level up of symbol map such as for host-association
+ /// in OpenMP code or return null.
+ virtual Fortran::lower::SymbolBox
+ lookupOneLevelUpSymbol(const Fortran::semantics::Symbol &sym) = 0;
+
private:
/// Options controlling lowering behavior.
const Fortran::lower::LoweringOptions &loweringOptions;
diff --git a/flang/include/flang/Lower/LoweringOptions.def b/flang/include/flang/Lower/LoweringOptions.def
index 503acdac869c..9de69ac5c80f 100644
--- a/flang/include/flang/Lower/LoweringOptions.def
+++ b/flang/include/flang/Lower/LoweringOptions.def
@@ -24,8 +24,8 @@ LOWERINGOPT(Name, Bits, Default)
/// If true, lower transpose without a runtime call.
ENUM_LOWERINGOPT(OptimizeTranspose, unsigned, 1, 1)
-/// If true, enable polymorphic type lowering feature. Off by default.
-ENUM_LOWERINGOPT(PolymorphicTypeImpl, unsigned, 1, 0)
+/// If true, enable polymorphic type lowering feature. On by default.
+ENUM_LOWERINGOPT(PolymorphicTypeImpl, unsigned, 1, 1)
/// If true, lower to High level FIR before lowering to FIR. On by default.
ENUM_LOWERINGOPT(LowerToHighLevelFIR, unsigned, 1, 1)
diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
index 39821f1036c6..bd9b67b14b96 100644
--- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
@@ -688,6 +688,9 @@ fir::BoxValue createBoxValue(fir::FirOpBuilder &builder, mlir::Location loc,
/// Generate Null BoxProc for procedure pointer null initialization.
mlir::Value createNullBoxProc(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Type boxType);
+
+/// Set internal linkage attribute on a function.
+void setInternalLinkage(mlir::func::FuncOp);
} // namespace fir::factory
#endif // FORTRAN_OPTIMIZER_BUILDER_FIRBUILDER_H
diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h
index d067a7273540..048008a8d80c 100644
--- a/flang/include/flang/Parser/dump-parse-tree.h
+++ b/flang/include/flang/Parser/dump-parse-tree.h
@@ -205,6 +205,7 @@ public:
NODE(parser, CompilerDirective)
NODE(CompilerDirective, IgnoreTKR)
NODE(CompilerDirective, LoopCount)
+ NODE(CompilerDirective, AssumeAligned)
NODE(CompilerDirective, NameValue)
NODE(parser, ComplexLiteralConstant)
NODE(parser, ComplexPart)
diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h
index e9bfb728a2be..f7b72c3af091 100644
--- a/flang/include/flang/Parser/parse-tree.h
+++ b/flang/include/flang/Parser/parse-tree.h
@@ -551,7 +551,9 @@ struct ExecutionPartConstruct {
};
// R509 execution-part -> executable-construct [execution-part-construct]...
-WRAPPER_CLASS(ExecutionPart, std::list<ExecutionPartConstruct>);
+// R1101 block -> [execution-part-construct]...
+using Block = std::list<ExecutionPartConstruct>;
+WRAPPER_CLASS(ExecutionPart, Block);
// R502 program-unit ->
// main-program | external-subprogram | module | submodule | block-data
@@ -2115,9 +2117,6 @@ struct ForallConstruct {
t;
};
-// R1101 block -> [execution-part-construct]...
-using Block = std::list<ExecutionPartConstruct>;
-
// R1105 selector -> expr | variable
struct Selector {
UNION_CLASS_BOILERPLATE(Selector);
@@ -3309,12 +3308,18 @@ struct CompilerDirective {
struct LoopCount {
WRAPPER_CLASS_BOILERPLATE(LoopCount, std::list<std::uint64_t>);
};
+ struct AssumeAligned {
+ TUPLE_CLASS_BOILERPLATE(AssumeAligned);
+ std::tuple<common::Indirection<Designator>, uint64_t> t;
+ };
struct NameValue {
TUPLE_CLASS_BOILERPLATE(NameValue);
std::tuple<Name, std::optional<std::uint64_t>> t;
};
CharBlock source;
- std::variant<std::list<IgnoreTKR>, LoopCount, std::list<NameValue>> u;
+ std::variant<std::list<IgnoreTKR>, LoopCount, std::list<AssumeAligned>,
+ std::list<NameValue>>
+ u;
};
// (CUDA) ATTRIBUTE(attribute) [::] name-list
diff --git a/flang/include/flang/Parser/provenance.h b/flang/include/flang/Parser/provenance.h
index a5a521114428..73d500f32831 100644
--- a/flang/include/flang/Parser/provenance.h
+++ b/flang/include/flang/Parser/provenance.h
@@ -151,6 +151,7 @@ public:
void ClearSearchPath();
void AppendSearchPathDirectory(std::string); // new last directory
+ const SourceFile *OpenPath(std::string path, llvm::raw_ostream &error);
const SourceFile *Open(std::string path, llvm::raw_ostream &error,
std::optional<std::string> &&prependPath = std::nullopt);
const SourceFile *ReadStandardInput(llvm::raw_ostream &error);
diff --git a/flang/include/flang/Parser/source.h b/flang/include/flang/Parser/source.h
index f0ae97a3ef04..a6efdf9546c7 100644
--- a/flang/include/flang/Parser/source.h
+++ b/flang/include/flang/Parser/source.h
@@ -36,6 +36,8 @@ namespace Fortran::parser {
std::string DirectoryName(std::string path);
std::optional<std::string> LocateSourceFile(
std::string name, const std::list<std::string> &searchPath);
+std::vector<std::string> LocateSourceFileAll(
+ std::string name, const std::vector<std::string> &searchPath);
class SourceFile;
diff --git a/flang/include/flang/Runtime/reduction.h b/flang/include/flang/Runtime/reduction.h
index b91fec0cd26b..5b6077658575 100644
--- a/flang/include/flang/Runtime/reduction.h
+++ b/flang/include/flang/Runtime/reduction.h
@@ -92,9 +92,11 @@ void RTDECL(CppSumComplex8)(std::complex<double> &, const Descriptor &,
void RTDECL(CppSumComplex10)(std::complex<long double> &, const Descriptor &,
const char *source, int line, int dim = 0,
const Descriptor *mask = nullptr);
-void RTDECL(CppSumComplex16)(std::complex<long double> &, const Descriptor &,
- const char *source, int line, int dim = 0,
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+void RTDECL(CppSumComplex16)(std::complex<CppFloat128Type> &,
+ const Descriptor &, const char *source, int line, int dim = 0,
const Descriptor *mask = nullptr);
+#endif
void RTDECL(SumDim)(Descriptor &result, const Descriptor &array, int dim,
const char *source, int line, const Descriptor *mask = nullptr);
@@ -145,12 +147,16 @@ void RTDECL(CppProductComplex4)(std::complex<float> &, const Descriptor &,
void RTDECL(CppProductComplex8)(std::complex<double> &, const Descriptor &,
const char *source, int line, int dim = 0,
const Descriptor *mask = nullptr);
+#if LDBL_MANT_DIG == 64
void RTDECL(CppProductComplex10)(std::complex<long double> &,
const Descriptor &, const char *source, int line, int dim = 0,
const Descriptor *mask = nullptr);
-void RTDECL(CppProductComplex16)(std::complex<long double> &,
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+void RTDECL(CppProductComplex16)(std::complex<CppFloat128Type> &,
const Descriptor &, const char *source, int line, int dim = 0,
const Descriptor *mask = nullptr);
+#endif
void RTDECL(ProductDim)(Descriptor &result, const Descriptor &array, int dim,
const char *source, int line, const Descriptor *mask = nullptr);
@@ -358,9 +364,12 @@ double RTDECL(Norm2_8)(
#if LDBL_MANT_DIG == 64
long double RTDECL(Norm2_10)(
const Descriptor &, const char *source, int line, int dim = 0);
-#elif LDBL_MANT_DIG == 113
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
long double RTDECL(Norm2_16)(
const Descriptor &, const char *source, int line, int dim = 0);
+void RTDECL(Norm2DimReal16)(
+ Descriptor &, const Descriptor &, int dim, const char *source, int line);
#endif
void RTDECL(Norm2Dim)(
Descriptor &, const Descriptor &, int dim, const char *source, int line);
diff --git a/flang/include/flang/Semantics/expression.h b/flang/include/flang/Semantics/expression.h
index a330e241c2cd..a224b08da21d 100644
--- a/flang/include/flang/Semantics/expression.h
+++ b/flang/include/flang/Semantics/expression.h
@@ -506,9 +506,18 @@ public:
}
bool Pre(const parser::ComponentDefStmt &) {
- // Already analyzed in name resolution and PDT instantiation;
- // do not attempt to re-analyze now without type parameters.
- return false;
+ inComponentDefStmt_ = true;
+ return true;
+ }
+ void Post(const parser::ComponentDefStmt &) { inComponentDefStmt_ = false; }
+ bool Pre(const parser::Initialization &x) {
+ // Default component initialization expressions (but not DATA-like ones
+ // as in DEC STRUCTUREs) were already analyzed in name resolution
+ // and PDT instantiation; do not attempt to re-analyze them without
+ // type parameters.
+ return !inComponentDefStmt_ ||
+ std::holds_alternative<
+ std::list<common::Indirection<parser::DataStmtValue>>>(x.u);
}
template <typename A> bool Pre(const parser::Scalar<A> &x) {
@@ -538,6 +547,7 @@ private:
SemanticsContext &context_;
evaluate::ExpressionAnalyzer exprAnalyzer_{context_};
int whereDepth_{0}; // nesting of WHERE statements & constructs
+ bool inComponentDefStmt_{false};
};
} // namespace Fortran::semantics
#endif // FORTRAN_SEMANTICS_EXPRESSION_H_
diff --git a/flang/include/flang/Semantics/module-dependences.h b/flang/include/flang/Semantics/module-dependences.h
new file mode 100644
index 000000000000..29813a19a4b1
--- /dev/null
+++ b/flang/include/flang/Semantics/module-dependences.h
@@ -0,0 +1,51 @@
+//===-- include/flang/Semantics/module-dependences.h ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_SEMANTICS_MODULE_DEPENDENCES_H_
+#define FORTRAN_SEMANTICS_MODULE_DEPENDENCES_H_
+
+#include <cinttypes>
+#include <map>
+#include <optional>
+#include <string>
+
+namespace Fortran::semantics {
+
+using ModuleCheckSumType = std::uint64_t;
+
+class ModuleDependences {
+public:
+ void AddDependence(
+ std::string &&name, bool intrinsic, ModuleCheckSumType hash) {
+ if (intrinsic) {
+ intrinsicMap_.emplace(std::move(name), hash);
+ } else {
+ nonIntrinsicMap_.emplace(std::move(name), hash);
+ }
+ }
+ std::optional<ModuleCheckSumType> GetRequiredHash(
+ const std::string &name, bool intrinsic) {
+ if (intrinsic) {
+ if (auto iter{intrinsicMap_.find(name)}; iter != intrinsicMap_.end()) {
+ return iter->second;
+ }
+ } else {
+ if (auto iter{nonIntrinsicMap_.find(name)};
+ iter != nonIntrinsicMap_.end()) {
+ return iter->second;
+ }
+ }
+ return std::nullopt;
+ }
+
+private:
+ std::map<std::string, ModuleCheckSumType> intrinsicMap_, nonIntrinsicMap_;
+};
+
+} // namespace Fortran::semantics
+#endif // FORTRAN_SEMANTICS_MODULE_DEPENDENCES_H_
diff --git a/flang/include/flang/Semantics/semantics.h b/flang/include/flang/Semantics/semantics.h
index 4e8b71fa652f..c8ee71945d8b 100644
--- a/flang/include/flang/Semantics/semantics.h
+++ b/flang/include/flang/Semantics/semantics.h
@@ -16,6 +16,7 @@
#include "flang/Evaluate/intrinsics.h"
#include "flang/Evaluate/target.h"
#include "flang/Parser/message.h"
+#include "flang/Semantics/module-dependences.h"
#include <iosfwd>
#include <set>
#include <string>
@@ -108,6 +109,7 @@ public:
parser::Messages &messages() { return messages_; }
evaluate::FoldingContext &foldingContext() { return foldingContext_; }
parser::AllCookedSources &allCookedSources() { return allCookedSources_; }
+ ModuleDependences &moduleDependences() { return moduleDependences_; }
SemanticsContext &set_location(
const std::optional<parser::CharBlock> &location) {
@@ -293,6 +295,7 @@ private:
const Scope *ppcBuiltinsScope_{nullptr}; // module __ppc_intrinsics
std::list<parser::Program> modFileParseTrees_;
std::unique_ptr<CommonBlockMap> commonBlockMap_;
+ ModuleDependences moduleDependences_;
};
class Semantics {
diff --git a/flang/include/flang/Semantics/symbol.h b/flang/include/flang/Semantics/symbol.h
index 4535a92ce3dd..125025dab5f4 100644
--- a/flang/include/flang/Semantics/symbol.h
+++ b/flang/include/flang/Semantics/symbol.h
@@ -14,6 +14,7 @@
#include "flang/Common/enum-set.h"
#include "flang/Common/reference.h"
#include "flang/Common/visit.h"
+#include "flang/Semantics/module-dependences.h"
#include "llvm/ADT/DenseMapInfo.h"
#include <array>
@@ -86,11 +87,16 @@ public:
void set_scope(const Scope *);
bool isDefaultPrivate() const { return isDefaultPrivate_; }
void set_isDefaultPrivate(bool yes = true) { isDefaultPrivate_ = yes; }
+ std::optional<ModuleCheckSumType> moduleFileHash() const {
+ return moduleFileHash_;
+ }
+ void set_moduleFileHash(ModuleCheckSumType x) { moduleFileHash_ = x; }
private:
bool isSubmodule_;
bool isDefaultPrivate_{false};
const Scope *scope_{nullptr};
+ std::optional<ModuleCheckSumType> moduleFileHash_;
};
class MainProgramDetails : public WithOmpDeclarative {
@@ -413,7 +419,6 @@ public:
const Symbol *procInterface() const { return procInterface_; }
void set_procInterface(const Symbol &sym) { procInterface_ = &sym; }
- bool IsInterfaceSet() { return procInterface_ || type(); }
inline bool HasExplicitInterface() const;
// Be advised: !init().has_value() => uninitialized pointer,
@@ -1035,7 +1040,7 @@ struct SymbolAddressCompare {
// Symbol comparison is usually based on the order of cooked source
// stream creation and, when both are from the same cooked source,
// their positions in that cooked source stream.
-// Don't use this comparator or OrderedSymbolSet to hold
+// Don't use this comparator or SourceOrderedSymbolSet to hold
// Symbols that might be subject to ReplaceName().
struct SymbolSourcePositionCompare {
// These functions are implemented in Evaluate/tools.cpp to
diff --git a/flang/lib/Decimal/decimal-to-binary.cpp b/flang/lib/Decimal/decimal-to-binary.cpp
index d38af0f9b800..c5cdb72e355f 100644
--- a/flang/lib/Decimal/decimal-to-binary.cpp
+++ b/flang/lib/Decimal/decimal-to-binary.cpp
@@ -14,6 +14,7 @@
#include <cinttypes>
#include <cstring>
#include <ctype.h>
+#include <utility>
namespace Fortran::decimal {
@@ -275,7 +276,12 @@ ConversionToBinaryResult<PREC> IntermediateFloat<PREC>::ToBinary(
if (guard != 0) {
flags |= Underflow;
}
- return {Binary{}, static_cast<enum ConversionResultFlags>(flags)};
+ Binary zero;
+ if (isNegative) {
+ zero.Negate();
+ }
+ return {
+ std::move(zero), static_cast<enum ConversionResultFlags>(flags)};
}
}
} else {
diff --git a/flang/lib/Evaluate/characteristics.cpp b/flang/lib/Evaluate/characteristics.cpp
index 80b0f346c32d..688a856220a1 100644
--- a/flang/lib/Evaluate/characteristics.cpp
+++ b/flang/lib/Evaluate/characteristics.cpp
@@ -25,6 +25,7 @@ using namespace Fortran::parser::literals;
namespace Fortran::evaluate::characteristics {
// Copy attributes from a symbol to dst based on the mapping in pairs.
+// An ASYNCHRONOUS attribute counts even if it is implied.
template <typename A, typename B>
static void CopyAttrs(const semantics::Symbol &src, A &dst,
const std::initializer_list<std::pair<semantics::Attr, B>> &pairs) {
@@ -533,7 +534,8 @@ bool DummyProcedure::IsCompatibleWith(
}
return false;
}
- if (!procedure.value().IsCompatibleWith(actual.procedure.value(), whyNot)) {
+ if (!procedure.value().IsCompatibleWith(actual.procedure.value(),
+ /*ignoreImplicitVsExplicit=*/false, whyNot)) {
if (whyNot) {
*whyNot = "incompatible dummy procedure interfaces: "s + *whyNot;
}
@@ -1206,7 +1208,8 @@ bool FunctionResult::IsCompatibleWith(
CHECK(ifaceProc != nullptr);
if (const auto *actualProc{
std::get_if<CopyableIndirection<Procedure>>(&actual.u)}) {
- if (ifaceProc->value().IsCompatibleWith(actualProc->value(), whyNot)) {
+ if (ifaceProc->value().IsCompatibleWith(actualProc->value(),
+ /*ignoreImplicitVsExplicit=*/false, whyNot)) {
return true;
}
if (whyNot) {
@@ -1251,7 +1254,8 @@ bool Procedure::operator==(const Procedure &that) const {
cudaSubprogramAttrs == that.cudaSubprogramAttrs;
}
-bool Procedure::IsCompatibleWith(const Procedure &actual, std::string *whyNot,
+bool Procedure::IsCompatibleWith(const Procedure &actual,
+ bool ignoreImplicitVsExplicit, std::string *whyNot,
const SpecificIntrinsic *specificIntrinsic,
std::optional<std::string> *warning) const {
// 15.5.2.9(1): if dummy is not pure, actual need not be.
@@ -1265,6 +1269,9 @@ bool Procedure::IsCompatibleWith(const Procedure &actual, std::string *whyNot,
}
Attrs differences{attrs ^ actualAttrs};
differences.reset(Attr::Subroutine); // dealt with specifically later
+ if (ignoreImplicitVsExplicit) {
+ differences.reset(Attr::ImplicitInterface);
+ }
if (!differences.empty()) {
if (whyNot) {
auto sep{": "s};
diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp
index 61bf0f2b48ad..a8f2e5b445ed 100644
--- a/flang/lib/Evaluate/intrinsics.cpp
+++ b/flang/lib/Evaluate/intrinsics.cpp
@@ -1120,6 +1120,12 @@ static const SpecificIntrinsicInterface specificIntrinsicFunction[]{
{{"iiabs", {{"a", TypePattern{IntType, KindCode::exactKind, 2}}},
TypePattern{IntType, KindCode::exactKind, 2}},
"abs"},
+ // The definition of the unrestricted specific intrinsic function INDEX
+ // in F'77 and F'90 has only two arguments; later standards omit the
+ // argument information for all unrestricted specific intrinsic
+ // procedures. No compiler supports an implementation that allows
+ // INDEX with BACK= to work when associated as an actual procedure or
+ // procedure pointer target.
{{"index", {{"string", DefaultChar}, {"substring", DefaultChar}},
DefaultInt}},
{{"isign", {{"a", DefaultInt}, {"b", DefaultInt}}, DefaultInt}, "sign"},
@@ -2505,6 +2511,8 @@ private:
std::multimap<std::string, const IntrinsicInterface *> subroutines_;
const semantics::Scope *builtinsScope_{nullptr};
std::map<std::string, std::string> aliases_;
+ semantics::ParamValue assumedLen_{
+ semantics::ParamValue::Assumed(common::TypeParamAttr::Len)};
};
bool IntrinsicProcTable::Implementation::IsIntrinsicFunction(
@@ -3241,6 +3249,10 @@ DynamicType IntrinsicProcTable::Implementation::GetSpecificType(
TypeCategory category{set.LeastElement().value()};
if (pattern.kindCode == KindCode::doublePrecision) {
return DynamicType{category, defaults_.doublePrecisionKind()};
+ } else if (category == TypeCategory::Character) {
+ // All character arguments to specific intrinsic functions are
+ // assumed-length.
+ return DynamicType{defaults_.GetDefaultKind(category), assumedLen_};
} else {
return DynamicType{category, defaults_.GetDefaultKind(category)};
}
diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp
index 131bbd97ce16..e7fc651b9173 100644
--- a/flang/lib/Evaluate/tools.cpp
+++ b/flang/lib/Evaluate/tools.cpp
@@ -1083,7 +1083,7 @@ std::optional<parser::MessageFixedText> CheckProcCompatibility(bool isCall,
const std::optional<characteristics::Procedure> &lhsProcedure,
const characteristics::Procedure *rhsProcedure,
const SpecificIntrinsic *specificIntrinsic, std::string &whyNotCompatible,
- std::optional<std::string> &warning) {
+ std::optional<std::string> &warning, bool ignoreImplicitVsExplicit) {
std::optional<parser::MessageFixedText> msg;
if (!lhsProcedure) {
msg = "In assignment to object %s, the target '%s' is a procedure"
@@ -1097,8 +1097,9 @@ std::optional<parser::MessageFixedText> CheckProcCompatibility(bool isCall,
*rhsProcedure->functionResult, &whyNotCompatible)) {
msg =
"Function %s associated with incompatible function designator '%s': %s"_err_en_US;
- } else if (lhsProcedure->IsCompatibleWith(*rhsProcedure, &whyNotCompatible,
- specificIntrinsic, &warning)) {
+ } else if (lhsProcedure->IsCompatibleWith(*rhsProcedure,
+ ignoreImplicitVsExplicit, &whyNotCompatible, specificIntrinsic,
+ &warning)) {
// OK
} else if (isCall) {
msg = "Procedure %s associated with result of reference to function '%s'"
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index f865b53f74de..153ce0623ab3 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -1000,6 +1000,17 @@ private:
if (sym.detailsIf<Fortran::semantics::CommonBlockDetails>())
return symMap->lookupSymbol(sym);
+ // For symbols to be privatized in OMP, the symbol is mapped to an
+ // instance of `SymbolBox::Intrinsic` (i.e. a direct mapping to an MLIR
+ // SSA value). This MLIR SSA value is the block argument to the
+ // `omp.private`'s `alloc` block. If this is the case, we return this
+ // `SymbolBox::Intrinsic` value.
+ if (Fortran::lower::SymbolBox v = symMap->lookupSymbol(sym))
+ return v.match(
+ [&](const Fortran::lower::SymbolBox::Intrinsic &)
+ -> Fortran::lower::SymbolBox { return v; },
+ [](const auto &) -> Fortran::lower::SymbolBox { return {}; });
+
return {};
}
if (Fortran::lower::SymbolBox v = symMap->lookupSymbol(sym))
@@ -1018,7 +1029,7 @@ private:
/// Find the symbol in one level up of symbol map such as for host-association
/// in OpenMP code or return null.
Fortran::lower::SymbolBox
- lookupOneLevelUpSymbol(const Fortran::semantics::Symbol &sym) {
+ lookupOneLevelUpSymbol(const Fortran::semantics::Symbol &sym) override {
if (Fortran::lower::SymbolBox v = localSymbols.lookupOneLevelUpSymbol(sym))
return v;
return {};
@@ -4477,7 +4488,16 @@ private:
assert(builder && "FirOpBuilder did not instantiate");
builder->setFastMathFlags(bridge.getLoweringOptions().getMathOptions());
builder->setInsertionPointToStart(&func.front());
- func.setVisibility(mlir::SymbolTable::Visibility::Public);
+ if (funit.parent.isA<Fortran::lower::pft::FunctionLikeUnit>()) {
+ // Give internal linkage to internal functions. There are no name clash
+ // risks, but giving global linkage to internal procedure will break the
+ // static link register in shared libraries because of the system calls.
+ // Also, it should be possible to eliminate the procedure code if all the
+ // uses have been inlined.
+ fir::factory::setInternalLinkage(func);
+ } else {
+ func.setVisibility(mlir::SymbolTable::Visibility::Public);
+ }
assert(blockId == 0 && "invalid blockId");
assert(activeConstructStack.empty() && "invalid construct stack state");
diff --git a/flang/lib/Lower/HostAssociations.cpp b/flang/lib/Lower/HostAssociations.cpp
index a62f7a7e99b6..b9e13ccad1c9 100644
--- a/flang/lib/Lower/HostAssociations.cpp
+++ b/flang/lib/Lower/HostAssociations.cpp
@@ -247,9 +247,11 @@ public:
}
};
-/// Class defining how polymorphic entities are captured in internal procedures.
-/// Polymorphic entities are always boxed as a fir.class box.
-class CapturedPolymorphic : public CapturedSymbols<CapturedPolymorphic> {
+/// Class defining how polymorphic scalar entities are captured in internal
+/// procedures. Polymorphic entities are always boxed as a fir.class box.
+/// Polymorphic array can be handled in CapturedArrays directly
+class CapturedPolymorphicScalar
+ : public CapturedSymbols<CapturedPolymorphicScalar> {
public:
static mlir::Type getType(Fortran::lower::AbstractConverter &converter,
const Fortran::semantics::Symbol &sym) {
@@ -257,19 +259,50 @@ public:
}
static void instantiateHostTuple(const InstantiateHostTuple &args,
Fortran::lower::AbstractConverter &converter,
- const Fortran::semantics::Symbol &) {
+ const Fortran::semantics::Symbol &sym) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
+ mlir::Location loc = args.loc;
mlir::Type typeInTuple = fir::dyn_cast_ptrEleTy(args.addrInTuple.getType());
assert(typeInTuple && "addrInTuple must be an address");
mlir::Value castBox = builder.createConvert(args.loc, typeInTuple,
fir::getBase(args.hostValue));
- builder.create<fir::StoreOp>(args.loc, castBox, args.addrInTuple);
+ if (Fortran::semantics::IsOptional(sym)) {
+ auto isPresent =
+ builder.create<fir::IsPresentOp>(loc, builder.getI1Type(), castBox);
+ builder.genIfThenElse(loc, isPresent)
+ .genThen([&]() {
+ builder.create<fir::StoreOp>(loc, castBox, args.addrInTuple);
+ })
+ .genElse([&]() {
+ mlir::Value null = fir::factory::createUnallocatedBox(
+ builder, loc, typeInTuple,
+ /*nonDeferredParams=*/mlir::ValueRange{});
+ builder.create<fir::StoreOp>(loc, null, args.addrInTuple);
+ })
+ .end();
+ } else {
+ builder.create<fir::StoreOp>(loc, castBox, args.addrInTuple);
+ }
}
static void getFromTuple(const GetFromTuple &args,
Fortran::lower::AbstractConverter &converter,
const Fortran::semantics::Symbol &sym,
const Fortran::lower::BoxAnalyzer &ba) {
- bindCapturedSymbol(sym, args.valueInTuple, converter, args.symMap);
+ fir::FirOpBuilder &builder = converter.getFirOpBuilder();
+ mlir::Location loc = args.loc;
+ mlir::Value box = args.valueInTuple;
+ if (Fortran::semantics::IsOptional(sym)) {
+ auto boxTy = box.getType().cast<fir::BaseBoxType>();
+ auto eleTy = boxTy.getEleTy();
+ if (!fir::isa_ref_type(eleTy))
+ eleTy = builder.getRefType(eleTy);
+ auto addr = builder.create<fir::BoxAddrOp>(loc, eleTy, box);
+ mlir::Value isPresent = builder.genIsNotNullAddr(loc, addr);
+ auto absentBox = builder.create<fir::AbsentOp>(loc, boxTy);
+ box =
+ builder.create<mlir::arith::SelectOp>(loc, isPresent, box, absentBox);
+ }
+ bindCapturedSymbol(sym, box, converter, args.symMap);
}
};
@@ -342,7 +375,12 @@ public:
static mlir::Type getType(Fortran::lower::AbstractConverter &converter,
const Fortran::semantics::Symbol &sym) {
mlir::Type type = converter.genType(sym);
- assert(type.isa<fir::SequenceType>() && "must be a sequence type");
+ bool isPolymorphic = Fortran::semantics::IsPolymorphic(sym);
+ assert((type.isa<fir::SequenceType>() ||
+ (isPolymorphic && type.isa<fir::ClassType>())) &&
+ "must be a sequence type");
+ if (isPolymorphic)
+ return type;
return fir::BoxType::get(type);
}
@@ -410,13 +448,13 @@ public:
fir::factory::readBoxValue(builder, loc, boxValue),
converter, args.symMap);
} else {
- // Keep variable as a fir.box.
+ // Keep variable as a fir.box/fir.class.
// If this is an optional that is absent, the fir.box needs to be an
// AbsentOp result, otherwise it will not work properly with IsPresentOp
// (absent boxes are null descriptor addresses, not descriptors containing
// a null base address).
if (Fortran::semantics::IsOptional(sym)) {
- auto boxTy = box.getType().cast<fir::BoxType>();
+ auto boxTy = box.getType().cast<fir::BaseBoxType>();
auto eleTy = boxTy.getEleTy();
if (!fir::isa_ref_type(eleTy))
eleTy = builder.getRefType(eleTy);
@@ -470,14 +508,10 @@ walkCaptureCategories(T visitor, Fortran::lower::AbstractConverter &converter,
ba.analyze(sym);
if (Fortran::semantics::IsAllocatableOrPointer(sym))
return CapturedAllocatableAndPointer::visit(visitor, converter, sym, ba);
- if (Fortran::semantics::IsPolymorphic(sym)) {
- if (ba.isArray() && !ba.lboundIsAllOnes())
- TODO(converter.genLocation(sym.name()),
- "polymorphic array with non default lower bound");
- return CapturedPolymorphic::visit(visitor, converter, sym, ba);
- }
if (ba.isArray())
return CapturedArrays::visit(visitor, converter, sym, ba);
+ if (Fortran::semantics::IsPolymorphic(sym))
+ return CapturedPolymorphicScalar::visit(visitor, converter, sym, ba);
if (ba.isChar())
return CapturedCharacterScalars::visit(visitor, converter, sym, ba);
assert(ba.isTrivial() && "must be trivial scalar");
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
index 136bda0b582e..717b8cc0276a 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
@@ -14,6 +14,7 @@
#include "Utils.h"
#include "flang/Lower/PFTBuilder.h"
+#include "flang/Lower/SymbolMap.h"
#include "flang/Optimizer/Builder/Todo.h"
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
@@ -66,9 +67,10 @@ void DataSharingProcessor::cloneSymbol(const Fortran::semantics::Symbol *sym) {
}
void DataSharingProcessor::copyFirstPrivateSymbol(
- const Fortran::semantics::Symbol *sym) {
+ const Fortran::semantics::Symbol *sym,
+ mlir::OpBuilder::InsertPoint *copyAssignIP) {
if (sym->test(Fortran::semantics::Symbol::Flag::OmpFirstPrivate))
- converter.copyHostAssociateVar(*sym);
+ converter.copyHostAssociateVar(*sym, copyAssignIP);
}
void DataSharingProcessor::copyLastPrivateSymbol(
@@ -307,14 +309,10 @@ void DataSharingProcessor::privatize() {
for (const Fortran::semantics::Symbol *sym : privatizedSymbols) {
if (const auto *commonDet =
sym->detailsIf<Fortran::semantics::CommonBlockDetails>()) {
- for (const auto &mem : commonDet->objects()) {
- cloneSymbol(&*mem);
- copyFirstPrivateSymbol(&*mem);
- }
- } else {
- cloneSymbol(sym);
- copyFirstPrivateSymbol(sym);
- }
+ for (const auto &mem : commonDet->objects())
+ doPrivatize(&*mem);
+ } else
+ doPrivatize(sym);
}
}
@@ -338,11 +336,95 @@ void DataSharingProcessor::defaultPrivatize() {
!sym->GetUltimate().has<Fortran::semantics::NamelistDetails>() &&
!symbolsInNestedRegions.contains(sym) &&
!symbolsInParentRegions.contains(sym) &&
- !privatizedSymbols.contains(sym)) {
+ !privatizedSymbols.contains(sym))
+ doPrivatize(sym);
+ }
+}
+
+void DataSharingProcessor::doPrivatize(const Fortran::semantics::Symbol *sym) {
+ if (!useDelayedPrivatization) {
+ cloneSymbol(sym);
+ copyFirstPrivateSymbol(sym);
+ return;
+ }
+
+ Fortran::lower::SymbolBox hsb = converter.lookupOneLevelUpSymbol(*sym);
+ assert(hsb && "Host symbol box not found");
+
+ mlir::Type symType = hsb.getAddr().getType();
+ mlir::Location symLoc = hsb.getAddr().getLoc();
+ std::string privatizerName = sym->name().ToString() + ".privatizer";
+ bool isFirstPrivate =
+ sym->test(Fortran::semantics::Symbol::Flag::OmpFirstPrivate);
+
+ mlir::omp::PrivateClauseOp privatizerOp = [&]() {
+ auto moduleOp = firOpBuilder.getModule();
+ auto uniquePrivatizerName = fir::getTypeAsString(
+ symType, converter.getKindMap(),
+ converter.mangleName(*sym) +
+ (isFirstPrivate ? "_firstprivate" : "_private"));
+
+ if (auto existingPrivatizer =
+ moduleOp.lookupSymbol<mlir::omp::PrivateClauseOp>(
+ uniquePrivatizerName))
+ return existingPrivatizer;
+
+ auto ip = firOpBuilder.saveInsertionPoint();
+ firOpBuilder.setInsertionPoint(&moduleOp.getBodyRegion().front(),
+ moduleOp.getBodyRegion().front().begin());
+ auto result = firOpBuilder.create<mlir::omp::PrivateClauseOp>(
+ symLoc, uniquePrivatizerName, symType,
+ isFirstPrivate ? mlir::omp::DataSharingClauseType::FirstPrivate
+ : mlir::omp::DataSharingClauseType::Private);
+
+ symTable->pushScope();
+
+ // Populate the `alloc` region.
+ {
+ mlir::Region &allocRegion = result.getAllocRegion();
+ mlir::Block *allocEntryBlock = firOpBuilder.createBlock(
+ &allocRegion, /*insertPt=*/{}, symType, symLoc);
+
+ firOpBuilder.setInsertionPointToEnd(allocEntryBlock);
+ symTable->addSymbol(*sym, allocRegion.getArgument(0));
+ symTable->pushScope();
cloneSymbol(sym);
- copyFirstPrivateSymbol(sym);
+ firOpBuilder.create<mlir::omp::YieldOp>(
+ hsb.getAddr().getLoc(),
+ symTable->shallowLookupSymbol(*sym).getAddr());
+ symTable->popScope();
}
- }
+
+ // Populate the `copy` region if this is a `firstprivate`.
+ if (isFirstPrivate) {
+ mlir::Region &copyRegion = result.getCopyRegion();
+ // First block argument corresponding to the original/host value while
+ // second block argument corresponding to the privatized value.
+ mlir::Block *copyEntryBlock = firOpBuilder.createBlock(
+ &copyRegion, /*insertPt=*/{}, {symType, symType}, {symLoc, symLoc});
+ firOpBuilder.setInsertionPointToEnd(copyEntryBlock);
+ symTable->addSymbol(*sym, copyRegion.getArgument(0),
+ /*force=*/true);
+ symTable->pushScope();
+ symTable->addSymbol(*sym, copyRegion.getArgument(1));
+ auto ip = firOpBuilder.saveInsertionPoint();
+ copyFirstPrivateSymbol(sym, &ip);
+
+ firOpBuilder.create<mlir::omp::YieldOp>(
+ hsb.getAddr().getLoc(),
+ symTable->shallowLookupSymbol(*sym).getAddr());
+ symTable->popScope();
+ }
+
+ symTable->popScope();
+ firOpBuilder.restoreInsertionPoint(ip);
+ return result;
+ }();
+
+ delayedPrivatizationInfo.privatizers.push_back(
+ mlir::SymbolRefAttr::get(privatizerOp));
+ delayedPrivatizationInfo.originalAddresses.push_back(hsb.getAddr());
+ delayedPrivatizationInfo.symbols.push_back(sym);
}
} // namespace omp
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.h b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
index 10c0a30c09c3..9f7301df0759 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.h
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
@@ -23,6 +23,24 @@ namespace lower {
namespace omp {
class DataSharingProcessor {
+public:
+ /// Collects all the information needed for delayed privatization. This can be
+ /// used by ops with data-sharing clauses to properly generate their regions
+ /// (e.g. add region arguments) and map the original SSA values to their
+ /// corresponding OMP region operands.
+ struct DelayedPrivatizationInfo {
+ // The list of symbols referring to delayed privatizer ops (i.e.
+ // `omp.private` ops).
+ llvm::SmallVector<mlir::SymbolRefAttr> privatizers;
+ // SSA values that correspond to "original" values being privatized.
+ // "Original" here means the SSA value outside the OpenMP region from which
+ // a clone is created inside the region.
+ llvm::SmallVector<mlir::Value> originalAddresses;
+ // Fortran symbols corresponding to the above SSA values.
+ llvm::SmallVector<const Fortran::semantics::Symbol *> symbols;
+ };
+
+private:
bool hasLastPrivateOp;
mlir::OpBuilder::InsertPoint lastPrivIP;
mlir::OpBuilder::InsertPoint insPt;
@@ -36,6 +54,9 @@ class DataSharingProcessor {
fir::FirOpBuilder &firOpBuilder;
const Fortran::parser::OmpClauseList &opClauseList;
Fortran::lower::pft::Evaluation &eval;
+ bool useDelayedPrivatization;
+ Fortran::lower::SymMap *symTable;
+ DelayedPrivatizationInfo delayedPrivatizationInfo;
bool needBarrier();
void collectSymbols(Fortran::semantics::Symbol::Flag flag);
@@ -47,10 +68,13 @@ class DataSharingProcessor {
void collectDefaultSymbols();
void privatize();
void defaultPrivatize();
+ void doPrivatize(const Fortran::semantics::Symbol *sym);
void copyLastPrivatize(mlir::Operation *op);
void insertLastPrivateCompare(mlir::Operation *op);
void cloneSymbol(const Fortran::semantics::Symbol *sym);
- void copyFirstPrivateSymbol(const Fortran::semantics::Symbol *sym);
+ void
+ copyFirstPrivateSymbol(const Fortran::semantics::Symbol *sym,
+ mlir::OpBuilder::InsertPoint *copyAssignIP = nullptr);
void copyLastPrivateSymbol(const Fortran::semantics::Symbol *sym,
mlir::OpBuilder::InsertPoint *lastPrivIP);
void insertDeallocs();
@@ -58,10 +82,14 @@ class DataSharingProcessor {
public:
DataSharingProcessor(Fortran::lower::AbstractConverter &converter,
const Fortran::parser::OmpClauseList &opClauseList,
- Fortran::lower::pft::Evaluation &eval)
+ Fortran::lower::pft::Evaluation &eval,
+ bool useDelayedPrivatization = false,
+ Fortran::lower::SymMap *symTable = nullptr)
: hasLastPrivateOp(false), converter(converter),
firOpBuilder(converter.getFirOpBuilder()), opClauseList(opClauseList),
- eval(eval) {}
+ eval(eval), useDelayedPrivatization(useDelayedPrivatization),
+ symTable(symTable) {}
+
// Privatisation is split into two steps.
// Step1 performs cloning of all privatisation clauses and copying for
// firstprivates. Step1 is performed at the place where process/processStep1
@@ -80,6 +108,10 @@ public:
assert(!loopIV && "Loop iteration variable already set");
loopIV = iv;
}
+
+ const DelayedPrivatizationInfo &getDelayedPrivatizationInfo() const {
+ return delayedPrivatizationInfo;
+ }
};
} // namespace omp
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 7953bf83cba0..90fc1f80f57a 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -558,6 +558,7 @@ genOrderedRegionOp(Fortran::lower::AbstractConverter &converter,
static mlir::omp::ParallelOp
genParallelOp(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
Fortran::semantics::SemanticsContext &semaCtx,
Fortran::lower::pft::Evaluation &eval, bool genNested,
mlir::Location currentLocation,
@@ -590,8 +591,8 @@ genParallelOp(Fortran::lower::AbstractConverter &converter,
auto reductionCallback = [&](mlir::Operation *op) {
llvm::SmallVector<mlir::Location> locs(reductionVars.size(),
currentLocation);
- auto block = converter.getFirOpBuilder().createBlock(&op->getRegion(0), {},
- reductionTypes, locs);
+ auto *block = converter.getFirOpBuilder().createBlock(&op->getRegion(0), {},
+ reductionTypes, locs);
for (auto [arg, prv] :
llvm::zip_equal(reductionSymbols, block->getArguments())) {
converter.bindSymbol(*arg, prv);
@@ -599,13 +600,78 @@ genParallelOp(Fortran::lower::AbstractConverter &converter,
return reductionSymbols;
};
- return genOpWithBody<mlir::omp::ParallelOp>(
+ OpWithBodyGenInfo genInfo =
OpWithBodyGenInfo(converter, semaCtx, currentLocation, eval)
.setGenNested(genNested)
.setOuterCombined(outerCombined)
.setClauses(&clauseList)
.setReductions(&reductionSymbols, &reductionTypes)
- .setGenRegionEntryCb(reductionCallback),
+ .setGenRegionEntryCb(reductionCallback);
+
+ if (!enableDelayedPrivatization) {
+ return genOpWithBody<mlir::omp::ParallelOp>(
+ genInfo,
+ /*resultTypes=*/mlir::TypeRange(), ifClauseOperand,
+ numThreadsClauseOperand, allocateOperands, allocatorOperands,
+ reductionVars,
+ reductionDeclSymbols.empty()
+ ? nullptr
+ : mlir::ArrayAttr::get(converter.getFirOpBuilder().getContext(),
+ reductionDeclSymbols),
+ procBindKindAttr, /*private_vars=*/llvm::SmallVector<mlir::Value>{},
+ /*privatizers=*/nullptr);
+ }
+
+ bool privatize = !outerCombined;
+ DataSharingProcessor dsp(converter, clauseList, eval,
+ /*useDelayedPrivatization=*/true, &symTable);
+
+ if (privatize)
+ dsp.processStep1();
+
+ const auto &delayedPrivatizationInfo = dsp.getDelayedPrivatizationInfo();
+
+ auto genRegionEntryCB = [&](mlir::Operation *op) {
+ auto parallelOp = llvm::cast<mlir::omp::ParallelOp>(op);
+
+ llvm::SmallVector<mlir::Location> reductionLocs(reductionVars.size(),
+ currentLocation);
+
+ mlir::OperandRange privateVars = parallelOp.getPrivateVars();
+ mlir::Region &region = parallelOp.getRegion();
+
+ llvm::SmallVector<mlir::Type> privateVarTypes = reductionTypes;
+ privateVarTypes.reserve(privateVarTypes.size() + privateVars.size());
+ llvm::transform(privateVars, std::back_inserter(privateVarTypes),
+ [](mlir::Value v) { return v.getType(); });
+
+ llvm::SmallVector<mlir::Location> privateVarLocs = reductionLocs;
+ privateVarLocs.reserve(privateVarLocs.size() + privateVars.size());
+ llvm::transform(privateVars, std::back_inserter(privateVarLocs),
+ [](mlir::Value v) { return v.getLoc(); });
+
+ converter.getFirOpBuilder().createBlock(&region, /*insertPt=*/{},
+ privateVarTypes, privateVarLocs);
+
+ llvm::SmallVector<const Fortran::semantics::Symbol *> allSymbols =
+ reductionSymbols;
+ allSymbols.append(delayedPrivatizationInfo.symbols);
+ for (auto [arg, prv] : llvm::zip_equal(allSymbols, region.getArguments())) {
+ converter.bindSymbol(*arg, prv);
+ }
+
+ return allSymbols;
+ };
+
+ // TODO Merge with the reduction CB.
+ genInfo.setGenRegionEntryCb(genRegionEntryCB).setDataSharingProcessor(&dsp);
+
+ llvm::SmallVector<mlir::Attribute> privatizers(
+ delayedPrivatizationInfo.privatizers.begin(),
+ delayedPrivatizationInfo.privatizers.end());
+
+ return genOpWithBody<mlir::omp::ParallelOp>(
+ genInfo,
/*resultTypes=*/mlir::TypeRange(), ifClauseOperand,
numThreadsClauseOperand, allocateOperands, allocatorOperands,
reductionVars,
@@ -613,8 +679,11 @@ genParallelOp(Fortran::lower::AbstractConverter &converter,
? nullptr
: mlir::ArrayAttr::get(converter.getFirOpBuilder().getContext(),
reductionDeclSymbols),
- procBindKindAttr, /*private_vars=*/llvm::SmallVector<mlir::Value>{},
- /*privatizers=*/nullptr);
+ procBindKindAttr, delayedPrivatizationInfo.originalAddresses,
+ delayedPrivatizationInfo.privatizers.empty()
+ ? nullptr
+ : mlir::ArrayAttr::get(converter.getFirOpBuilder().getContext(),
+ privatizers));
}
static mlir::omp::SectionOp
@@ -771,7 +840,8 @@ genEnterExitUpdateDataOp(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<mlir::Attribute> dependTypeOperands;
Fortran::parser::OmpIfClause::DirectiveNameModifier directiveName;
- llvm::omp::Directive directive;
+ // GCC 9.3.0 emits a (probably) bogus warning about an unused variable.
+ [[maybe_unused]] llvm::omp::Directive directive;
if constexpr (std::is_same_v<OpTy, mlir::omp::EnterDataOp>) {
directiveName =
Fortran::parser::OmpIfClause::DirectiveNameModifier::TargetEnterData;
@@ -1621,7 +1691,7 @@ static void genOMP(Fortran::lower::AbstractConverter &converter,
if ((llvm::omp::allParallelSet & llvm::omp::loopConstructSet)
.test(ompDirective)) {
validDirective = true;
- genParallelOp(converter, semaCtx, eval, /*genNested=*/false,
+ genParallelOp(converter, symTable, semaCtx, eval, /*genNested=*/false,
currentLocation, loopOpClauseList,
/*outerCombined=*/true);
}
@@ -1711,8 +1781,8 @@ genOMP(Fortran::lower::AbstractConverter &converter,
currentLocation);
break;
case llvm::omp::Directive::OMPD_parallel:
- genParallelOp(converter, semaCtx, eval, /*genNested=*/true, currentLocation,
- beginClauseList);
+ genParallelOp(converter, symTable, semaCtx, eval, /*genNested=*/true,
+ currentLocation, beginClauseList);
break;
case llvm::omp::Directive::OMPD_single:
genSingleOp(converter, semaCtx, eval, /*genNested=*/true, currentLocation,
@@ -1769,7 +1839,7 @@ genOMP(Fortran::lower::AbstractConverter &converter,
.test(directive.v)) {
bool outerCombined =
directive.v != llvm::omp::Directive::OMPD_target_parallel;
- genParallelOp(converter, semaCtx, eval, /*genNested=*/false,
+ genParallelOp(converter, symTable, semaCtx, eval, /*genNested=*/false,
currentLocation, beginClauseList, outerCombined);
combinedDirective = true;
}
@@ -1852,7 +1922,7 @@ genOMP(Fortran::lower::AbstractConverter &converter,
// Parallel wrapper of PARALLEL SECTIONS construct
if (dir == llvm::omp::Directive::OMPD_parallel_sections) {
- genParallelOp(converter, semaCtx, eval,
+ genParallelOp(converter, symTable, semaCtx, eval,
/*genNested=*/false, currentLocation, sectionsClauseList,
/*outerCombined=*/true);
} else {
diff --git a/flang/lib/Lower/OpenMP/Utils.cpp b/flang/lib/Lower/OpenMP/Utils.cpp
index 31b15257d186..49517f62895d 100644
--- a/flang/lib/Lower/OpenMP/Utils.cpp
+++ b/flang/lib/Lower/OpenMP/Utils.cpp
@@ -24,6 +24,12 @@ llvm::cl::opt<bool> treatIndexAsSection(
llvm::cl::desc("In the OpenMP data clauses treat `a(N)` as `a(N:N)`."),
llvm::cl::init(true));
+llvm::cl::opt<bool> enableDelayedPrivatization(
+ "openmp-enable-delayed-privatization",
+ llvm::cl::desc(
+ "Emit `[first]private` variables as clauses on the MLIR ops."),
+ llvm::cl::init(false));
+
namespace Fortran {
namespace lower {
namespace omp {
diff --git a/flang/lib/Lower/OpenMP/Utils.h b/flang/lib/Lower/OpenMP/Utils.h
index c346f891f079..f57cd7420ce4 100644
--- a/flang/lib/Lower/OpenMP/Utils.h
+++ b/flang/lib/Lower/OpenMP/Utils.h
@@ -15,6 +15,7 @@
#include "llvm/Support/CommandLine.h"
extern llvm::cl::opt<bool> treatIndexAsSection;
+extern llvm::cl::opt<bool> enableDelayedPrivatization;
namespace fir {
class FirOpBuilder;
diff --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
index 3cce39f5b8c7..788c99e40105 100644
--- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp
+++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
@@ -18,6 +18,7 @@
#include "flang/Optimizer/Dialect/FIROpsSupport.h"
#include "flang/Optimizer/Support/FatalError.h"
#include "flang/Optimizer/Support/InternalNames.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/OpenACC/OpenACC.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "llvm/ADT/ArrayRef.h"
@@ -1533,3 +1534,10 @@ mlir::Value fir::factory::createNullBoxProc(fir::FirOpBuilder &builder,
mlir::Value initVal{builder.create<fir::ZeroOp>(loc, boxEleTy)};
return builder.create<fir::EmboxProcOp>(loc, boxTy, initVal);
}
+
+void fir::factory::setInternalLinkage(mlir::func::FuncOp func) {
+ auto internalLinkage = mlir::LLVM::linkage::Linkage::Internal;
+ auto linkage =
+ mlir::LLVM::LinkageAttr::get(func->getContext(), internalLinkage);
+ func->setAttr("llvm.linkage", linkage);
+}
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index c84fb27cb38d..fb9b58ef69c6 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -1834,10 +1834,7 @@ mlir::func::FuncOp IntrinsicLibrary::getWrapper(GeneratorType generator,
// First time this wrapper is needed, build it.
function = builder.createFunction(loc, wrapperName, funcType);
function->setAttr("fir.intrinsic", builder.getUnitAttr());
- auto internalLinkage = mlir::LLVM::linkage::Linkage::Internal;
- auto linkage =
- mlir::LLVM::LinkageAttr::get(builder.getContext(), internalLinkage);
- function->setAttr("llvm.linkage", linkage);
+ fir::factory::setInternalLinkage(function);
function.addEntryBlock();
// Create local context to emit code into the newly created function
@@ -4198,39 +4195,45 @@ mlir::Value IntrinsicLibrary::genIeeeLogb(mlir::Type resultType,
builder.create<mlir::arith::BitcastOp>(loc, intType, realVal);
mlir::Type i1Ty = builder.getI1Type();
- int exponentBias, significandSize;
+ int exponentBias, significandSize, nonSignificandSize;
switch (bitWidth) {
case 16:
if (realType.isF16()) {
// kind=2: 1 sign bit, 5 exponent bits, 10 significand bits
exponentBias = (1 << (5 - 1)) - 1; // 15
significandSize = 10;
+ nonSignificandSize = 6;
break;
}
assert(realType.isBF16() && "unknown 16-bit real type");
// kind=3: 1 sign bit, 8 exponent bits, 7 significand bits
exponentBias = (1 << (8 - 1)) - 1; // 127
significandSize = 7;
+ nonSignificandSize = 9;
break;
case 32:
// kind=4: 1 sign bit, 8 exponent bits, 23 significand bits
exponentBias = (1 << (8 - 1)) - 1; // 127
significandSize = 23;
+ nonSignificandSize = 9;
break;
case 64:
// kind=8: 1 sign bit, 11 exponent bits, 52 significand bits
exponentBias = (1 << (11 - 1)) - 1; // 1023
significandSize = 52;
+ nonSignificandSize = 12;
break;
case 80:
// kind=10: 1 sign bit, 15 exponent bits, 1+63 significand bits
exponentBias = (1 << (15 - 1)) - 1; // 16383
significandSize = 64;
+ nonSignificandSize = 16 + 1;
break;
case 128:
// kind=16: 1 sign bit, 15 exponent bits, 112 significand bits
exponentBias = (1 << (15 - 1)) - 1; // 16383
significandSize = 112;
+ nonSignificandSize = 16;
break;
default:
llvm_unreachable("unknown real type");
@@ -4262,6 +4265,11 @@ mlir::Value IntrinsicLibrary::genIeeeLogb(mlir::Type resultType,
/*withElseRegion=*/true);
// X is non-zero finite -- result is unbiased exponent of X
builder.setInsertionPointToStart(&innerIfOp.getThenRegion().front());
+ mlir::Value isNormal = genIsFPClass(i1Ty, args, normalTest);
+ auto normalIfOp = builder.create<fir::IfOp>(loc, resultType, isNormal,
+ /*withElseRegion=*/true);
+ // X is normal
+ builder.setInsertionPointToStart(&normalIfOp.getThenRegion().front());
mlir::Value biasedExponent = builder.create<mlir::arith::ShRUIOp>(
loc, shiftLeftOne,
builder.createIntegerConstant(loc, intType, significandSize + 1));
@@ -4271,6 +4279,23 @@ mlir::Value IntrinsicLibrary::genIeeeLogb(mlir::Type resultType,
result = builder.create<fir::ConvertOp>(loc, resultType, result);
builder.create<fir::ResultOp>(loc, result);
+ // X is denormal -- result is (-exponentBias - ctlz(significand))
+ builder.setInsertionPointToStart(&normalIfOp.getElseRegion().front());
+ mlir::Value significand = builder.create<mlir::arith::ShLIOp>(
+ loc, intVal,
+ builder.createIntegerConstant(loc, intType, nonSignificandSize));
+ mlir::Value ctlz =
+ builder.create<mlir::math::CountLeadingZerosOp>(loc, significand);
+ mlir::Type i32Ty = builder.getI32Type();
+ result = builder.create<mlir::arith::SubIOp>(
+ loc, builder.createIntegerConstant(loc, i32Ty, -exponentBias),
+ builder.create<fir::ConvertOp>(loc, i32Ty, ctlz));
+ result = builder.create<fir::ConvertOp>(loc, resultType, result);
+ builder.create<fir::ResultOp>(loc, result);
+
+ builder.setInsertionPointToEnd(&innerIfOp.getThenRegion().front());
+ builder.create<fir::ResultOp>(loc, normalIfOp.getResult(0));
+
// X is infinity or NaN -- result is +infinity or NaN
builder.setInsertionPointToStart(&innerIfOp.getElseRegion().front());
result = builder.create<mlir::arith::ShRUIOp>(loc, shiftLeftOne, one);
diff --git a/flang/lib/Optimizer/Builder/MutableBox.cpp b/flang/lib/Optimizer/Builder/MutableBox.cpp
index 4d8860b60915..d4012e9c3d9d 100644
--- a/flang/lib/Optimizer/Builder/MutableBox.cpp
+++ b/flang/lib/Optimizer/Builder/MutableBox.cpp
@@ -674,7 +674,7 @@ void fir::factory::disassociateMutableBox(fir::FirOpBuilder &builder,
// 7.3.2.3 point 7. The dynamic type of a disassociated pointer is the
// same as its declared type.
auto boxTy = box.getBoxTy().dyn_cast<fir::BaseBoxType>();
- auto eleTy = fir::dyn_cast_ptrOrBoxEleTy(boxTy.getEleTy());
+ auto eleTy = fir::unwrapPassByRefType(boxTy.getEleTy());
mlir::Type derivedType = fir::getDerivedType(eleTy);
if (auto recTy = derivedType.dyn_cast<fir::RecordType>()) {
fir::runtime::genNullifyDerivedType(builder, loc, box.getAddr(), recTy,
diff --git a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
index fabbff818b6f..66fbaddcbda1 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
@@ -149,6 +149,22 @@ struct ForcedNorm2Real16 {
}
};
+/// Placeholder for real*16 version of Norm2Dim Intrinsic
+struct ForcedNorm2DimReal16 {
+ static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Norm2DimReal16));
+ static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() {
+ return [](mlir::MLIRContext *ctx) {
+ auto boxTy =
+ fir::runtime::getModel<const Fortran::runtime::Descriptor &>()(ctx);
+ auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8));
+ auto intTy = mlir::IntegerType::get(ctx, 8 * sizeof(int));
+ return mlir::FunctionType::get(
+ ctx, {fir::ReferenceType::get(boxTy), boxTy, intTy, strTy, intTy},
+ mlir::NoneType::get(ctx));
+ };
+ }
+};
+
/// Placeholder for real*10 version of Product Intrinsic
struct ForcedProductReal10 {
static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ProductReal10));
@@ -876,7 +892,14 @@ mlir::Value fir::runtime::genMinval(fir::FirOpBuilder &builder,
void fir::runtime::genNorm2Dim(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value resultBox, mlir::Value arrayBox,
mlir::Value dim) {
- auto func = fir::runtime::getRuntimeFunc<mkRTKey(Norm2Dim)>(loc, builder);
+ mlir::func::FuncOp func;
+ auto ty = arrayBox.getType();
+ auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
+ auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
+ if (eleTy.isF128())
+ func = fir::runtime::getRuntimeFunc<ForcedNorm2DimReal16>(loc, builder);
+ else
+ func = fir::runtime::getRuntimeFunc<mkRTKey(Norm2Dim)>(loc, builder);
auto fTy = func.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
auto sourceLine =
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp
index 314e4264c17e..0142fb0cfb0b 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp
@@ -18,12 +18,12 @@
#include "flang/Optimizer/HLFIR/HLFIROps.h"
#include "flang/Optimizer/HLFIR/Passes.h"
#include "mlir/IR/BuiltinDialect.h"
+#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
-#include "mlir/Transforms/DialectConversion.h"
-#include <mlir/IR/MLIRContext.h>
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include <optional>
namespace hlfir {
@@ -176,7 +176,7 @@ protected:
rewriter.eraseOp(use);
}
}
- rewriter.replaceAllUsesWith(op->getResults(), {base});
+
rewriter.replaceOp(op, base);
}
};
@@ -484,19 +484,19 @@ public:
ProductOpConversion, TransposeOpConversion, CountOpConversion,
DotProductOpConversion, MaxvalOpConversion, MinvalOpConversion,
MinlocOpConversion, MaxlocOpConversion>(context);
- mlir::ConversionTarget target(*context);
- target.addLegalDialect<mlir::BuiltinDialect, mlir::arith::ArithDialect,
- mlir::func::FuncDialect, fir::FIROpsDialect,
- hlfir::hlfirDialect>();
- target.addIllegalOp<hlfir::MatmulOp, hlfir::MatmulTransposeOp, hlfir::SumOp,
- hlfir::ProductOp, hlfir::TransposeOp, hlfir::AnyOp,
- hlfir::AllOp, hlfir::DotProductOp, hlfir::CountOp,
- hlfir::MaxvalOp, hlfir::MinvalOp, hlfir::MinlocOp,
- hlfir::MaxlocOp>();
- target.markUnknownOpDynamicallyLegal(
- [](mlir::Operation *) { return true; });
- if (mlir::failed(
- mlir::applyFullConversion(module, target, std::move(patterns)))) {
+
+ // While conceptually this pass is performing dialect conversion, we use
+ // pattern rewrites here instead of dialect conversion because this pass
+ // looses array bounds from some of the expressions e.g.
+ // !hlfir.expr<2xi32> -> !hlfir.expr<?xi32>
+ // MLIR thinks this is a different type so dialect conversion fails.
+ // Pattern rewriting only requires that the resulting IR is still valid
+ mlir::GreedyRewriteConfig config;
+ // Prevent the pattern driver from merging blocks
+ config.enableRegionSimplification = false;
+
+ if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
+ module, std::move(patterns), config))) {
mlir::emitError(mlir::UnknownLoc::get(context),
"failure in HLFIR intrinsic lowering");
signalPassFailure();
diff --git a/flang/lib/Parser/Fortran-parsers.cpp b/flang/lib/Parser/Fortran-parsers.cpp
index 0dd95d69d3c6..fc81a477897a 100644
--- a/flang/lib/Parser/Fortran-parsers.cpp
+++ b/flang/lib/Parser/Fortran-parsers.cpp
@@ -1268,9 +1268,13 @@ constexpr auto ignore_tkr{
constexpr auto loopCount{
"DIR$ LOOP COUNT" >> construct<CompilerDirective::LoopCount>(
parenthesized(nonemptyList(digitString64)))};
+constexpr auto assumeAligned{"DIR$ ASSUME_ALIGNED" >>
+ optionalList(construct<CompilerDirective::AssumeAligned>(
+ indirect(designator), ":"_tok >> digitString64))};
TYPE_PARSER(beginDirective >>
sourced(construct<CompilerDirective>(ignore_tkr) ||
construct<CompilerDirective>(loopCount) ||
+ construct<CompilerDirective>(assumeAligned) ||
construct<CompilerDirective>(
"DIR$" >> many(construct<CompilerDirective::NameValue>(name,
maybe(("="_tok || ":"_tok) >> digitString64))))) /
diff --git a/flang/lib/Parser/provenance.cpp b/flang/lib/Parser/provenance.cpp
index 3f185ffeb1b1..55ef67fd6288 100644
--- a/flang/lib/Parser/provenance.cpp
+++ b/flang/lib/Parser/provenance.cpp
@@ -167,6 +167,16 @@ void AllSources::AppendSearchPathDirectory(std::string directory) {
searchPath_.push_back(directory);
}
+const SourceFile *AllSources::OpenPath(
+ std::string path, llvm::raw_ostream &error) {
+ std::unique_ptr<SourceFile> source{std::make_unique<SourceFile>(encoding_)};
+ if (source->Open(path, error)) {
+ return ownedSourceFiles_.emplace_back(std::move(source)).get();
+ } else {
+ return nullptr;
+ }
+}
+
const SourceFile *AllSources::Open(std::string path, llvm::raw_ostream &error,
std::optional<std::string> &&prependPath) {
std::unique_ptr<SourceFile> source{std::make_unique<SourceFile>(encoding_)};
@@ -180,12 +190,10 @@ const SourceFile *AllSources::Open(std::string path, llvm::raw_ostream &error,
if (prependPath) {
searchPath_.pop_front();
}
- if (!found) {
- error << "Source file '" << path << "' was not found";
- return nullptr;
- } else if (source->Open(*found, error)) {
- return ownedSourceFiles_.emplace_back(std::move(source)).get();
+ if (found) {
+ return OpenPath(*found, error);
} else {
+ error << "Source file '" << path << "' was not found";
return nullptr;
}
}
diff --git a/flang/lib/Parser/source.cpp b/flang/lib/Parser/source.cpp
index 4b4fed64a1a4..ae834dc24165 100644
--- a/flang/lib/Parser/source.cpp
+++ b/flang/lib/Parser/source.cpp
@@ -75,6 +75,24 @@ std::optional<std::string> LocateSourceFile(
return std::nullopt;
}
+std::vector<std::string> LocateSourceFileAll(
+ std::string name, const std::vector<std::string> &searchPath) {
+ if (name == "-" || llvm::sys::path::is_absolute(name)) {
+ return {name};
+ }
+ std::vector<std::string> result;
+ for (const std::string &dir : searchPath) {
+ llvm::SmallString<128> path{dir};
+ llvm::sys::path::append(path, name);
+ bool isDir{false};
+ auto er = llvm::sys::fs::is_directory(path, isDir);
+ if (!er && !isDir) {
+ result.emplace_back(path.str().str());
+ }
+ }
+ return result;
+}
+
std::size_t RemoveCarriageReturns(llvm::MutableArrayRef<char> buf) {
std::size_t wrote{0};
char *buffer{buf.data()};
diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp
index 1df49a688a12..600aa01999da 100644
--- a/flang/lib/Parser/unparse.cpp
+++ b/flang/lib/Parser/unparse.cpp
@@ -1819,6 +1819,11 @@ public:
[&](const CompilerDirective::LoopCount &lcount) {
Walk("!DIR$ LOOP COUNT (", lcount.v, ", ", ")");
},
+ [&](const std::list<CompilerDirective::AssumeAligned>
+ &assumeAligned) {
+ Word("!DIR$ ASSUME_ALIGNED ");
+ Walk(" ", assumeAligned, ", ");
+ },
[&](const std::list<CompilerDirective::NameValue> &names) {
Walk("!DIR$ ", names, " ");
},
@@ -1841,6 +1846,11 @@ public:
Walk(std::get<Name>(x.t));
Walk("=", std::get<std::optional<std::uint64_t>>(x.t));
}
+ void Unparse(const CompilerDirective::AssumeAligned &x) {
+ Walk(std::get<common::Indirection<Designator>>(x.t));
+ Put(":");
+ Walk(std::get<uint64_t>(x.t));
+ }
// OpenACC Directives & Clauses
void Unparse(const AccAtomicCapture &x) {
diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp
index fdf7805beab7..3adbd7cc4177 100644
--- a/flang/lib/Semantics/check-call.cpp
+++ b/flang/lib/Semantics/check-call.cpp
@@ -674,7 +674,7 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
!(dummyIsAssumedShape || dummyIsAssumedRank ||
(actualIsPointer && dummyIsPointer))) { // C1539 & C1540
messages.Say(
- "ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous %s"_err_en_US,
+ "ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE %s"_err_en_US,
dummyName);
}
}
@@ -912,7 +912,7 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
static void CheckProcedureArg(evaluate::ActualArgument &arg,
const characteristics::Procedure &proc,
const characteristics::DummyProcedure &dummy, const std::string &dummyName,
- SemanticsContext &context) {
+ SemanticsContext &context, bool ignoreImplicitVsExplicit) {
evaluate::FoldingContext &foldingContext{context.foldingContext()};
parser::ContextualMessages &messages{foldingContext.messages()};
auto restorer{
@@ -975,7 +975,8 @@ static void CheckProcedureArg(evaluate::ActualArgument &arg,
if (interface.HasExplicitInterface()) {
std::string whyNot;
std::optional<std::string> warning;
- if (!interface.IsCompatibleWith(argInterface, &whyNot,
+ if (!interface.IsCompatibleWith(argInterface,
+ ignoreImplicitVsExplicit, &whyNot,
/*specificIntrinsic=*/nullptr, &warning)) {
// 15.5.2.9(1): Explicit interfaces must match
if (argInterface.HasExplicitInterface()) {
@@ -1081,7 +1082,8 @@ static void CheckExplicitInterfaceArg(evaluate::ActualArgument &arg,
const characteristics::DummyArgument &dummy,
const characteristics::Procedure &proc, SemanticsContext &context,
const Scope *scope, const evaluate::SpecificIntrinsic *intrinsic,
- bool allowActualArgumentConversions, bool extentErrors) {
+ bool allowActualArgumentConversions, bool extentErrors,
+ bool ignoreImplicitVsExplicit) {
evaluate::FoldingContext &foldingContext{context.foldingContext()};
auto &messages{foldingContext.messages()};
std::string dummyName{"dummy argument"};
@@ -1185,7 +1187,8 @@ static void CheckExplicitInterfaceArg(evaluate::ActualArgument &arg,
},
[&](const characteristics::DummyProcedure &dummy) {
if (!checkActualArgForLabel(arg)) {
- CheckProcedureArg(arg, proc, dummy, dummyName, context);
+ CheckProcedureArg(arg, proc, dummy, dummyName, context,
+ ignoreImplicitVsExplicit);
}
},
[&](const characteristics::AlternateReturn &) {
@@ -1371,7 +1374,8 @@ static void CheckAssociated(evaluate::ActualArguments &arguments,
: nullptr};
std::optional<parser::MessageFixedText> msg{
CheckProcCompatibility(isCall, pointerProc, &*targetProc,
- specificIntrinsic, whyNot, warning)};
+ specificIntrinsic, whyNot, warning,
+ /*ignoreImplicitVsExplicit=*/false)};
if (!msg && warning &&
semanticsContext.ShouldWarn(
common::UsageWarning::ProcDummyArgShapes)) {
@@ -1740,7 +1744,8 @@ static parser::Messages CheckExplicitInterface(
const characteristics::Procedure &proc, evaluate::ActualArguments &actuals,
SemanticsContext &context, const Scope *scope,
const evaluate::SpecificIntrinsic *intrinsic,
- bool allowActualArgumentConversions, bool extentErrors) {
+ bool allowActualArgumentConversions, bool extentErrors,
+ bool ignoreImplicitVsExplicit) {
evaluate::FoldingContext &foldingContext{context.foldingContext()};
parser::ContextualMessages &messages{foldingContext.messages()};
parser::Messages buffer;
@@ -1754,7 +1759,8 @@ static parser::Messages CheckExplicitInterface(
const auto &dummy{proc.dummyArguments.at(index++)};
if (actual) {
CheckExplicitInterfaceArg(*actual, dummy, proc, context, scope, intrinsic,
- allowActualArgumentConversions, extentErrors);
+ allowActualArgumentConversions, extentErrors,
+ ignoreImplicitVsExplicit);
} else if (!dummy.IsOptional()) {
if (dummy.name.empty()) {
messages.Say(
@@ -1783,7 +1789,8 @@ bool CheckInterfaceForGeneric(const characteristics::Procedure &proc,
bool allowActualArgumentConversions) {
return proc.HasExplicitInterface() &&
!CheckExplicitInterface(proc, actuals, context, nullptr, nullptr,
- allowActualArgumentConversions, false /*extentErrors*/)
+ allowActualArgumentConversions, /*extentErrors=*/false,
+ /*ignoreImplicitVsExplicit=*/false)
.AnyFatalError();
}
@@ -1876,6 +1883,7 @@ bool CheckPPCIntrinsic(const Symbol &generic, const Symbol &specific,
bool CheckArguments(const characteristics::Procedure &proc,
evaluate::ActualArguments &actuals, SemanticsContext &context,
const Scope &scope, bool treatingExternalAsImplicit,
+ bool ignoreImplicitVsExplicit,
const evaluate::SpecificIntrinsic *intrinsic) {
bool explicitInterface{proc.HasExplicitInterface()};
evaluate::FoldingContext foldingContext{context.foldingContext()};
@@ -1898,8 +1906,9 @@ bool CheckArguments(const characteristics::Procedure &proc,
}
}
if (explicitInterface) {
- auto buffer{CheckExplicitInterface(
- proc, actuals, context, &scope, intrinsic, true, true)};
+ auto buffer{CheckExplicitInterface(proc, actuals, context, &scope,
+ intrinsic, /*allowArgumentConversions=*/true, /*extentErrors=*/true,
+ ignoreImplicitVsExplicit)};
if (!buffer.empty()) {
if (treatingExternalAsImplicit) {
if (auto *msg{messages.Say(
diff --git a/flang/lib/Semantics/check-call.h b/flang/lib/Semantics/check-call.h
index 4275606225eb..8553f3a31efb 100644
--- a/flang/lib/Semantics/check-call.h
+++ b/flang/lib/Semantics/check-call.h
@@ -35,7 +35,7 @@ class SemanticsContext;
// messages were created, true if all is well.
bool CheckArguments(const evaluate::characteristics::Procedure &,
evaluate::ActualArguments &, SemanticsContext &, const Scope &,
- bool treatingExternalAsImplicit,
+ bool treatingExternalAsImplicit, bool ignoreImplicitVsExplicit,
const evaluate::SpecificIntrinsic *intrinsic);
bool CheckPPCIntrinsic(const Symbol &generic, const Symbol &specific,
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index e9adc086402d..719bea34406a 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -1481,7 +1481,8 @@ void CheckHelper::CheckExternal(const Symbol &symbol) {
if (auto globalChars{Characterize(*global)}) {
if (chars->HasExplicitInterface()) {
std::string whyNot;
- if (!chars->IsCompatibleWith(*globalChars, &whyNot)) {
+ if (!chars->IsCompatibleWith(*globalChars,
+ /*ignoreImplicitVsExplicit=*/false, &whyNot)) {
msg = WarnIfNotInModuleFile(
"The global subprogram '%s' is not compatible with its local procedure declaration (%s)"_warn_en_US,
global->name(), whyNot);
@@ -1507,7 +1508,8 @@ void CheckHelper::CheckExternal(const Symbol &symbol) {
if (auto chars{Characterize(symbol)}) {
if (auto previousChars{Characterize(previous)}) {
std::string whyNot;
- if (!chars->IsCompatibleWith(*previousChars, &whyNot)) {
+ if (!chars->IsCompatibleWith(*previousChars,
+ /*ignoreImplicitVsExplicit=*/false, &whyNot)) {
if (auto *msg{WarnIfNotInModuleFile(
"The external interface '%s' is not compatible with an earlier definition (%s)"_warn_en_US,
symbol.name(), whyNot)}) {
diff --git a/flang/lib/Semantics/data-to-inits.cpp b/flang/lib/Semantics/data-to-inits.cpp
index c12af1bb0165..fa22d4986790 100644
--- a/flang/lib/Semantics/data-to-inits.cpp
+++ b/flang/lib/Semantics/data-to-inits.cpp
@@ -524,7 +524,7 @@ static const DerivedTypeSpec *HasDefaultInitialization(const Symbol &symbol) {
directs.begin(), directs.end(), [](const Symbol &component) {
return !IsAllocatable(component) &&
HasDeclarationInitializer(component);
- })) {
+ }) != directs.end()) {
return derived;
}
}
diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp
index 8d817f077880..54bfe0f2e156 100644
--- a/flang/lib/Semantics/expression.cpp
+++ b/flang/lib/Semantics/expression.cpp
@@ -973,7 +973,8 @@ MaybeExpr ExpressionAnalyzer::Analyze(const parser::Name &n) {
}
}
if (!isWholeAssumedSizeArrayOk_ &&
- semantics::IsAssumedSizeArray(*n.symbol)) { // C1002, C1014, C1231
+ semantics::IsAssumedSizeArray(
+ ResolveAssociations(*n.symbol))) { // C1002, C1014, C1231
AttachDeclaration(
SayAt(n,
"Whole assumed-size array '%s' may not appear here without subscripts"_err_en_US,
@@ -1329,15 +1330,29 @@ std::optional<Component> ExpressionAnalyzer::CreateComponent(DataRef &&base,
// Derived type component references and type parameter inquiries
MaybeExpr ExpressionAnalyzer::Analyze(const parser::StructureComponent &sc) {
- MaybeExpr base{Analyze(sc.base)};
Symbol *sym{sc.component.symbol};
- if (!base || !sym || context_.HasError(sym)) {
+ if (context_.HasError(sym)) {
+ return std::nullopt;
+ }
+ const auto *misc{sym->detailsIf<semantics::MiscDetails>()};
+ bool isTypeParamInquiry{sym->has<semantics::TypeParamDetails>() ||
+ (misc &&
+ (misc->kind() == semantics::MiscDetails::Kind::KindParamInquiry ||
+ misc->kind() == semantics::MiscDetails::Kind::LenParamInquiry))};
+ MaybeExpr base;
+ if (isTypeParamInquiry) {
+ auto restorer{AllowWholeAssumedSizeArray()};
+ base = Analyze(sc.base);
+ } else {
+ base = Analyze(sc.base);
+ }
+ if (!base) {
return std::nullopt;
}
const auto &name{sc.component.source};
if (auto *dtExpr{UnwrapExpr<Expr<SomeDerived>>(*base)}) {
const auto *dtSpec{GetDerivedTypeSpec(dtExpr->GetType())};
- if (sym->detailsIf<semantics::TypeParamDetails>()) {
+ if (isTypeParamInquiry) {
if (auto *designator{UnwrapExpr<Designator<SomeDerived>>(*dtExpr)}) {
if (std::optional<DynamicType> dyType{DynamicType::From(*sym)}) {
if (dyType->category() == TypeCategory::Integer) {
@@ -1350,8 +1365,7 @@ MaybeExpr ExpressionAnalyzer::Analyze(const parser::StructureComponent &sc) {
Say(name, "Type parameter is not INTEGER"_err_en_US);
} else {
Say(name,
- "A type parameter inquiry must be applied to "
- "a designator"_err_en_US);
+ "A type parameter inquiry must be applied to a designator"_err_en_US);
}
} else if (!dtSpec || !dtSpec->scope()) {
CHECK(context_.AnyFatalError() || !foldingContext_.messages().empty());
@@ -1393,8 +1407,7 @@ MaybeExpr ExpressionAnalyzer::Analyze(const parser::StructureComponent &sc) {
return AsGenericExpr(std::move(realExpr));
}
}
- } else if (kind == MiscKind::KindParamInquiry ||
- kind == MiscKind::LenParamInquiry) {
+ } else if (isTypeParamInquiry) { // %kind or %len
ActualArgument arg{std::move(*base)};
SetArgSourceLocation(arg, name);
return MakeFunctionRef(name, ActualArguments{std::move(arg)});
@@ -3129,7 +3142,8 @@ std::optional<characteristics::Procedure> ExpressionAnalyzer::CheckCall(
if (auto iter{implicitInterfaces_.find(name)};
iter != implicitInterfaces_.end()) {
std::string whyNot;
- if (!chars->IsCompatibleWith(iter->second.second, &whyNot)) {
+ if (!chars->IsCompatibleWith(iter->second.second,
+ /*ignoreImplicitVsExplicit=*/false, &whyNot)) {
if (auto *msg{Say(callSite,
"Reference to the procedure '%s' has an implicit interface that is distinct from another reference: %s"_warn_en_US,
name, whyNot)}) {
@@ -3169,7 +3183,7 @@ std::optional<characteristics::Procedure> ExpressionAnalyzer::CheckCall(
}
ok &= semantics::CheckArguments(*chars, arguments, context_,
context_.FindScope(callSite), treatExternalAsImplicit,
- specificIntrinsic);
+ /*ignoreImplicitVsExplicit=*/false, specificIntrinsic);
}
if (procSymbol && !IsPureProcedure(*procSymbol)) {
if (const semantics::Scope *
@@ -3188,7 +3202,8 @@ std::optional<characteristics::Procedure> ExpressionAnalyzer::CheckCall(
if (auto globalChars{characteristics::Procedure::Characterize(
*global, context_.foldingContext())}) {
semantics::CheckArguments(*globalChars, arguments, context_,
- context_.FindScope(callSite), true,
+ context_.FindScope(callSite), /*treatExternalAsImplicit=*/true,
+ /*ignoreImplicitVsExplicit=*/false,
nullptr /*not specific intrinsic*/);
}
}
@@ -3741,9 +3756,12 @@ MaybeExpr ExpressionAnalyzer::Analyze(const parser::Selector &selector) {
}
}
}
+ // Not a Variable -> FunctionReference
+ auto restorer{AllowWholeAssumedSizeArray()};
+ return Analyze(selector.u);
+ } else { // Expr
+ return Analyze(selector.u);
}
- // Not a Variable -> FunctionReference; handle normally as Variable or Expr
- return Analyze(selector.u);
}
MaybeExpr ExpressionAnalyzer::Analyze(const parser::DataStmtConstant &x) {
@@ -3999,6 +4017,7 @@ void ArgumentAnalyzer::Analyze(
const parser::ActualArgSpec &arg, bool isSubroutine) {
// TODO: C1534: Don't allow a "restricted" specific intrinsic to be passed.
std::optional<ActualArgument> actual;
+ auto restorer{context_.AllowWholeAssumedSizeArray()};
common::visit(
common::visitors{
[&](const common::Indirection<parser::Expr> &x) {
@@ -4613,8 +4632,9 @@ evaluate::Expr<evaluate::SubscriptInteger> AnalyzeKindSelector(
SemanticsContext &context, common::TypeCategory category,
const std::optional<parser::KindSelector> &selector) {
evaluate::ExpressionAnalyzer analyzer{context};
+ CHECK(context.location().has_value());
auto restorer{
- analyzer.GetContextualMessages().SetLocation(context.location().value())};
+ analyzer.GetContextualMessages().SetLocation(*context.location())};
return analyzer.AnalyzeKindSelector(category, selector);
}
diff --git a/flang/lib/Semantics/mod-file.cpp b/flang/lib/Semantics/mod-file.cpp
index 7072ddee18eb..37fe0240537b 100644
--- a/flang/lib/Semantics/mod-file.cpp
+++ b/flang/lib/Semantics/mod-file.cpp
@@ -41,11 +41,13 @@ struct ModHeader {
static constexpr const char magic[magicLen + 1]{"!mod$ v1 sum:"};
static constexpr char terminator{'\n'};
static constexpr int len{magicLen + 1 + sumLen};
+ static constexpr int needLen{7};
+ static constexpr const char need[needLen + 1]{"!need$ "};
};
static std::optional<SourceName> GetSubmoduleParent(const parser::Program &);
static void CollectSymbols(const Scope &, SymbolVector &, SymbolVector &,
- std::map<const Symbol *, SourceName> &);
+ std::map<const Symbol *, SourceName> &, UnorderedSymbolSet &);
static void PutPassName(llvm::raw_ostream &, const std::optional<SourceName> &);
static void PutInit(llvm::raw_ostream &, const Symbol &, const MaybeExpr &,
const parser::Expr *, const std::map<const Symbol *, SourceName> &);
@@ -58,11 +60,12 @@ static void PutShape(
static llvm::raw_ostream &PutAttr(llvm::raw_ostream &, Attr);
static llvm::raw_ostream &PutType(llvm::raw_ostream &, const DeclTypeSpec &);
static llvm::raw_ostream &PutLower(llvm::raw_ostream &, std::string_view);
-static std::error_code WriteFile(
- const std::string &, const std::string &, bool = true);
+static std::error_code WriteFile(const std::string &, const std::string &,
+ ModuleCheckSumType &, bool debug = true);
static bool FileContentsMatch(
const std::string &, const std::string &, const std::string &);
-static std::string CheckSum(const std::string_view &);
+static ModuleCheckSumType ComputeCheckSum(const std::string_view &);
+static std::string CheckSumString(ModuleCheckSumType);
// Collect symbols needed for a subprogram interface
class SubprogramSymbolCollector {
@@ -129,17 +132,23 @@ static std::string ModFileName(const SourceName &name,
// Write the module file for symbol, which must be a module or submodule.
void ModFileWriter::Write(const Symbol &symbol) {
- auto *ancestor{symbol.get<ModuleDetails>().ancestor()};
+ auto &module{symbol.get<ModuleDetails>()};
+ if (module.moduleFileHash()) {
+ return; // already written
+ }
+ auto *ancestor{module.ancestor()};
isSubmodule_ = ancestor != nullptr;
auto ancestorName{ancestor ? ancestor->GetName().value().ToString() : ""s};
auto path{context_.moduleDirectory() + '/' +
ModFileName(symbol.name(), ancestorName, context_.moduleFileSuffix())};
PutSymbols(DEREF(symbol.scope()));
- if (std::error_code error{
- WriteFile(path, GetAsString(symbol), context_.debugModuleWriter())}) {
+ ModuleCheckSumType checkSum;
+ if (std::error_code error{WriteFile(
+ path, GetAsString(symbol), checkSum, context_.debugModuleWriter())}) {
context_.Say(
symbol.name(), "Error writing %s: %s"_err_en_US, path, error.message());
}
+ const_cast<ModuleDetails &>(module).set_moduleFileHash(checkSum);
}
// Return the entire body of the module file
@@ -147,6 +156,8 @@ void ModFileWriter::Write(const Symbol &symbol) {
std::string ModFileWriter::GetAsString(const Symbol &symbol) {
std::string buf;
llvm::raw_string_ostream all{buf};
+ all << needs_.str();
+ needs_.str().clear();
auto &details{symbol.get<ModuleDetails>()};
if (!details.isSubmodule()) {
all << "module " << symbol.name();
@@ -258,7 +269,17 @@ void ModFileWriter::PutSymbols(const Scope &scope) {
SymbolVector sorted;
SymbolVector uses;
PrepareRenamings(scope);
- CollectSymbols(scope, sorted, uses, renamings_);
+ UnorderedSymbolSet modules;
+ CollectSymbols(scope, sorted, uses, renamings_, modules);
+ // Write module files for dependencies first so that their
+ // hashes are known.
+ for (auto ref : modules) {
+ Write(*ref);
+ needs_ << ModHeader::need
+ << CheckSumString(ref->get<ModuleDetails>().moduleFileHash().value())
+ << (ref->owner().IsIntrinsicModules() ? " i " : " n ")
+ << ref->name().ToString() << '\n';
+ }
std::string buf; // stuff after CONTAINS in derived type
llvm::raw_string_ostream typeBindings{buf};
for (const Symbol &symbol : sorted) {
@@ -730,16 +751,26 @@ static inline SourceName NameInModuleFile(const Symbol &symbol) {
// Collect the symbols of this scope sorted by their original order, not name.
// Generics and namelists are exceptions: they are sorted after other symbols.
void CollectSymbols(const Scope &scope, SymbolVector &sorted,
- SymbolVector &uses, std::map<const Symbol *, SourceName> &renamings) {
+ SymbolVector &uses, std::map<const Symbol *, SourceName> &renamings,
+ UnorderedSymbolSet &modules) {
SymbolVector namelist, generics;
auto symbols{scope.GetSymbols()};
std::size_t commonSize{scope.commonBlocks().size()};
sorted.reserve(symbols.size() + commonSize);
for (SymbolRef symbol : symbols) {
+ const auto *generic{symbol->detailsIf<GenericDetails>()};
+ if (generic) {
+ uses.insert(uses.end(), generic->uses().begin(), generic->uses().end());
+ for (auto ref : generic->uses()) {
+ modules.insert(GetUsedModule(ref->get<UseDetails>()));
+ }
+ } else if (const auto *use{symbol->detailsIf<UseDetails>()}) {
+ modules.insert(GetUsedModule(*use));
+ }
if (symbol->test(Symbol::Flag::ParentComp)) {
} else if (symbol->has<NamelistDetails>()) {
namelist.push_back(symbol);
- } else if (const auto *generic{symbol->detailsIf<GenericDetails>()}) {
+ } else if (generic) {
if (generic->specific() &&
&generic->specific()->owner() == &symbol->owner()) {
sorted.push_back(*generic->specific());
@@ -751,9 +782,6 @@ void CollectSymbols(const Scope &scope, SymbolVector &sorted,
} else {
sorted.push_back(symbol);
}
- if (const auto *details{symbol->detailsIf<GenericDetails>()}) {
- uses.insert(uses.end(), details->uses().begin(), details->uses().end());
- }
}
// Sort most symbols by name: use of Symbol::ReplaceName ensures the source
// location of a symbol's name is the first "real" use.
@@ -1100,10 +1128,11 @@ static llvm::ErrorOr<Temp> MkTemp(const std::string &path) {
// Write the module file at path, prepending header. If an error occurs,
// return errno, otherwise 0.
-static std::error_code WriteFile(
- const std::string &path, const std::string &contents, bool debug) {
+static std::error_code WriteFile(const std::string &path,
+ const std::string &contents, ModuleCheckSumType &checkSum, bool debug) {
+ checkSum = ComputeCheckSum(contents);
auto header{std::string{ModHeader::bom} + ModHeader::magic +
- CheckSum(contents) + ModHeader::terminator};
+ CheckSumString(checkSum) + ModHeader::terminator};
if (debug) {
llvm::dbgs() << "Processing module " << path << ": ";
}
@@ -1155,12 +1184,16 @@ static bool FileContentsMatch(const std::string &path,
// Compute a simple hash of the contents of a module file and
// return it as a string of hex digits.
// This uses the Fowler-Noll-Vo hash function.
-static std::string CheckSum(const std::string_view &contents) {
- std::uint64_t hash{0xcbf29ce484222325ull};
+static ModuleCheckSumType ComputeCheckSum(const std::string_view &contents) {
+ ModuleCheckSumType hash{0xcbf29ce484222325ull};
for (char c : contents) {
hash ^= c & 0xff;
hash *= 0x100000001b3;
}
+ return hash;
+}
+
+static std::string CheckSumString(ModuleCheckSumType hash) {
static const char *digits = "0123456789abcdef";
std::string result(ModHeader::sumLen, '0');
for (size_t i{ModHeader::sumLen}; hash != 0; hash >>= 4) {
@@ -1169,18 +1202,74 @@ static std::string CheckSum(const std::string_view &contents) {
return result;
}
-static bool VerifyHeader(llvm::ArrayRef<char> content) {
+std::optional<ModuleCheckSumType> ExtractCheckSum(const std::string_view &str) {
+ if (str.size() == ModHeader::sumLen) {
+ ModuleCheckSumType hash{0};
+ for (size_t j{0}; j < ModHeader::sumLen; ++j) {
+ hash <<= 4;
+ char ch{str.at(j)};
+ if (ch >= '0' && ch <= '9') {
+ hash += ch - '0';
+ } else if (ch >= 'a' && ch <= 'f') {
+ hash += ch - 'a' + 10;
+ } else {
+ return std::nullopt;
+ }
+ }
+ return hash;
+ }
+ return std::nullopt;
+}
+
+static std::optional<ModuleCheckSumType> VerifyHeader(
+ llvm::ArrayRef<char> content) {
std::string_view sv{content.data(), content.size()};
if (sv.substr(0, ModHeader::magicLen) != ModHeader::magic) {
- return false;
+ return std::nullopt;
}
+ ModuleCheckSumType checkSum{ComputeCheckSum(sv.substr(ModHeader::len))};
std::string_view expectSum{sv.substr(ModHeader::magicLen, ModHeader::sumLen)};
- std::string actualSum{CheckSum(sv.substr(ModHeader::len))};
- return expectSum == actualSum;
+ if (auto extracted{ExtractCheckSum(expectSum)};
+ extracted && *extracted == checkSum) {
+ return checkSum;
+ } else {
+ return std::nullopt;
+ }
}
-Scope *ModFileReader::Read(const SourceName &name,
- std::optional<bool> isIntrinsic, Scope *ancestor, bool silent) {
+static void GetModuleDependences(
+ ModuleDependences &dependences, llvm::ArrayRef<char> content) {
+ std::size_t limit{content.size()};
+ std::string_view str{content.data(), limit};
+ for (std::size_t j{ModHeader::len};
+ str.substr(j, ModHeader::needLen) == ModHeader::need;) {
+ j += 7;
+ auto checkSum{ExtractCheckSum(str.substr(j, ModHeader::sumLen))};
+ if (!checkSum) {
+ break;
+ }
+ j += ModHeader::sumLen;
+ bool intrinsic{false};
+ if (str.substr(j, 3) == " i ") {
+ intrinsic = true;
+ } else if (str.substr(j, 3) != " n ") {
+ break;
+ }
+ j += 3;
+ std::size_t start{j};
+ for (; j < limit && str.at(j) != '\n'; ++j) {
+ }
+ if (j > start && j < limit && str.at(j) == '\n') {
+ dependences.AddDependence(
+ std::string{str.substr(start, j - start)}, intrinsic, *checkSum);
+ } else {
+ break;
+ }
+ }
+}
+
+Scope *ModFileReader::Read(SourceName name, std::optional<bool> isIntrinsic,
+ Scope *ancestor, bool silent) {
std::string ancestorName; // empty for module
Symbol *notAModule{nullptr};
bool fatalError{false};
@@ -1190,12 +1279,26 @@ Scope *ModFileReader::Read(const SourceName &name,
}
ancestorName = ancestor->GetName().value().ToString();
}
+ auto requiredHash{context_.moduleDependences().GetRequiredHash(
+ name.ToString(), isIntrinsic.value_or(false))};
if (!isIntrinsic.value_or(false) && !ancestor) {
// Already present in the symbol table as a usable non-intrinsic module?
auto it{context_.globalScope().find(name)};
if (it != context_.globalScope().end()) {
Scope *scope{it->second->scope()};
if (scope->kind() == Scope::Kind::Module) {
+ if (requiredHash) {
+ if (const Symbol * foundModule{scope->symbol()}) {
+ if (const auto *module{foundModule->detailsIf<ModuleDetails>()};
+ module && module->moduleFileHash() &&
+ *requiredHash != *module->moduleFileHash()) {
+ Say(name, ancestorName,
+ "Multiple versions of the module '%s' cannot be required by the same compilation"_err_en_US,
+ name.ToString());
+ return nullptr;
+ }
+ }
+ }
return scope;
} else {
notAModule = scope->symbol();
@@ -1249,7 +1352,50 @@ Scope *ModFileReader::Read(const SourceName &name,
for (const auto &dir : context_.intrinsicModuleDirectories()) {
options.searchDirectories.push_back(dir);
}
+ if (!requiredHash) {
+ requiredHash =
+ context_.moduleDependences().GetRequiredHash(name.ToString(), true);
+ }
}
+
+ // Look for the right module file if its hash is known
+ if (requiredHash && !fatalError) {
+ std::vector<std::string> misses;
+ for (const std::string &maybe :
+ parser::LocateSourceFileAll(path, options.searchDirectories)) {
+ if (const auto *srcFile{context_.allCookedSources().allSources().OpenPath(
+ maybe, llvm::errs())}) {
+ if (auto checkSum{VerifyHeader(srcFile->content())}) {
+ if (*checkSum == *requiredHash) {
+ path = maybe;
+ if (!misses.empty()) {
+ auto &msg{context_.Say(name,
+ "Module file for '%s' appears later in the module search path than conflicting modules with different checksums"_warn_en_US,
+ name.ToString())};
+ for (const std::string &m : misses) {
+ msg.Attach(
+ name, "Module file with a conflicting name: '%s'"_en_US, m);
+ }
+ }
+ misses.clear();
+ break;
+ } else {
+ misses.emplace_back(maybe);
+ }
+ }
+ }
+ }
+ if (!misses.empty()) {
+ auto &msg{Say(name, ancestorName,
+ "Could not find a module file for '%s' in the module search path with the expected checksum"_err_en_US,
+ name.ToString())};
+ for (const std::string &m : misses) {
+ msg.Attach(name, "Module file with different checksum: '%s'"_en_US, m);
+ }
+ return nullptr;
+ }
+ }
+
const auto *sourceFile{fatalError ? nullptr : parsing.Prescan(path, options)};
if (fatalError || parsing.messages().AnyFatalError()) {
if (!silent) {
@@ -1270,10 +1416,17 @@ Scope *ModFileReader::Read(const SourceName &name,
return nullptr;
}
CHECK(sourceFile);
- if (!VerifyHeader(sourceFile->content())) {
+ std::optional<ModuleCheckSumType> checkSum{
+ VerifyHeader(sourceFile->content())};
+ if (!checkSum) {
Say(name, ancestorName, "File has invalid checksum: %s"_warn_en_US,
sourceFile->path());
return nullptr;
+ } else if (requiredHash && *requiredHash != *checkSum) {
+ Say(name, ancestorName,
+ "File is not the right module file for %s"_warn_en_US,
+ "'"s + name.ToString() + "': "s + sourceFile->path());
+ return nullptr;
}
llvm::raw_null_ostream NullStream;
parsing.Parse(NullStream);
@@ -1316,6 +1469,7 @@ Scope *ModFileReader::Read(const SourceName &name,
// Process declarations from the module file
bool wasInModuleFile{context_.foldingContext().inModuleFile()};
context_.foldingContext().set_inModuleFile(true);
+ GetModuleDependences(context_.moduleDependences(), sourceFile->content());
ResolveNames(context_, parseTree, topScope);
context_.foldingContext().set_inModuleFile(wasInModuleFile);
if (!moduleSymbol) {
@@ -1331,8 +1485,8 @@ Scope *ModFileReader::Read(const SourceName &name,
}
}
if (moduleSymbol) {
- CHECK(moduleSymbol->has<ModuleDetails>());
CHECK(moduleSymbol->test(Symbol::Flag::ModFile));
+ moduleSymbol->get<ModuleDetails>().set_moduleFileHash(checkSum.value());
if (isIntrinsic.value_or(false)) {
moduleSymbol->attrs().set(Attr::INTRINSIC);
}
@@ -1342,7 +1496,7 @@ Scope *ModFileReader::Read(const SourceName &name,
}
}
-parser::Message &ModFileReader::Say(const SourceName &name,
+parser::Message &ModFileReader::Say(SourceName name,
const std::string &ancestor, parser::MessageFixedText &&msg,
const std::string &arg) {
return context_.Say(name, "Cannot read module file for %s: %s"_err_en_US,
diff --git a/flang/lib/Semantics/mod-file.h b/flang/lib/Semantics/mod-file.h
index 5be117153dd4..b4ece4018c05 100644
--- a/flang/lib/Semantics/mod-file.h
+++ b/flang/lib/Semantics/mod-file.h
@@ -38,7 +38,8 @@ public:
private:
SemanticsContext &context_;
- // Buffer to use with raw_string_ostream
+ // Buffers to use with raw_string_ostream
+ std::string needsBuf_;
std::string usesBuf_;
std::string useExtraAttrsBuf_;
std::string declsBuf_;
@@ -46,6 +47,7 @@ private:
// Tracks nested DEC structures and fields of that type
UnorderedSymbolSet emittedDECStructures_, emittedDECFields_;
+ llvm::raw_string_ostream needs_{needsBuf_};
llvm::raw_string_ostream uses_{usesBuf_};
llvm::raw_string_ostream useExtraAttrs_{
useExtraAttrsBuf_}; // attrs added to used entity
@@ -83,18 +85,17 @@ private:
class ModFileReader {
public:
- // directories specifies where to search for module files
ModFileReader(SemanticsContext &context) : context_{context} {}
// Find and read the module file for a module or submodule.
// If ancestor is specified, look for a submodule of that module.
// Return the Scope for that module/submodule or nullptr on error.
- Scope *Read(const SourceName &, std::optional<bool> isIntrinsic,
- Scope *ancestor, bool silent = false);
+ Scope *Read(SourceName, std::optional<bool> isIntrinsic, Scope *ancestor,
+ bool silent);
private:
SemanticsContext &context_;
- parser::Message &Say(const SourceName &, const std::string &,
+ parser::Message &Say(SourceName, const std::string &,
parser::MessageFixedText &&, const std::string &);
};
diff --git a/flang/lib/Semantics/pointer-assignment.cpp b/flang/lib/Semantics/pointer-assignment.cpp
index 4c293e85cf9d..58155a29da1e 100644
--- a/flang/lib/Semantics/pointer-assignment.cpp
+++ b/flang/lib/Semantics/pointer-assignment.cpp
@@ -362,7 +362,8 @@ bool PointerAssignmentChecker::Check(parser::CharBlock rhsName, bool isCall,
std::optional<std::string> warning;
CharacterizeProcedure();
if (std::optional<MessageFixedText> msg{evaluate::CheckProcCompatibility(
- isCall, procedure_, rhsProcedure, specific, whyNot, warning)}) {
+ isCall, procedure_, rhsProcedure, specific, whyNot, warning,
+ /*ignoreImplicitVsExplicit=*/isCall)}) {
Say(std::move(*msg), description_, rhsName, whyNot);
return false;
}
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index 0cbe0b492fa4..5a95d3a98992 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -1118,7 +1118,6 @@ private:
// Set when walking DATA & array constructor implied DO loop bounds
// to warn about use of the implied DO intex therein.
std::optional<SourceName> checkIndexUseInOwnBounds_;
- bool hasBindCName_{false};
bool isVectorType_{false};
UnorderedSymbolSet mustBeScalar_;
@@ -1225,8 +1224,7 @@ public:
void Post(const parser::ForallConstruct &);
bool Pre(const parser::ForallStmt &);
void Post(const parser::ForallStmt &);
- bool Pre(const parser::BlockStmt &);
- bool Pre(const parser::EndBlockStmt &);
+ bool Pre(const parser::BlockConstruct &);
void Post(const parser::Selector &);
void Post(const parser::AssociateStmt &);
void Post(const parser::EndAssociateStmt &);
@@ -1283,6 +1281,8 @@ public:
void Post(const parser::CycleStmt &x) { CheckRef(x.v); }
void Post(const parser::ExitStmt &x) { CheckRef(x.v); }
+ void HandleImpliedAsynchronousInScope(const parser::Block &);
+
private:
// R1105 selector -> expr | variable
// expr is set in either case unless there were errors
@@ -3375,7 +3375,8 @@ void ModuleVisitor::BeginModule(const parser::Name &name, bool isSubmodule) {
Scope *ModuleVisitor::FindModule(const parser::Name &name,
std::optional<bool> isIntrinsic, Scope *ancestor) {
ModFileReader reader{context()};
- Scope *scope{reader.Read(name.source, isIntrinsic, ancestor)};
+ Scope *scope{
+ reader.Read(name.source, isIntrinsic, ancestor, /*silent=*/false)};
if (!scope) {
return nullptr;
}
@@ -3709,13 +3710,17 @@ bool SubprogramVisitor::Pre(const parser::Suffix &suffix) {
bool SubprogramVisitor::Pre(const parser::PrefixSpec &x) {
// Save this to process after UseStmt and ImplicitPart
if (const auto *parsedType{std::get_if<parser::DeclarationTypeSpec>(&x.u)}) {
- FuncResultStack::FuncInfo &info{DEREF(funcResultStack().Top())};
- if (info.parsedType) { // C1543
- Say(currStmtSource().value(),
- "FUNCTION prefix cannot specify the type more than once"_err_en_US);
+ if (FuncResultStack::FuncInfo * info{funcResultStack().Top()}) {
+ if (info->parsedType) { // C1543
+ Say(currStmtSource().value(),
+ "FUNCTION prefix cannot specify the type more than once"_err_en_US);
+ } else {
+ info->parsedType = parsedType;
+ info->source = currStmtSource();
+ }
} else {
- info.parsedType = parsedType;
- info.source = currStmtSource();
+ Say(currStmtSource().value(),
+ "SUBROUTINE prefix cannot specify a type"_err_en_US);
}
return false;
} else {
@@ -4216,7 +4221,12 @@ bool SubprogramVisitor::BeginMpSubprogram(const parser::Name &name) {
EraseSymbol(name);
Symbol &newSymbol{MakeSymbol(name, SubprogramDetails{})};
PushScope(Scope::Kind::Subprogram, &newSymbol);
- newSymbol.get<SubprogramDetails>().set_moduleInterface(*symbol);
+ auto &newSubprogram{newSymbol.get<SubprogramDetails>()};
+ newSubprogram.set_moduleInterface(*symbol);
+ auto &subprogram{symbol->get<SubprogramDetails>()};
+ if (const auto *name{subprogram.bindName()}) {
+ newSubprogram.set_bindName(std::string{*name});
+ }
newSymbol.attrs() |= symbol->attrs();
newSymbol.set(symbol->test(Symbol::Flag::Subroutine)
? Symbol::Flag::Subroutine
@@ -4965,13 +4975,13 @@ Symbol &DeclarationVisitor::DeclareProcEntity(
const parser::Name &name, Attrs attrs, const Symbol *interface) {
Symbol &symbol{DeclareEntity<ProcEntityDetails>(name, attrs)};
if (auto *details{symbol.detailsIf<ProcEntityDetails>()}) {
- if (details->IsInterfaceSet()) {
- SayWithDecl(name, symbol,
- "The interface for procedure '%s' has already been "
- "declared"_err_en_US);
- context().SetError(symbol);
+ if (context().HasError(symbol)) {
} else if (HasCycle(symbol, interface)) {
return symbol;
+ } else if (interface && (details->procInterface() || details->type())) {
+ SayWithDecl(name, symbol,
+ "The interface for procedure '%s' has already been declared"_err_en_US);
+ context().SetError(symbol);
} else if (interface) {
details->set_procInterface(*interface);
if (interface->test(Symbol::Flag::Function)) {
@@ -5578,7 +5588,10 @@ bool DeclarationVisitor::Pre(const parser::ProcedureDeclarationStmt &x) {
for (const parser::ProcAttrSpec &procAttr : procAttrSpec) {
if (auto *bindC{std::get_if<parser::LanguageBindingSpec>(&procAttr.u)}) {
if (bindC->v.has_value()) {
- hasBindCName_ = true;
+ if (std::get<std::list<parser::ProcDecl>>(x.t).size() > 1) {
+ Say(context().location().value(),
+ "A procedure declaration statement with a binding name may not declare multiple procedures"_err_en_US);
+ }
break;
}
}
@@ -5587,7 +5600,6 @@ bool DeclarationVisitor::Pre(const parser::ProcedureDeclarationStmt &x) {
}
void DeclarationVisitor::Post(const parser::ProcedureDeclarationStmt &) {
interfaceName_ = nullptr;
- hasBindCName_ = false;
EndDecl();
}
bool DeclarationVisitor::Pre(const parser::DataComponentDefStmt &x) {
@@ -5647,10 +5659,8 @@ void DeclarationVisitor::Post(const parser::ProcInterface &x) {
void DeclarationVisitor::Post(const parser::ProcDecl &x) {
const auto &name{std::get<parser::Name>(x.t)};
const Symbol *procInterface{nullptr};
- if (interfaceName_) {
- procInterface = interfaceName_->symbol->has<GenericDetails>()
- ? interfaceName_->symbol->get<GenericDetails>().specific()
- : interfaceName_->symbol;
+ if (interfaceName_ && interfaceName_->symbol) {
+ procInterface = &BypassGeneric(*interfaceName_->symbol);
}
auto attrs{HandleSaveName(name.source, GetAttrs())};
DerivedTypeDetails *dtDetails{nullptr};
@@ -6027,7 +6037,7 @@ void DeclarationVisitor::Post(const parser::BasedPointer &bp) {
if (const auto *derived{pointeeType->AsDerived()}) {
if (!IsSequenceOrBindCType(derived)) {
Say(pointeeName,
- "Type of Cray pointee '%s' is a derived type that is neither SEQUENCE nor BIND(C)"_err_en_US);
+ "Type of Cray pointee '%s' is a derived type that is neither SEQUENCE nor BIND(C)"_warn_en_US);
}
}
}
@@ -6956,14 +6966,17 @@ bool ConstructVisitor::Pre(const parser::ForallStmt &) {
}
void ConstructVisitor::Post(const parser::ForallStmt &) { PopScope(); }
-bool ConstructVisitor::Pre(const parser::BlockStmt &x) {
- CheckDef(x.v);
+bool ConstructVisitor::Pre(const parser::BlockConstruct &x) {
+ const auto &[blockStmt, specPart, execPart, endBlockStmt] = x.t;
+ Walk(blockStmt);
+ CheckDef(blockStmt.statement.v);
PushScope(Scope::Kind::BlockConstruct, nullptr);
- return false;
-}
-bool ConstructVisitor::Pre(const parser::EndBlockStmt &x) {
+ Walk(specPart);
+ HandleImpliedAsynchronousInScope(execPart);
+ Walk(execPart);
+ Walk(endBlockStmt);
PopScope();
- CheckRef(x.v);
+ CheckRef(endBlockStmt.statement.v);
return false;
}
@@ -7337,6 +7350,224 @@ const DeclTypeSpec &ConstructVisitor::ToDeclTypeSpec(
}
}
+class ExecutionPartSkimmerBase {
+public:
+ template <typename A> bool Pre(const A &) { return true; }
+ template <typename A> void Post(const A &) {}
+
+ bool InNestedBlockConstruct() const { return blockDepth_ > 0; }
+
+ bool Pre(const parser::AssociateConstruct &) {
+ PushScope();
+ return true;
+ }
+ void Post(const parser::AssociateConstruct &) { PopScope(); }
+ bool Pre(const parser::Association &x) {
+ Hide(std::get<parser::Name>(x.t));
+ return true;
+ }
+ bool Pre(const parser::BlockConstruct &) {
+ PushScope();
+ ++blockDepth_;
+ return true;
+ }
+ void Post(const parser::BlockConstruct &) {
+ --blockDepth_;
+ PopScope();
+ }
+ bool Pre(const parser::EntityDecl &x) {
+ Hide(std::get<parser::ObjectName>(x.t));
+ return true;
+ }
+ void Post(const parser::ImportStmt &x) {
+ if (x.kind == common::ImportKind::None ||
+ x.kind == common::ImportKind::Only) {
+ if (!nestedScopes_.front().importOnly.has_value()) {
+ nestedScopes_.front().importOnly.emplace();
+ }
+ for (const auto &name : x.names) {
+ nestedScopes_.front().importOnly->emplace(name.source);
+ }
+ } else {
+ // no special handling needed for explicit names or IMPORT, ALL
+ }
+ }
+ void Post(const parser::UseStmt &x) {
+ if (const auto *onlyList{std::get_if<std::list<parser::Only>>(&x.u)}) {
+ for (const auto &only : *onlyList) {
+ if (const auto *name{std::get_if<parser::Name>(&only.u)}) {
+ Hide(*name);
+ } else if (const auto *rename{std::get_if<parser::Rename>(&only.u)}) {
+ if (const auto *names{
+ std::get_if<parser::Rename::Names>(&rename->u)}) {
+ Hide(std::get<0>(names->t));
+ }
+ }
+ }
+ } else {
+ // USE may or may not shadow symbols in host scopes
+ nestedScopes_.front().hasUseWithoutOnly = true;
+ }
+ }
+ bool Pre(const parser::DerivedTypeStmt &x) {
+ Hide(std::get<parser::Name>(x.t));
+ PushScope();
+ return true;
+ }
+ void Post(const parser::DerivedTypeDef &) { PopScope(); }
+ bool Pre(const parser::SelectTypeConstruct &) {
+ PushScope();
+ return true;
+ }
+ void Post(const parser::SelectTypeConstruct &) { PopScope(); }
+ bool Pre(const parser::SelectTypeStmt &x) {
+ if (const auto &maybeName{std::get<1>(x.t)}) {
+ Hide(*maybeName);
+ }
+ return true;
+ }
+ bool Pre(const parser::SelectRankConstruct &) {
+ PushScope();
+ return true;
+ }
+ void Post(const parser::SelectRankConstruct &) { PopScope(); }
+ bool Pre(const parser::SelectRankStmt &x) {
+ if (const auto &maybeName{std::get<1>(x.t)}) {
+ Hide(*maybeName);
+ }
+ return true;
+ }
+
+protected:
+ bool IsHidden(SourceName name) {
+ for (const auto &scope : nestedScopes_) {
+ if (scope.locals.find(name) != scope.locals.end()) {
+ return true; // shadowed by nested declaration
+ }
+ if (scope.hasUseWithoutOnly) {
+ break;
+ }
+ if (scope.importOnly &&
+ scope.importOnly->find(name) == scope.importOnly->end()) {
+ return true; // not imported
+ }
+ }
+ return false;
+ }
+
+ void EndWalk() { CHECK(nestedScopes_.empty()); }
+
+private:
+ void PushScope() { nestedScopes_.emplace_front(); }
+ void PopScope() { nestedScopes_.pop_front(); }
+ void Hide(const parser::Name &name) {
+ nestedScopes_.front().locals.emplace(name.source);
+ }
+
+ int blockDepth_{0};
+ struct NestedScopeInfo {
+ bool hasUseWithoutOnly{false};
+ std::set<SourceName> locals;
+ std::optional<std::set<SourceName>> importOnly;
+ };
+ std::list<NestedScopeInfo> nestedScopes_;
+};
+
+class ExecutionPartAsyncIOSkimmer : public ExecutionPartSkimmerBase {
+public:
+ explicit ExecutionPartAsyncIOSkimmer(SemanticsContext &context)
+ : context_{context} {}
+
+ void Walk(const parser::Block &block) {
+ parser::Walk(block, *this);
+ EndWalk();
+ }
+
+ const std::set<SourceName> asyncIONames() const { return asyncIONames_; }
+
+ using ExecutionPartSkimmerBase::Post;
+ using ExecutionPartSkimmerBase::Pre;
+
+ bool Pre(const parser::IoControlSpec::Asynchronous &async) {
+ if (auto folded{evaluate::Fold(
+ context_.foldingContext(), AnalyzeExpr(context_, async.v))}) {
+ if (auto str{
+ evaluate::GetScalarConstantValue<evaluate::Ascii>(*folded)}) {
+ for (char ch : *str) {
+ if (ch != ' ') {
+ inAsyncIO_ = ch == 'y' || ch == 'Y';
+ break;
+ }
+ }
+ }
+ }
+ return true;
+ }
+ void Post(const parser::ReadStmt &) { inAsyncIO_ = false; }
+ void Post(const parser::WriteStmt &) { inAsyncIO_ = false; }
+ void Post(const parser::IoControlSpec::Size &size) {
+ if (const auto *designator{
+ std::get_if<common::Indirection<parser::Designator>>(
+ &size.v.thing.thing.u)}) {
+ NoteAsyncIODesignator(designator->value());
+ }
+ }
+ void Post(const parser::InputItem &x) {
+ if (const auto *var{std::get_if<parser::Variable>(&x.u)}) {
+ if (const auto *designator{
+ std::get_if<common::Indirection<parser::Designator>>(&var->u)}) {
+ NoteAsyncIODesignator(designator->value());
+ }
+ }
+ }
+ void Post(const parser::OutputItem &x) {
+ if (const auto *expr{std::get_if<parser::Expr>(&x.u)}) {
+ if (const auto *designator{
+ std::get_if<common::Indirection<parser::Designator>>(&expr->u)}) {
+ NoteAsyncIODesignator(designator->value());
+ }
+ }
+ }
+
+private:
+ void NoteAsyncIODesignator(const parser::Designator &designator) {
+ if (inAsyncIO_ && !InNestedBlockConstruct()) {
+ const parser::Name &name{parser::GetFirstName(designator)};
+ if (!IsHidden(name.source)) {
+ asyncIONames_.insert(name.source);
+ }
+ }
+ }
+
+ SemanticsContext &context_;
+ bool inAsyncIO_{false};
+ std::set<SourceName> asyncIONames_;
+};
+
+// Any data list item or SIZE= specifier of an I/O data transfer statement
+// with ASYNCHRONOUS="YES" implicitly has the ASYNCHRONOUS attribute in the
+// local scope.
+void ConstructVisitor::HandleImpliedAsynchronousInScope(
+ const parser::Block &block) {
+ ExecutionPartAsyncIOSkimmer skimmer{context()};
+ skimmer.Walk(block);
+ for (auto name : skimmer.asyncIONames()) {
+ if (Symbol * symbol{currScope().FindSymbol(name)}) {
+ if (!symbol->attrs().test(Attr::ASYNCHRONOUS)) {
+ if (&symbol->owner() != &currScope()) {
+ symbol = &*currScope()
+ .try_emplace(name, HostAssocDetails{*symbol})
+ .first->second;
+ }
+ if (symbol->has<AssocEntityDetails>()) {
+ symbol = const_cast<Symbol *>(&GetAssociationRoot(*symbol));
+ }
+ SetImplicitAttr(*symbol, Attr::ASYNCHRONOUS);
+ }
+ }
+ }
+}
+
// ResolveNamesVisitor implementation
bool ResolveNamesVisitor::Pre(const parser::FunctionReference &x) {
@@ -8247,6 +8478,7 @@ void ResolveNamesVisitor::FinishSpecificationPart(
if (const auto *statement{std::get_if<
parser::Statement<common::Indirection<parser::StmtFunctionStmt>>>(
&decl.u)}) {
+ messageHandler().set_currStmtSource(statement->source);
AnalyzeStmtFunctionStmt(statement->statement.value());
}
}
@@ -8362,16 +8594,25 @@ bool ResolveNamesVisitor::Pre(const parser::PointerAssignmentStmt &x) {
const auto &bounds{std::get<parser::PointerAssignmentStmt::Bounds>(x.t)};
const auto &expr{std::get<parser::Expr>(x.t)};
ResolveDataRef(dataRef);
+ Symbol *ptrSymbol{parser::GetLastName(dataRef).symbol};
Walk(bounds);
// Resolve unrestricted specific intrinsic procedures as in "p => cos".
if (const parser::Name * name{parser::Unwrap<parser::Name>(expr)}) {
if (NameIsKnownOrIntrinsic(*name)) {
- // If the name is known because it is an object entity from a host
- // procedure, create a host associated symbol.
- if (Symbol * symbol{name->symbol}; symbol &&
- symbol->GetUltimate().has<ObjectEntityDetails>() &&
- IsUplevelReference(*symbol)) {
- MakeHostAssocSymbol(*name, *symbol);
+ if (Symbol * symbol{name->symbol}) {
+ if (IsProcedurePointer(ptrSymbol) &&
+ !ptrSymbol->test(Symbol::Flag::Function) &&
+ !ptrSymbol->test(Symbol::Flag::Subroutine)) {
+ if (symbol->test(Symbol::Flag::Function)) {
+ ApplyImplicitRules(*ptrSymbol);
+ }
+ }
+ // If the name is known because it is an object entity from a host
+ // procedure, create a host associated symbol.
+ if (symbol->GetUltimate().has<ObjectEntityDetails>() &&
+ IsUplevelReference(*symbol)) {
+ MakeHostAssocSymbol(*name, *symbol);
+ }
}
return false;
}
@@ -8667,138 +8908,38 @@ bool ResolveNamesVisitor::Pre(const parser::Program &x) {
// References to procedures need to record that their symbols are known
// to be procedures, so that they don't get converted to objects by default.
-class ExecutionPartSkimmer {
+class ExecutionPartCallSkimmer : public ExecutionPartSkimmerBase {
public:
- explicit ExecutionPartSkimmer(ResolveNamesVisitor &resolver)
+ explicit ExecutionPartCallSkimmer(ResolveNamesVisitor &resolver)
: resolver_{resolver} {}
- void Walk(const parser::ExecutionPart *exec) {
- if (exec) {
- parser::Walk(*exec, *this);
- CHECK(nestedScopes_.empty());
- }
+ void Walk(const parser::ExecutionPart &exec) {
+ parser::Walk(exec, *this);
+ EndWalk();
}
- template <typename A> bool Pre(const A &) { return true; }
- template <typename A> void Post(const A &) {}
+ using ExecutionPartSkimmerBase::Post;
+ using ExecutionPartSkimmerBase::Pre;
+
void Post(const parser::FunctionReference &fr) {
NoteCall(Symbol::Flag::Function, fr.v, false);
}
void Post(const parser::CallStmt &cs) {
NoteCall(Symbol::Flag::Subroutine, cs.call, cs.chevrons.has_value());
}
- bool Pre(const parser::AssociateConstruct &) {
- PushScope();
- return true;
- }
- void Post(const parser::AssociateConstruct &) { PopScope(); }
- bool Pre(const parser::Association &x) {
- Hide(std::get<parser::Name>(x.t));
- return true;
- }
- bool Pre(const parser::BlockConstruct &) {
- PushScope();
- return true;
- }
- void Post(const parser::BlockConstruct &) { PopScope(); }
- bool Pre(const parser::EntityDecl &x) {
- Hide(std::get<parser::ObjectName>(x.t));
- return true;
- }
- void Post(const parser::ImportStmt &x) {
- if (x.kind == common::ImportKind::None ||
- x.kind == common::ImportKind::Only) {
- if (!nestedScopes_.front().importOnly.has_value()) {
- nestedScopes_.front().importOnly.emplace();
- }
- for (const auto &name : x.names) {
- nestedScopes_.front().importOnly->emplace(name.source);
- }
- } else {
- // no special handling needed for explicit names or IMPORT, ALL
- }
- }
- void Post(const parser::UseStmt &x) {
- if (const auto *onlyList{std::get_if<std::list<parser::Only>>(&x.u)}) {
- for (const auto &only : *onlyList) {
- if (const auto *name{std::get_if<parser::Name>(&only.u)}) {
- Hide(*name);
- } else if (const auto *rename{std::get_if<parser::Rename>(&only.u)}) {
- if (const auto *names{
- std::get_if<parser::Rename::Names>(&rename->u)}) {
- Hide(std::get<0>(names->t));
- }
- }
- }
- } else {
- // USE may or may not shadow symbols in host scopes
- nestedScopes_.front().hasUseWithoutOnly = true;
- }
- }
- bool Pre(const parser::DerivedTypeStmt &x) {
- Hide(std::get<parser::Name>(x.t));
- PushScope();
- return true;
- }
- void Post(const parser::DerivedTypeDef &) { PopScope(); }
- bool Pre(const parser::SelectTypeConstruct &) {
- PushScope();
- return true;
- }
- void Post(const parser::SelectTypeConstruct &) { PopScope(); }
- bool Pre(const parser::SelectTypeStmt &x) {
- if (const auto &maybeName{std::get<1>(x.t)}) {
- Hide(*maybeName);
- }
- return true;
- }
- bool Pre(const parser::SelectRankConstruct &) {
- PushScope();
- return true;
- }
- void Post(const parser::SelectRankConstruct &) { PopScope(); }
- bool Pre(const parser::SelectRankStmt &x) {
- if (const auto &maybeName{std::get<1>(x.t)}) {
- Hide(*maybeName);
- }
- return true;
- }
private:
void NoteCall(
Symbol::Flag flag, const parser::Call &call, bool hasCUDAChevrons) {
auto &designator{std::get<parser::ProcedureDesignator>(call.t)};
if (const auto *name{std::get_if<parser::Name>(&designator.u)}) {
- for (const auto &scope : nestedScopes_) {
- if (scope.locals.find(name->source) != scope.locals.end()) {
- return; // shadowed by nested declaration
- }
- if (scope.hasUseWithoutOnly) {
- break;
- }
- if (scope.importOnly &&
- scope.importOnly->find(name->source) == scope.importOnly->end()) {
- return; // not imported
- }
+ if (!IsHidden(name->source)) {
+ resolver_.NoteExecutablePartCall(flag, name->source, hasCUDAChevrons);
}
- resolver_.NoteExecutablePartCall(flag, name->source, hasCUDAChevrons);
}
}
- void PushScope() { nestedScopes_.emplace_front(); }
- void PopScope() { nestedScopes_.pop_front(); }
- void Hide(const parser::Name &name) {
- nestedScopes_.front().locals.emplace(name.source);
- }
-
ResolveNamesVisitor &resolver_;
-
- struct NestedScopeInfo {
- bool hasUseWithoutOnly{false};
- std::set<SourceName> locals;
- std::optional<std::set<SourceName>> importOnly;
- };
- std::list<NestedScopeInfo> nestedScopes_;
};
// Build the scope tree and resolve names in the specification parts of this
@@ -8843,7 +8984,10 @@ void ResolveNamesVisitor::ResolveSpecificationParts(ProgramTree &node) {
for (auto &child : node.children()) {
ResolveSpecificationParts(child);
}
- ExecutionPartSkimmer{*this}.Walk(node.exec());
+ if (node.exec()) {
+ ExecutionPartCallSkimmer{*this}.Walk(*node.exec());
+ HandleImpliedAsynchronousInScope(node.exec()->v);
+ }
EndScopeForNode(node);
// Ensure that every object entity has a type.
for (auto &pair : *node.scope()) {
@@ -8950,6 +9094,8 @@ void ResolveNamesVisitor::EndScopeForNode(const ProgramTree &node) {
// pointers, are deferred until all of the pertinent specification parts
// have been visited. This deferred processing enables the use of forward
// references in these circumstances.
+// Data statement objects with implicit derived types are finally
+// resolved here.
class DeferredCheckVisitor {
public:
explicit DeferredCheckVisitor(ResolveNamesVisitor &resolver)
@@ -8965,21 +9111,20 @@ public:
if (Symbol * symbol{name.symbol}) {
if (Scope * scope{symbol->scope()}) {
if (scope->IsDerivedType()) {
- resolver_.PushScope(*scope);
- pushedScope_ = true;
+ CHECK(outerScope_ == nullptr);
+ outerScope_ = &resolver_.currScope();
+ resolver_.SetScope(*scope);
}
}
}
}
void Post(const parser::EndTypeStmt &) {
- if (pushedScope_) {
- resolver_.PopScope();
- pushedScope_ = false;
+ if (outerScope_) {
+ resolver_.SetScope(*outerScope_);
+ outerScope_ = nullptr;
}
}
- bool Pre(const parser::BlockConstruct &x) { return true; }
-
void Post(const parser::ProcInterface &pi) {
if (const auto *name{std::get_if<parser::Name>(&pi.u)}) {
resolver_.CheckExplicitInterface(*name);
@@ -9006,10 +9151,20 @@ public:
resolver_.CheckExplicitInterface(tbps.interfaceName);
}
void Post(const parser::TypeBoundProcedureStmt::WithoutInterface &tbps) {
- if (pushedScope_) {
+ if (outerScope_) {
resolver_.CheckBindings(tbps);
}
}
+ bool Pre(const parser::DataStmtObject &) {
+ ++dataStmtObjectNesting_;
+ return true;
+ }
+ void Post(const parser::DataStmtObject &) { --dataStmtObjectNesting_; }
+ void Post(const parser::Designator &x) {
+ if (dataStmtObjectNesting_ > 0) {
+ resolver_.ResolveDesignator(x);
+ }
+ }
private:
void Init(const parser::Name &name,
@@ -9031,7 +9186,8 @@ private:
}
ResolveNamesVisitor &resolver_;
- bool pushedScope_{false};
+ Scope *outerScope_{nullptr};
+ int dataStmtObjectNesting_{0};
};
// Perform checks and completions that need to happen after all of
diff --git a/flang/lib/Semantics/resolve-names.h b/flang/lib/Semantics/resolve-names.h
index 78fdc2edc54a..a6797b456359 100644
--- a/flang/lib/Semantics/resolve-names.h
+++ b/flang/lib/Semantics/resolve-names.h
@@ -9,10 +9,6 @@
#ifndef FORTRAN_SEMANTICS_RESOLVE_NAMES_H_
#define FORTRAN_SEMANTICS_RESOLVE_NAMES_H_
-#include <iosfwd>
-#include <string>
-#include <vector>
-
namespace llvm {
class raw_ostream;
}
diff --git a/flang/lib/Semantics/semantics.cpp b/flang/lib/Semantics/semantics.cpp
index a76c42ae4f44..e58a8f3b22c0 100644
--- a/flang/lib/Semantics/semantics.cpp
+++ b/flang/lib/Semantics/semantics.cpp
@@ -515,7 +515,7 @@ bool SemanticsContext::IsTempName(const std::string &name) {
Scope *SemanticsContext::GetBuiltinModule(const char *name) {
return ModFileReader{*this}.Read(SourceName{name, std::strlen(name)},
- true /*intrinsic*/, nullptr, true /*silence errors*/);
+ true /*intrinsic*/, nullptr, /*silent=*/true);
}
void SemanticsContext::UseFortranBuiltinsModule() {
diff --git a/flang/runtime/Float128Math/CMakeLists.txt b/flang/runtime/Float128Math/CMakeLists.txt
index 8d276e8f1227..60d44c78be0f 100644
--- a/flang/runtime/Float128Math/CMakeLists.txt
+++ b/flang/runtime/Float128Math/CMakeLists.txt
@@ -59,7 +59,9 @@ set(sources
erf.cpp
erfc.cpp
exp.cpp
+ exponent.cpp
floor.cpp
+ fraction.cpp
hypot.cpp
j0.cpp
j1.cpp
@@ -69,10 +71,18 @@ set(sources
log.cpp
log10.cpp
lround.cpp
+ mod-real.cpp
+ modulo-real.cpp
+ nearest.cpp
+ norm2.cpp
pow.cpp
round.cpp
+ rrspacing.cpp
+ scale.cpp
+ set-exponent.cpp
sin.cpp
sinh.cpp
+ spacing.cpp
sqrt.cpp
tan.cpp
tanh.cpp
diff --git a/flang/runtime/Float128Math/acos.cpp b/flang/runtime/Float128Math/acos.cpp
index 531c79c7444b..14ff69448568 100644
--- a/flang/runtime/Float128Math/acos.cpp
+++ b/flang/runtime/Float128Math/acos.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(AcosF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Acos<RTNAME(AcosF128)>::invoke(x);
+ return Acos<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/acosh.cpp b/flang/runtime/Float128Math/acosh.cpp
index 1495120edd1a..9d70804e44a4 100644
--- a/flang/runtime/Float128Math/acosh.cpp
+++ b/flang/runtime/Float128Math/acosh.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(AcoshF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Acosh<RTNAME(AcoshF128)>::invoke(x);
+ return Acosh<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/asin.cpp b/flang/runtime/Float128Math/asin.cpp
index 2fb8c6c5e97d..6781b23f0363 100644
--- a/flang/runtime/Float128Math/asin.cpp
+++ b/flang/runtime/Float128Math/asin.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(AsinF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Asin<RTNAME(AsinF128)>::invoke(x);
+ return Asin<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/asinh.cpp b/flang/runtime/Float128Math/asinh.cpp
index 3630a77be42b..1310bc61c1de 100644
--- a/flang/runtime/Float128Math/asinh.cpp
+++ b/flang/runtime/Float128Math/asinh.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(AsinhF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Asinh<RTNAME(AsinhF128)>::invoke(x);
+ return Asinh<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/atan.cpp b/flang/runtime/Float128Math/atan.cpp
index 4609343e9d12..f01382df90c0 100644
--- a/flang/runtime/Float128Math/atan.cpp
+++ b/flang/runtime/Float128Math/atan.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(AtanF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Atan<RTNAME(AtanF128)>::invoke(x);
+ return Atan<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/atan2.cpp b/flang/runtime/Float128Math/atan2.cpp
index c0175e67ec71..dd646b0452b1 100644
--- a/flang/runtime/Float128Math/atan2.cpp
+++ b/flang/runtime/Float128Math/atan2.cpp
@@ -15,7 +15,7 @@ extern "C" {
CppTypeFor<TypeCategory::Real, 16> RTDEF(Atan2F128)(
CppTypeFor<TypeCategory::Real, 16> x,
CppTypeFor<TypeCategory::Real, 16> y) {
- return Atan2<RTNAME(Atan2F128)>::invoke(x, y);
+ return Atan2<true>::invoke(x, y);
}
#endif
diff --git a/flang/runtime/Float128Math/atanh.cpp b/flang/runtime/Float128Math/atanh.cpp
index bfacb967117d..5fc5ba5debc8 100644
--- a/flang/runtime/Float128Math/atanh.cpp
+++ b/flang/runtime/Float128Math/atanh.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(AtanhF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Atanh<RTNAME(AtanhF128)>::invoke(x);
+ return Atanh<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/cabs.cpp b/flang/runtime/Float128Math/cabs.cpp
index 63f2bdf8e177..3b8c9d17003c 100644
--- a/flang/runtime/Float128Math/cabs.cpp
+++ b/flang/runtime/Float128Math/cabs.cpp
@@ -10,15 +10,15 @@
namespace Fortran::runtime {
extern "C" {
-
+#if 0
+// FIXME: temporarily disabled. Need to add pure C entry point
+// using C _Complex ABI.
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
-// FIXME: the argument should be CppTypeFor<TypeCategory::Complex, 16>,
-// and it should be translated into the underlying library's
-// corresponding complex128 type.
-CppTypeFor<TypeCategory::Real, 16> RTDEF(CAbsF128)(ComplexF128 x) {
- return CAbs<RTNAME(CAbsF128)>::invoke(x);
+// NOTE: Flang calls the runtime APIs using C _Complex ABI
+CppTypeFor<TypeCategory::Real, 16> RTDEF(CAbsF128)(CFloat128ComplexType x) {
+ return CAbs<true>::invoke(x);
}
#endif
-
+#endif
} // extern "C"
} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/ceil.cpp b/flang/runtime/Float128Math/ceil.cpp
index a53a2c27c616..ed4d164a62be 100644
--- a/flang/runtime/Float128Math/ceil.cpp
+++ b/flang/runtime/Float128Math/ceil.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(CeilF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Ceil<RTNAME(CeilF128)>::invoke(x);
+ return Ceil<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/cos.cpp b/flang/runtime/Float128Math/cos.cpp
index 845c970bd8e6..b93c92f275f7 100644
--- a/flang/runtime/Float128Math/cos.cpp
+++ b/flang/runtime/Float128Math/cos.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(CosF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Cos<RTNAME(CosF128)>::invoke(x);
+ return Cos<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/cosh.cpp b/flang/runtime/Float128Math/cosh.cpp
index acf6ff4130ee..a3662a826dcb 100644
--- a/flang/runtime/Float128Math/cosh.cpp
+++ b/flang/runtime/Float128Math/cosh.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(CoshF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Cosh<RTNAME(CoshF128)>::invoke(x);
+ return Cosh<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/erf.cpp b/flang/runtime/Float128Math/erf.cpp
index 862f3b974118..631f71c76eff 100644
--- a/flang/runtime/Float128Math/erf.cpp
+++ b/flang/runtime/Float128Math/erf.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(ErfF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Erf<RTNAME(ErfF128)>::invoke(x);
+ return Erf<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/erfc.cpp b/flang/runtime/Float128Math/erfc.cpp
index 0ac0b9455637..ea3cd646d8c4 100644
--- a/flang/runtime/Float128Math/erfc.cpp
+++ b/flang/runtime/Float128Math/erfc.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(ErfcF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Erfc<RTNAME(ErfcF128)>::invoke(x);
+ return Erfc<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/exp.cpp b/flang/runtime/Float128Math/exp.cpp
index 50386fdbfb64..b1161b0f2929 100644
--- a/flang/runtime/Float128Math/exp.cpp
+++ b/flang/runtime/Float128Math/exp.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(ExpF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Exp<RTNAME(ExpF128)>::invoke(x);
+ return Exp<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/exponent.cpp b/flang/runtime/Float128Math/exponent.cpp
new file mode 100644
index 000000000000..c0e43c0ee8d3
--- /dev/null
+++ b/flang/runtime/Float128Math/exponent.cpp
@@ -0,0 +1,26 @@
+//===-- runtime/Float128Math/exponent.cpp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+// EXPONENT (16.9.75)
+CppTypeFor<TypeCategory::Integer, 4> RTDEF(Exponent16_4)(F128Type x) {
+ return Exponent<CppTypeFor<TypeCategory::Integer, 4>>(x);
+}
+CppTypeFor<TypeCategory::Integer, 8> RTDEF(Exponent16_8)(F128Type x) {
+ return Exponent<CppTypeFor<TypeCategory::Integer, 8>>(x);
+}
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/floor.cpp b/flang/runtime/Float128Math/floor.cpp
index 48cf4e014480..78a94984cac8 100644
--- a/flang/runtime/Float128Math/floor.cpp
+++ b/flang/runtime/Float128Math/floor.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(FloorF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Floor<RTNAME(FloorF128)>::invoke(x);
+ return Floor<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/fraction.cpp b/flang/runtime/Float128Math/fraction.cpp
new file mode 100644
index 000000000000..8de6d3c7ff6c
--- /dev/null
+++ b/flang/runtime/Float128Math/fraction.cpp
@@ -0,0 +1,21 @@
+//===-- runtime/Float128Math/fraction.cpp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+// FRACTION (16.9.80)
+F128Type RTDEF(Fraction16)(F128Type x) { return Fraction(x); }
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/hypot.cpp b/flang/runtime/Float128Math/hypot.cpp
index 33c83a165499..b4fa1d66bcfa 100644
--- a/flang/runtime/Float128Math/hypot.cpp
+++ b/flang/runtime/Float128Math/hypot.cpp
@@ -15,7 +15,7 @@ extern "C" {
CppTypeFor<TypeCategory::Real, 16> RTDEF(HypotF128)(
CppTypeFor<TypeCategory::Real, 16> x,
CppTypeFor<TypeCategory::Real, 16> y) {
- return Hypot<RTNAME(HypotF128)>::invoke(x, y);
+ return Hypot<true>::invoke(x, y);
}
#endif
diff --git a/flang/runtime/Float128Math/j0.cpp b/flang/runtime/Float128Math/j0.cpp
index f8f3fe71d8a6..9390a7eeb3c6 100644
--- a/flang/runtime/Float128Math/j0.cpp
+++ b/flang/runtime/Float128Math/j0.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(J0F128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return J0<RTNAME(J0F128)>::invoke(x);
+ return J0<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/j1.cpp b/flang/runtime/Float128Math/j1.cpp
index 9a51b973e1cf..c54927123388 100644
--- a/flang/runtime/Float128Math/j1.cpp
+++ b/flang/runtime/Float128Math/j1.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(J1F128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return J1<RTNAME(J1F128)>::invoke(x);
+ return J1<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/jn.cpp b/flang/runtime/Float128Math/jn.cpp
index 644a66863c0d..15afd83400c3 100644
--- a/flang/runtime/Float128Math/jn.cpp
+++ b/flang/runtime/Float128Math/jn.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(JnF128)(
int n, CppTypeFor<TypeCategory::Real, 16> x) {
- return Jn<RTNAME(JnF128)>::invoke(n, x);
+ return Jn<true>::invoke(n, x);
}
#endif
diff --git a/flang/runtime/Float128Math/lgamma.cpp b/flang/runtime/Float128Math/lgamma.cpp
index fff7dfcb9c15..ac31c89a912b 100644
--- a/flang/runtime/Float128Math/lgamma.cpp
+++ b/flang/runtime/Float128Math/lgamma.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(LgammaF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Lgamma<RTNAME(LgammaF128)>::invoke(x);
+ return Lgamma<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/llround.cpp b/flang/runtime/Float128Math/llround.cpp
index 00c62818af19..b77281c507fe 100644
--- a/flang/runtime/Float128Math/llround.cpp
+++ b/flang/runtime/Float128Math/llround.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Integer, 8> RTDEF(LlroundF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Llround<RTNAME(LlroundF128)>::invoke(x);
+ return Llround<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/log.cpp b/flang/runtime/Float128Math/log.cpp
index 0cfe329c6f7f..38e6b581fd84 100644
--- a/flang/runtime/Float128Math/log.cpp
+++ b/flang/runtime/Float128Math/log.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(LogF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Log<RTNAME(LogF128)>::invoke(x);
+ return Log<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/log10.cpp b/flang/runtime/Float128Math/log10.cpp
index cd8bf27fcb12..3c89c0e70777 100644
--- a/flang/runtime/Float128Math/log10.cpp
+++ b/flang/runtime/Float128Math/log10.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(Log10F128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Log10<RTNAME(Log10F128)>::invoke(x);
+ return Log10<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/lround.cpp b/flang/runtime/Float128Math/lround.cpp
index 6ced66a1b2d3..ce7a228038a1 100644
--- a/flang/runtime/Float128Math/lround.cpp
+++ b/flang/runtime/Float128Math/lround.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Integer, 4> RTDEF(LroundF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Lround<RTNAME(LroundF128)>::invoke(x);
+ return Lround<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/math-entries.h b/flang/runtime/Float128Math/math-entries.h
index fe1525468edc..ad3f6aa18aa9 100644
--- a/flang/runtime/Float128Math/math-entries.h
+++ b/flang/runtime/Float128Math/math-entries.h
@@ -13,36 +13,40 @@
#include "flang/Common/float128.h"
#include "flang/Runtime/entry-names.h"
#include <cfloat>
+#include <cmath>
#include <type_traits>
+namespace {
+using namespace Fortran::runtime;
+using F128RetType = CppTypeFor<TypeCategory::Real, 16>;
+using I32RetType = CppTypeFor<TypeCategory::Integer, 4>;
+using I64RetType = CppTypeFor<TypeCategory::Integer, 8>;
+} // namespace
+
namespace Fortran::runtime {
// Define a class template to gracefully fail, when
// there is no specialized template that implements
// the required function via using the third-party
// implementation.
-#define DEFINE_FALLBACK(caller) \
- template <auto F> struct caller { \
- template <typename... ATs> \
- [[noreturn]] static std::invoke_result_t<decltype(F), ATs...> invoke( \
- ATs... args) { \
+#define DEFINE_FALLBACK(caller, ret_type) \
+ template <bool = false, typename RT = ret_type> struct caller { \
+ template <typename... ATs> [[noreturn]] static RT invoke(ATs... args) { \
Terminator terminator{__FILE__, __LINE__}; \
terminator.Crash("Float128 variant of '%s' is unsupported", #caller); \
} \
};
// Define template specialization that is calling the third-party
-// implementation. The template is specialized by a function pointer
-// that is the FortranFloat128Math entry point. The signatures
-// of the caller and the callee must match.
+// implementation.
//
// Defining the specialization for any target library requires
// adding the generic template via DEFINE_FALLBACK, so that
// a build with another target library that does not define
// the same alias can gracefully fail in runtime.
#define DEFINE_SIMPLE_ALIAS(caller, callee) \
- template <typename RT, typename... ATs, RT (*p)(ATs...)> struct caller<p> { \
- static RT invoke(ATs... args) { \
+ template <typename RT> struct caller<true, RT> { \
+ template <typename... ATs> static RT invoke(ATs... args) { \
static_assert(std::is_invocable_r_v<RT, \
decltype(callee(std::declval<ATs>()...))(ATs...), ATs...>); \
if constexpr (std::is_same_v<RT, void>) { \
@@ -54,61 +58,63 @@ namespace Fortran::runtime {
};
// Define fallback callers.
-DEFINE_FALLBACK(Acos)
-DEFINE_FALLBACK(Acosh)
-DEFINE_FALLBACK(Asin)
-DEFINE_FALLBACK(Asinh)
-DEFINE_FALLBACK(Atan)
-DEFINE_FALLBACK(Atan2)
-DEFINE_FALLBACK(Atanh)
-DEFINE_FALLBACK(CAbs)
-DEFINE_FALLBACK(Ceil)
-DEFINE_FALLBACK(Cos)
-DEFINE_FALLBACK(Cosh)
-DEFINE_FALLBACK(Erf)
-DEFINE_FALLBACK(Erfc)
-DEFINE_FALLBACK(Exp)
-DEFINE_FALLBACK(Floor)
-DEFINE_FALLBACK(Hypot)
-DEFINE_FALLBACK(J0)
-DEFINE_FALLBACK(J1)
-DEFINE_FALLBACK(Jn)
-DEFINE_FALLBACK(Lgamma)
-DEFINE_FALLBACK(Llround)
-DEFINE_FALLBACK(Lround)
-DEFINE_FALLBACK(Log)
-DEFINE_FALLBACK(Log10)
-DEFINE_FALLBACK(Pow)
-DEFINE_FALLBACK(Round)
-DEFINE_FALLBACK(Sin)
-DEFINE_FALLBACK(Sinh)
-DEFINE_FALLBACK(Sqrt)
-DEFINE_FALLBACK(Tan)
-DEFINE_FALLBACK(Tanh)
-DEFINE_FALLBACK(Tgamma)
-DEFINE_FALLBACK(Trunc)
-DEFINE_FALLBACK(Y0)
-DEFINE_FALLBACK(Y1)
-DEFINE_FALLBACK(Yn)
+#define DEFINE_FALLBACK_F128(caller) DEFINE_FALLBACK(caller, ::F128RetType)
+#define DEFINE_FALLBACK_I32(caller) DEFINE_FALLBACK(caller, ::I32RetType)
+#define DEFINE_FALLBACK_I64(caller) DEFINE_FALLBACK(caller, ::I64RetType)
-// Define ComplexF128 type that is compatible with
-// the type of results/arguments of libquadmath.
-// TODO: this may need more work for other libraries/compilers.
-#if !defined(_ARCH_PPC) || defined(__LONG_DOUBLE_IEEE128__)
-typedef _Complex float __attribute__((mode(TC))) ComplexF128;
-#else
-typedef _Complex float __attribute__((mode(KC))) ComplexF128;
-#endif
+DEFINE_FALLBACK_F128(Abs)
+DEFINE_FALLBACK_F128(Acos)
+DEFINE_FALLBACK_F128(Acosh)
+DEFINE_FALLBACK_F128(Asin)
+DEFINE_FALLBACK_F128(Asinh)
+DEFINE_FALLBACK_F128(Atan)
+DEFINE_FALLBACK_F128(Atan2)
+DEFINE_FALLBACK_F128(Atanh)
+DEFINE_FALLBACK_F128(Ceil)
+DEFINE_FALLBACK_F128(Cos)
+DEFINE_FALLBACK_F128(Cosh)
+DEFINE_FALLBACK_F128(Erf)
+DEFINE_FALLBACK_F128(Erfc)
+DEFINE_FALLBACK_F128(Exp)
+DEFINE_FALLBACK_F128(Floor)
+DEFINE_FALLBACK_F128(Frexp)
+DEFINE_FALLBACK_F128(Hypot)
+DEFINE_FALLBACK_I32(Ilogb)
+DEFINE_FALLBACK_I32(Isinf)
+DEFINE_FALLBACK_I32(Isnan)
+DEFINE_FALLBACK_F128(J0)
+DEFINE_FALLBACK_F128(J1)
+DEFINE_FALLBACK_F128(Jn)
+DEFINE_FALLBACK_F128(Ldexp)
+DEFINE_FALLBACK_F128(Lgamma)
+DEFINE_FALLBACK_I64(Llround)
+DEFINE_FALLBACK_F128(Log)
+DEFINE_FALLBACK_F128(Log10)
+DEFINE_FALLBACK_I32(Lround)
+DEFINE_FALLBACK_F128(Nextafter)
+DEFINE_FALLBACK_F128(Pow)
+DEFINE_FALLBACK_F128(Qnan)
+DEFINE_FALLBACK_F128(Round)
+DEFINE_FALLBACK_F128(Sin)
+DEFINE_FALLBACK_F128(Sinh)
+DEFINE_FALLBACK_F128(Sqrt)
+DEFINE_FALLBACK_F128(Tan)
+DEFINE_FALLBACK_F128(Tanh)
+DEFINE_FALLBACK_F128(Tgamma)
+DEFINE_FALLBACK_F128(Trunc)
+DEFINE_FALLBACK_F128(Y0)
+DEFINE_FALLBACK_F128(Y1)
+DEFINE_FALLBACK_F128(Yn)
#if HAS_LIBM
-// Define wrapper callers for libm.
-#include <ccomplex>
-#include <cmath>
+#include <limits>
+// Define wrapper callers for libm.
#if LDBL_MANT_DIG == 113
// Use STD math functions. They provide IEEE-754 128-bit float
// support either via 'long double' or __float128.
// The Bessel's functions are not present in STD namespace.
+DEFINE_SIMPLE_ALIAS(Abs, std::abs)
DEFINE_SIMPLE_ALIAS(Acos, std::acos)
DEFINE_SIMPLE_ALIAS(Acosh, std::acosh)
DEFINE_SIMPLE_ALIAS(Asin, std::asin)
@@ -126,15 +132,21 @@ DEFINE_SIMPLE_ALIAS(Erf, std::erf)
DEFINE_SIMPLE_ALIAS(Erfc, std::erfc)
DEFINE_SIMPLE_ALIAS(Exp, std::exp)
DEFINE_SIMPLE_ALIAS(Floor, std::floor)
+DEFINE_SIMPLE_ALIAS(Frexp, std::frexp)
DEFINE_SIMPLE_ALIAS(Hypot, std::hypot)
+DEFINE_SIMPLE_ALIAS(Ilogb, std::ilogb)
+DEFINE_SIMPLE_ALIAS(Isinf, std::isinf)
+DEFINE_SIMPLE_ALIAS(Isnan, std::isnan)
DEFINE_SIMPLE_ALIAS(J0, j0l)
DEFINE_SIMPLE_ALIAS(J1, j1l)
DEFINE_SIMPLE_ALIAS(Jn, jnl)
+DEFINE_SIMPLE_ALIAS(Ldexp, std::ldexp)
DEFINE_SIMPLE_ALIAS(Lgamma, std::lgamma)
DEFINE_SIMPLE_ALIAS(Llround, std::llround)
-DEFINE_SIMPLE_ALIAS(Lround, std::lround)
DEFINE_SIMPLE_ALIAS(Log, std::log)
DEFINE_SIMPLE_ALIAS(Log10, std::log10)
+DEFINE_SIMPLE_ALIAS(Lround, std::lround)
+DEFINE_SIMPLE_ALIAS(Nextafter, std::nextafter)
DEFINE_SIMPLE_ALIAS(Pow, std::pow)
DEFINE_SIMPLE_ALIAS(Round, std::round)
DEFINE_SIMPLE_ALIAS(Sin, std::sin)
@@ -147,6 +159,12 @@ DEFINE_SIMPLE_ALIAS(Trunc, std::trunc)
DEFINE_SIMPLE_ALIAS(Y0, y0l)
DEFINE_SIMPLE_ALIAS(Y1, y1l)
DEFINE_SIMPLE_ALIAS(Yn, ynl)
+
+// Use numeric_limits to produce infinity of the right type.
+#define F128_RT_INFINITY \
+ (std::numeric_limits<CppTypeFor<TypeCategory::Real, 16>>::infinity())
+#define F128_RT_QNAN \
+ (std::numeric_limits<CppTypeFor<TypeCategory::Real, 16>>::quiet_NaN())
#else // LDBL_MANT_DIG != 113
#if !HAS_LIBMF128
// glibc >=2.26 seems to have complete support for __float128
@@ -165,6 +183,7 @@ DEFINE_SIMPLE_ALIAS(Yn, ynl)
#elif HAS_QUADMATHLIB
// Define wrapper callers for libquadmath.
#include "quadmath.h"
+DEFINE_SIMPLE_ALIAS(Abs, fabsq)
DEFINE_SIMPLE_ALIAS(Acos, acosq)
DEFINE_SIMPLE_ALIAS(Acosh, acoshq)
DEFINE_SIMPLE_ALIAS(Asin, asinq)
@@ -172,7 +191,6 @@ DEFINE_SIMPLE_ALIAS(Asinh, asinhq)
DEFINE_SIMPLE_ALIAS(Atan, atanq)
DEFINE_SIMPLE_ALIAS(Atan2, atan2q)
DEFINE_SIMPLE_ALIAS(Atanh, atanhq)
-DEFINE_SIMPLE_ALIAS(CAbs, cabsq)
DEFINE_SIMPLE_ALIAS(Ceil, ceilq)
DEFINE_SIMPLE_ALIAS(Cos, cosq)
DEFINE_SIMPLE_ALIAS(Cosh, coshq)
@@ -180,15 +198,21 @@ DEFINE_SIMPLE_ALIAS(Erf, erfq)
DEFINE_SIMPLE_ALIAS(Erfc, erfcq)
DEFINE_SIMPLE_ALIAS(Exp, expq)
DEFINE_SIMPLE_ALIAS(Floor, floorq)
+DEFINE_SIMPLE_ALIAS(Frexp, frexpq)
DEFINE_SIMPLE_ALIAS(Hypot, hypotq)
+DEFINE_SIMPLE_ALIAS(Ilogb, ilogbq)
+DEFINE_SIMPLE_ALIAS(Isinf, isinfq)
+DEFINE_SIMPLE_ALIAS(Isnan, isnanq)
DEFINE_SIMPLE_ALIAS(J0, j0q)
DEFINE_SIMPLE_ALIAS(J1, j1q)
DEFINE_SIMPLE_ALIAS(Jn, jnq)
+DEFINE_SIMPLE_ALIAS(Ldexp, ldexpq)
DEFINE_SIMPLE_ALIAS(Lgamma, lgammaq)
DEFINE_SIMPLE_ALIAS(Llround, llroundq)
-DEFINE_SIMPLE_ALIAS(Lround, lroundq)
DEFINE_SIMPLE_ALIAS(Log, logq)
DEFINE_SIMPLE_ALIAS(Log10, log10q)
+DEFINE_SIMPLE_ALIAS(Lround, lroundq)
+DEFINE_SIMPLE_ALIAS(Nextafter, nextafterq)
DEFINE_SIMPLE_ALIAS(Pow, powq)
DEFINE_SIMPLE_ALIAS(Round, roundq)
DEFINE_SIMPLE_ALIAS(Sin, sinq)
@@ -201,7 +225,12 @@ DEFINE_SIMPLE_ALIAS(Trunc, truncq)
DEFINE_SIMPLE_ALIAS(Y0, y0q)
DEFINE_SIMPLE_ALIAS(Y1, y1q)
DEFINE_SIMPLE_ALIAS(Yn, ynq)
+
+// Use cmath INFINITY/NAN definition. Rely on C implicit conversions.
+#define F128_RT_INFINITY (INFINITY)
+#define F128_RT_QNAN (NAN)
#endif
+
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_FLOAT128MATH_MATH_ENTRIES_H_
diff --git a/flang/runtime/Float128Math/mod-real.cpp b/flang/runtime/Float128Math/mod-real.cpp
new file mode 100644
index 000000000000..9cc2926e45d5
--- /dev/null
+++ b/flang/runtime/Float128Math/mod-real.cpp
@@ -0,0 +1,24 @@
+//===-- runtime/Float128Math/mod-real.cpp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+// MOD (16.9.135)
+F128Type RTDEF(ModReal16)(
+ F128Type x, F128Type p, const char *sourceFile, int sourceLine) {
+ return RealMod<false>(x, p, sourceFile, sourceLine);
+}
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/modulo-real.cpp b/flang/runtime/Float128Math/modulo-real.cpp
new file mode 100644
index 000000000000..b25797fd8f41
--- /dev/null
+++ b/flang/runtime/Float128Math/modulo-real.cpp
@@ -0,0 +1,24 @@
+//===-- runtime/Float128Math/modulo-real.cpp ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+// MODULO (16.9.136)
+F128Type RTDEF(ModuloReal16)(
+ F128Type x, F128Type p, const char *sourceFile, int sourceLine) {
+ return RealMod<true>(x, p, sourceFile, sourceLine);
+}
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/nearest.cpp b/flang/runtime/Float128Math/nearest.cpp
new file mode 100644
index 000000000000..fd990532e522
--- /dev/null
+++ b/flang/runtime/Float128Math/nearest.cpp
@@ -0,0 +1,23 @@
+//===-- runtime/Float128Math/nearest.cpp ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+CppTypeFor<TypeCategory::Real, 16> RTDEF(Nearest16)(
+ CppTypeFor<TypeCategory::Real, 16> x, bool positive) {
+ return Nextafter<true>::invoke(
+ x, positive ? F128_RT_INFINITY : -F128_RT_INFINITY);
+}
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/norm2.cpp b/flang/runtime/Float128Math/norm2.cpp
new file mode 100644
index 000000000000..15c482f7f007
--- /dev/null
+++ b/flang/runtime/Float128Math/norm2.cpp
@@ -0,0 +1,35 @@
+//===-- runtime/Float128Math/norm2.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+#include "reduction-templates.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+CppTypeFor<TypeCategory::Real, 16> RTDEF(Norm2_16)(
+ const Descriptor &x, const char *source, int line, int dim) {
+ return GetTotalReduction<TypeCategory::Real, 16>(
+ x, source, line, dim, nullptr, Norm2Accumulator<16>{x}, "NORM2");
+}
+
+void RTDEF(Norm2DimReal16)(Descriptor &result, const Descriptor &x, int dim,
+ const char *source, int line) {
+ Terminator terminator{source, line};
+ auto type{x.type().GetCategoryAndKind()};
+ RUNTIME_CHECK(terminator, type);
+ RUNTIME_CHECK(
+ terminator, type->first == TypeCategory::Real && type->second == 16);
+ Norm2Helper<16>{}(result, x, dim, nullptr, terminator);
+}
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/numeric-template-specs.h b/flang/runtime/Float128Math/numeric-template-specs.h
new file mode 100644
index 000000000000..a0a77230c3e9
--- /dev/null
+++ b/flang/runtime/Float128Math/numeric-template-specs.h
@@ -0,0 +1,55 @@
+//===-- runtime/Float128Math/numeric-template-specs.h -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_RUNTIME_FLOAT128MATH_NUMERIC_TEMPLATE_SPECS_H_
+#define FORTRAN_RUNTIME_FLOAT128MATH_NUMERIC_TEMPLATE_SPECS_H_
+
+#include "math-entries.h"
+#include "numeric-templates.h"
+
+namespace Fortran::runtime {
+using F128Type = CppTypeFor<TypeCategory::Real, 16>;
+
+template <> struct ABSTy<F128Type> {
+ static F128Type compute(F128Type x) { return Abs<true>::invoke(x); }
+};
+
+template <> struct FREXPTy<F128Type> {
+ static F128Type compute(F128Type x, int *e) {
+ return Frexp<true>::invoke(x, e);
+ }
+};
+
+template <> struct ILOGBTy<F128Type> {
+ static int compute(F128Type x) { return Ilogb<true>::invoke(x); }
+};
+
+template <> struct ISINFTy<F128Type> {
+ static bool compute(F128Type x) { return Isinf<true>::invoke(x); }
+};
+
+template <> struct ISNANTy<F128Type> {
+ static bool compute(F128Type x) { return Isnan<true>::invoke(x); }
+};
+
+template <> struct LDEXPTy<F128Type> {
+ template <typename ET> static F128Type compute(F128Type x, ET p) {
+ return Ldexp<true>::invoke(x, p);
+ }
+};
+
+template <> struct QNANTy<F128Type> {
+ static F128Type compute() { return F128_RT_QNAN; }
+};
+
+template <> struct SQRTTy<F128Type> {
+ static F128Type compute(F128Type x) { return Sqrt<true>::invoke(x); }
+};
+
+} // namespace Fortran::runtime
+#endif // FORTRAN_RUNTIME_FLOAT128MATH_NUMERIC_TEMPLATE_SPECS_H_
diff --git a/flang/runtime/Float128Math/pow.cpp b/flang/runtime/Float128Math/pow.cpp
index 02958a890e52..7a48828ee3e7 100644
--- a/flang/runtime/Float128Math/pow.cpp
+++ b/flang/runtime/Float128Math/pow.cpp
@@ -15,7 +15,7 @@ extern "C" {
CppTypeFor<TypeCategory::Real, 16> RTDEF(PowF128)(
CppTypeFor<TypeCategory::Real, 16> x,
CppTypeFor<TypeCategory::Real, 16> y) {
- return Pow<RTNAME(PowF128)>::invoke(x, y);
+ return Pow<true>::invoke(x, y);
}
#endif
diff --git a/flang/runtime/Float128Math/round.cpp b/flang/runtime/Float128Math/round.cpp
index 43ab57768cb7..6420c1bc9cd2 100644
--- a/flang/runtime/Float128Math/round.cpp
+++ b/flang/runtime/Float128Math/round.cpp
@@ -18,7 +18,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(RoundF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Round<RTNAME(RoundF128)>::invoke(x);
+ return Round<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/rrspacing.cpp b/flang/runtime/Float128Math/rrspacing.cpp
new file mode 100644
index 000000000000..f2187f42313a
--- /dev/null
+++ b/flang/runtime/Float128Math/rrspacing.cpp
@@ -0,0 +1,21 @@
+//===-- runtime/Float128Math/rrspacing.cpp --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+// FRACTION (16.9.80)
+F128Type RTDEF(RRSpacing16)(F128Type x) { return RRSpacing<113>(x); }
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/scale.cpp b/flang/runtime/Float128Math/scale.cpp
new file mode 100644
index 000000000000..d6b843150e72
--- /dev/null
+++ b/flang/runtime/Float128Math/scale.cpp
@@ -0,0 +1,28 @@
+//===-- runtime/Float128Math/scale.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+#include <limits>
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+F128Type RTDEF(Scale16)(F128Type x, std::int64_t p) {
+ auto ip{static_cast<int>(p)};
+ if (ip != p) {
+ ip = p < 0 ? std::numeric_limits<int>::min()
+ : std::numeric_limits<int>::max();
+ }
+ return LDEXPTy<F128Type>::compute(x, ip);
+}
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/set-exponent.cpp b/flang/runtime/Float128Math/set-exponent.cpp
new file mode 100644
index 000000000000..0f942d238b8f
--- /dev/null
+++ b/flang/runtime/Float128Math/set-exponent.cpp
@@ -0,0 +1,23 @@
+//===-- runtime/Float128Math/set-exponent.cpp -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+// SET_EXPONENT (16.9.171)
+F128Type RTDEF(SetExponent16)(F128Type x, std::int64_t p) {
+ return SetExponent(x, p);
+}
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/sin.cpp b/flang/runtime/Float128Math/sin.cpp
index 013eb9d119a6..8ebc3f988158 100644
--- a/flang/runtime/Float128Math/sin.cpp
+++ b/flang/runtime/Float128Math/sin.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(SinF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Sin<RTNAME(SinF128)>::invoke(x);
+ return Sin<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/sinh.cpp b/flang/runtime/Float128Math/sinh.cpp
index 9c907041fd7e..aa716a3e51ef 100644
--- a/flang/runtime/Float128Math/sinh.cpp
+++ b/flang/runtime/Float128Math/sinh.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(SinhF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Sinh<RTNAME(SinhF128)>::invoke(x);
+ return Sinh<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/spacing.cpp b/flang/runtime/Float128Math/spacing.cpp
new file mode 100644
index 000000000000..d00e74644f8a
--- /dev/null
+++ b/flang/runtime/Float128Math/spacing.cpp
@@ -0,0 +1,21 @@
+//===-- runtime/Float128Math/spacing.cpp ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "numeric-template-specs.h"
+
+namespace Fortran::runtime {
+extern "C" {
+
+#if LDBL_MANT_DIG != 113 && HAS_FLOAT128
+// SPACING (16.9.180)
+F128Type RTDEF(Spacing16)(F128Type x) { return Spacing<113>(x); }
+#endif
+
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/Float128Math/sqrt.cpp b/flang/runtime/Float128Math/sqrt.cpp
index aafbd850ca97..83165a4c6231 100644
--- a/flang/runtime/Float128Math/sqrt.cpp
+++ b/flang/runtime/Float128Math/sqrt.cpp
@@ -7,15 +7,13 @@
//===----------------------------------------------------------------------===//
#include "math-entries.h"
+#include "numeric-template-specs.h"
namespace Fortran::runtime {
extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
-CppTypeFor<TypeCategory::Real, 16> RTDEF(SqrtF128)(
- CppTypeFor<TypeCategory::Real, 16> x) {
- return Sqrt<RTNAME(SqrtF128)>::invoke(x);
-}
+F128Type RTDEF(SqrtF128)(F128Type x) { return SQRTTy<F128Type>::compute(x); }
#endif
} // extern "C"
diff --git a/flang/runtime/Float128Math/tan.cpp b/flang/runtime/Float128Math/tan.cpp
index 01d3c7bdd2e8..8f4b723ca977 100644
--- a/flang/runtime/Float128Math/tan.cpp
+++ b/flang/runtime/Float128Math/tan.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(TanF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Tan<RTNAME(TanF128)>::invoke(x);
+ return Tan<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/tanh.cpp b/flang/runtime/Float128Math/tanh.cpp
index fedc1a4120ca..b43a89520b67 100644
--- a/flang/runtime/Float128Math/tanh.cpp
+++ b/flang/runtime/Float128Math/tanh.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(TanhF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Tanh<RTNAME(TanhF128)>::invoke(x);
+ return Tanh<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/tgamma.cpp b/flang/runtime/Float128Math/tgamma.cpp
index 329defff38cf..93f97800bdc9 100644
--- a/flang/runtime/Float128Math/tgamma.cpp
+++ b/flang/runtime/Float128Math/tgamma.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(TgammaF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Tgamma<RTNAME(TgammaF128)>::invoke(x);
+ return Tgamma<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/trunc.cpp b/flang/runtime/Float128Math/trunc.cpp
index 3cab219ce31c..ca15a739c030 100644
--- a/flang/runtime/Float128Math/trunc.cpp
+++ b/flang/runtime/Float128Math/trunc.cpp
@@ -18,7 +18,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(TruncF128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Trunc<RTNAME(TruncF128)>::invoke(x);
+ return Trunc<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/y0.cpp b/flang/runtime/Float128Math/y0.cpp
index f3e2ee454aea..d6f39aac1053 100644
--- a/flang/runtime/Float128Math/y0.cpp
+++ b/flang/runtime/Float128Math/y0.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(Y0F128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Y0<RTNAME(Y0F128)>::invoke(x);
+ return Y0<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/y1.cpp b/flang/runtime/Float128Math/y1.cpp
index c117bbcb2b5a..477d36a9ea3c 100644
--- a/flang/runtime/Float128Math/y1.cpp
+++ b/flang/runtime/Float128Math/y1.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(Y1F128)(
CppTypeFor<TypeCategory::Real, 16> x) {
- return Y1<RTNAME(Y1F128)>::invoke(x);
+ return Y1<true>::invoke(x);
}
#endif
diff --git a/flang/runtime/Float128Math/yn.cpp b/flang/runtime/Float128Math/yn.cpp
index 237bc2866a0d..3a040cc88589 100644
--- a/flang/runtime/Float128Math/yn.cpp
+++ b/flang/runtime/Float128Math/yn.cpp
@@ -14,7 +14,7 @@ extern "C" {
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(YnF128)(
int n, CppTypeFor<TypeCategory::Real, 16> x) {
- return Yn<RTNAME(YnF128)>::invoke(n, x);
+ return Yn<true>::invoke(n, x);
}
#endif
diff --git a/flang/runtime/complex-reduction.c b/flang/runtime/complex-reduction.c
index d77e1c0a5500..72c31ce08b87 100644
--- a/flang/runtime/complex-reduction.c
+++ b/flang/runtime/complex-reduction.c
@@ -19,6 +19,11 @@ struct CppComplexDouble {
struct CppComplexLongDouble {
long double r, i;
};
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+struct CppComplexFloat128 {
+ CFloat128Type r, i;
+};
+#endif
/* Not all environments define CMPLXF, CMPLX, CMPLXL. */
@@ -70,6 +75,29 @@ static long_double_Complex_t CMPLXL(long double r, long double i) {
#endif
#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+#ifndef CMPLXF128
+/*
+ * GCC 7.4.0 (currently minimum GCC version for llvm builds)
+ * supports __builtin_complex. For Clang, require >=12.0.
+ * Otherwise, rely on the memory layout compatibility.
+ */
+#if (defined(__clang_major__) && (__clang_major__ >= 12)) || defined(__GNUC__)
+#define CMPLXF128 __builtin_complex
+#else
+static CFloat128ComplexType CMPLXF128(CFloat128Type r, CFloat128Type i) {
+ union {
+ struct CppComplexFloat128 x;
+ CFloat128ComplexType result;
+ } u;
+ u.x.r = r;
+ u.x.i = i;
+ return u.result;
+}
+#endif
+#endif
+#endif
+
/* RTNAME(SumComplex4) calls RTNAME(CppSumComplex4) with the same arguments
* and converts the members of its C++ complex result to C _Complex.
*/
@@ -93,9 +121,10 @@ ADAPT_REDUCTION(SumComplex8, double_Complex_t, CppComplexDouble, CMPLX,
#if LDBL_MANT_DIG == 64
ADAPT_REDUCTION(SumComplex10, long_double_Complex_t, CppComplexLongDouble,
CMPLXL, REDUCTION_ARGS, REDUCTION_ARG_NAMES)
-#elif LDBL_MANT_DIG == 113
-ADAPT_REDUCTION(SumComplex16, long_double_Complex_t, CppComplexLongDouble,
- CMPLXL, REDUCTION_ARGS, REDUCTION_ARG_NAMES)
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+ADAPT_REDUCTION(SumComplex16, CFloat128ComplexType, CppComplexFloat128,
+ CMPLXF128, REDUCTION_ARGS, REDUCTION_ARG_NAMES)
#endif
/* PRODUCT() */
@@ -106,9 +135,10 @@ ADAPT_REDUCTION(ProductComplex8, double_Complex_t, CppComplexDouble, CMPLX,
#if LDBL_MANT_DIG == 64
ADAPT_REDUCTION(ProductComplex10, long_double_Complex_t, CppComplexLongDouble,
CMPLXL, REDUCTION_ARGS, REDUCTION_ARG_NAMES)
-#elif LDBL_MANT_DIG == 113
-ADAPT_REDUCTION(ProductComplex16, long_double_Complex_t, CppComplexLongDouble,
- CMPLXL, REDUCTION_ARGS, REDUCTION_ARG_NAMES)
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+ADAPT_REDUCTION(ProductComplex16, CFloat128ComplexType, CppComplexFloat128,
+ CMPLXF128, REDUCTION_ARGS, REDUCTION_ARG_NAMES)
#endif
/* DOT_PRODUCT() */
@@ -119,7 +149,8 @@ ADAPT_REDUCTION(DotProductComplex8, double_Complex_t, CppComplexDouble, CMPLX,
#if LDBL_MANT_DIG == 64
ADAPT_REDUCTION(DotProductComplex10, long_double_Complex_t,
CppComplexLongDouble, CMPLXL, DOT_PRODUCT_ARGS, DOT_PRODUCT_ARG_NAMES)
-#elif LDBL_MANT_DIG == 113
-ADAPT_REDUCTION(DotProductComplex16, long_double_Complex_t,
- CppComplexLongDouble, CMPLXL, DOT_PRODUCT_ARGS, DOT_PRODUCT_ARG_NAMES)
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+ADAPT_REDUCTION(DotProductComplex16, CFloat128ComplexType, CppComplexFloat128,
+ CMPLXF128, DOT_PRODUCT_ARGS, DOT_PRODUCT_ARG_NAMES)
#endif
diff --git a/flang/runtime/complex-reduction.h b/flang/runtime/complex-reduction.h
index 5c4f1f5126e3..1d37b235d519 100644
--- a/flang/runtime/complex-reduction.h
+++ b/flang/runtime/complex-reduction.h
@@ -15,6 +15,7 @@
#ifndef FORTRAN_RUNTIME_COMPLEX_REDUCTION_H_
#define FORTRAN_RUNTIME_COMPLEX_REDUCTION_H_
+#include "flang/Common/float128.h"
#include "flang/Runtime/entry-names.h"
#include <complex.h>
@@ -40,14 +41,18 @@ float_Complex_t RTNAME(SumComplex3)(REDUCTION_ARGS);
float_Complex_t RTNAME(SumComplex4)(REDUCTION_ARGS);
double_Complex_t RTNAME(SumComplex8)(REDUCTION_ARGS);
long_double_Complex_t RTNAME(SumComplex10)(REDUCTION_ARGS);
-long_double_Complex_t RTNAME(SumComplex16)(REDUCTION_ARGS);
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+CFloat128ComplexType RTNAME(SumComplex16)(REDUCTION_ARGS);
+#endif
float_Complex_t RTNAME(ProductComplex2)(REDUCTION_ARGS);
float_Complex_t RTNAME(ProductComplex3)(REDUCTION_ARGS);
float_Complex_t RTNAME(ProductComplex4)(REDUCTION_ARGS);
double_Complex_t RTNAME(ProductComplex8)(REDUCTION_ARGS);
long_double_Complex_t RTNAME(ProductComplex10)(REDUCTION_ARGS);
-long_double_Complex_t RTNAME(ProductComplex16)(REDUCTION_ARGS);
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+CFloat128ComplexType RTNAME(ProductComplex16)(REDUCTION_ARGS);
+#endif
#define DOT_PRODUCT_ARGS \
const struct CppDescriptor *x, const struct CppDescriptor *y, \
@@ -60,6 +65,8 @@ float_Complex_t RTNAME(DotProductComplex3)(DOT_PRODUCT_ARGS);
float_Complex_t RTNAME(DotProductComplex4)(DOT_PRODUCT_ARGS);
double_Complex_t RTNAME(DotProductComplex8)(DOT_PRODUCT_ARGS);
long_double_Complex_t RTNAME(DotProductComplex10)(DOT_PRODUCT_ARGS);
-long_double_Complex_t RTNAME(DotProductComplex16)(DOT_PRODUCT_ARGS);
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+CFloat128ComplexType RTNAME(DotProductComplex16)(DOT_PRODUCT_ARGS);
+#endif
#endif // FORTRAN_RUNTIME_COMPLEX_REDUCTION_H_
diff --git a/flang/runtime/extrema.cpp b/flang/runtime/extrema.cpp
index 3fdc8e159866..61afb0458430 100644
--- a/flang/runtime/extrema.cpp
+++ b/flang/runtime/extrema.cpp
@@ -424,62 +424,6 @@ RT_EXT_API_GROUP_END
// MAXVAL and MINVAL
-template <TypeCategory CAT, int KIND, bool IS_MAXVAL, typename Enable = void>
-struct MaxOrMinIdentity {
- using Type = CppTypeFor<CAT, KIND>;
- static constexpr RT_API_ATTRS Type Value() {
- return IS_MAXVAL ? std::numeric_limits<Type>::lowest()
- : std::numeric_limits<Type>::max();
- }
-};
-
-// std::numeric_limits<> may not know int128_t
-template <bool IS_MAXVAL>
-struct MaxOrMinIdentity<TypeCategory::Integer, 16, IS_MAXVAL> {
- using Type = CppTypeFor<TypeCategory::Integer, 16>;
- static constexpr RT_API_ATTRS Type Value() {
- return IS_MAXVAL ? Type{1} << 127 : ~Type{0} >> 1;
- }
-};
-
-#if HAS_FLOAT128
-// std::numeric_limits<> may not support __float128.
-//
-// Usage of GCC quadmath.h's FLT128_MAX is complicated by the fact that
-// even GCC complains about 'Q' literal suffix under -Wpedantic.
-// We just recreate FLT128_MAX ourselves.
-//
-// This specialization must engage only when
-// CppTypeFor<TypeCategory::Real, 16> is __float128.
-template <bool IS_MAXVAL>
-struct MaxOrMinIdentity<TypeCategory::Real, 16, IS_MAXVAL,
- typename std::enable_if_t<
- std::is_same_v<CppTypeFor<TypeCategory::Real, 16>, __float128>>> {
- using Type = __float128;
- static RT_API_ATTRS Type Value() {
- // Create a buffer to store binary representation of __float128 constant.
- constexpr std::size_t alignment =
- std::max(alignof(Type), alignof(std::uint64_t));
- alignas(alignment) char data[sizeof(Type)];
-
- // First, verify that our interpretation of __float128 format is correct,
- // e.g. by checking at least one known constant.
- *reinterpret_cast<Type *>(data) = Type(1.0);
- if (*reinterpret_cast<std::uint64_t *>(data) != 0 ||
- *(reinterpret_cast<std::uint64_t *>(data) + 1) != 0x3FFF000000000000) {
- Terminator terminator{__FILE__, __LINE__};
- terminator.Crash("not yet implemented: no full support for __float128");
- }
-
- // Recreate FLT128_MAX.
- *reinterpret_cast<std::uint64_t *>(data) = 0xFFFFFFFFFFFFFFFF;
- *(reinterpret_cast<std::uint64_t *>(data) + 1) = 0x7FFEFFFFFFFFFFFF;
- Type max = *reinterpret_cast<Type *>(data);
- return IS_MAXVAL ? -max : max;
- }
-};
-#endif // HAS_FLOAT128
-
template <TypeCategory CAT, int KIND, bool IS_MAXVAL>
class NumericExtremumAccumulator {
public:
@@ -528,35 +472,6 @@ inline RT_API_ATTRS CppTypeFor<CAT, KIND> TotalNumericMaxOrMin(
NumericExtremumAccumulator<CAT, KIND, IS_MAXVAL>{x}, intrinsic);
}
-template <TypeCategory CAT, int KIND, typename ACCUMULATOR>
-static RT_API_ATTRS void DoMaxMinNorm2(Descriptor &result, const Descriptor &x,
- int dim, const Descriptor *mask, const char *intrinsic,
- Terminator &terminator) {
- using Type = CppTypeFor<CAT, KIND>;
- ACCUMULATOR accumulator{x};
- if (dim == 0 || x.rank() == 1) {
- // Total reduction
-
- // Element size of the destination descriptor is the same
- // as the element size of the source.
- result.Establish(x.type(), x.ElementBytes(), nullptr, 0, nullptr,
- CFI_attribute_allocatable);
- if (int stat{result.Allocate()}) {
- terminator.Crash(
- "%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
- }
- DoTotalReduction<Type>(x, dim, mask, accumulator, intrinsic, terminator);
- accumulator.GetResult(result.OffsetElement<Type>());
- } else {
- // Partial reduction
-
- // Element size of the destination descriptor is the same
- // as the element size of the source.
- PartialReduction<ACCUMULATOR, CAT, KIND>(result, x, x.ElementBytes(), dim,
- mask, terminator, intrinsic, accumulator);
- }
-}
-
template <TypeCategory CAT, bool IS_MAXVAL> struct MaxOrMinHelper {
template <int KIND> struct Functor {
RT_API_ATTRS void operator()(Descriptor &result, const Descriptor &x,
@@ -802,70 +717,6 @@ RT_EXT_API_GROUP_END
// NORM2
-RT_VAR_GROUP_BEGIN
-
-// Use at least double precision for accumulators.
-// Don't use __float128, it doesn't work with abs() or sqrt() yet.
-static constexpr RT_CONST_VAR_ATTRS int largestLDKind {
-#if LDBL_MANT_DIG == 113
- 16
-#elif LDBL_MANT_DIG == 64
- 10
-#else
- 8
-#endif
-};
-
-RT_VAR_GROUP_END
-
-template <int KIND> class Norm2Accumulator {
-public:
- using Type = CppTypeFor<TypeCategory::Real, KIND>;
- using AccumType =
- CppTypeFor<TypeCategory::Real, std::clamp(KIND, 8, largestLDKind)>;
- explicit RT_API_ATTRS Norm2Accumulator(const Descriptor &array)
- : array_{array} {}
- RT_API_ATTRS void Reinitialize() { max_ = sum_ = 0; }
- template <typename A>
- RT_API_ATTRS void GetResult(A *p, int /*zeroBasedDim*/ = -1) const {
- // m * sqrt(1 + sum((others(:)/m)**2))
- *p = static_cast<Type>(max_ * std::sqrt(1 + sum_));
- }
- RT_API_ATTRS bool Accumulate(Type x) {
- auto absX{std::abs(static_cast<AccumType>(x))};
- if (!max_) {
- max_ = absX;
- } else if (absX > max_) {
- auto t{max_ / absX}; // < 1.0
- auto tsq{t * t};
- sum_ *= tsq; // scale sum to reflect change to the max
- sum_ += tsq; // include a term for the previous max
- max_ = absX;
- } else { // absX <= max_
- auto t{absX / max_};
- sum_ += t * t;
- }
- return true;
- }
- template <typename A>
- RT_API_ATTRS bool AccumulateAt(const SubscriptValue at[]) {
- return Accumulate(*array_.Element<A>(at));
- }
-
-private:
- const Descriptor &array_;
- AccumType max_{0}; // value (m) with largest magnitude
- AccumType sum_{0}; // sum((others(:)/m)**2)
-};
-
-template <int KIND> struct Norm2Helper {
- RT_API_ATTRS void operator()(Descriptor &result, const Descriptor &x, int dim,
- const Descriptor *mask, Terminator &terminator) const {
- DoMaxMinNorm2<TypeCategory::Real, KIND, Norm2Accumulator<KIND>>(
- result, x, dim, mask, "NORM2", terminator);
- }
-};
-
extern "C" {
RT_EXT_API_GROUP_BEGIN
@@ -887,13 +738,6 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(Norm2_10)(
x, source, line, dim, nullptr, Norm2Accumulator<10>{x}, "NORM2");
}
#endif
-#if LDBL_MANT_DIG == 113
-CppTypeFor<TypeCategory::Real, 16> RTDEF(Norm2_16)(
- const Descriptor &x, const char *source, int line, int dim) {
- return GetTotalReduction<TypeCategory::Real, 16>(
- x, source, line, dim, nullptr, Norm2Accumulator<16>{x}, "NORM2");
-}
-#endif
void RTDEF(Norm2Dim)(Descriptor &result, const Descriptor &x, int dim,
const char *source, int line) {
@@ -901,7 +745,7 @@ void RTDEF(Norm2Dim)(Descriptor &result, const Descriptor &x, int dim,
auto type{x.type().GetCategoryAndKind()};
RUNTIME_CHECK(terminator, type);
if (type->first == TypeCategory::Real) {
- ApplyFloatingPointKind<Norm2Helper, void>(
+ ApplyFloatingPointKind<Norm2Helper, void, true>(
type->second, terminator, result, x, dim, nullptr, terminator);
} else {
terminator.Crash("NORM2: bad type code %d", x.type().raw());
diff --git a/flang/runtime/numeric-templates.h b/flang/runtime/numeric-templates.h
new file mode 100644
index 000000000000..ecc3b2654d96
--- /dev/null
+++ b/flang/runtime/numeric-templates.h
@@ -0,0 +1,340 @@
+//===-- runtime/numeric-templates.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Generic class and function templates used for implementing
+// various numeric intrinsics (EXPONENT, FRACTION, etc.).
+//
+// This header file also defines generic templates for "basic"
+// math operations like abs, isnan, etc. The Float128Math
+// library provides specializations for these templates
+// for the data type corresponding to CppTypeFor<TypeCategory::Real, 16>
+// on the target.
+
+#ifndef FORTRAN_RUNTIME_NUMERIC_TEMPLATES_H_
+#define FORTRAN_RUNTIME_NUMERIC_TEMPLATES_H_
+
+#include "terminator.h"
+#include "tools.h"
+#include "flang/Common/float128.h"
+#include "flang/Runtime/api-attrs.h"
+#include <cstdint>
+#include <limits>
+
+namespace Fortran::runtime {
+
+// MAX/MIN/LOWEST values for different data types.
+
+// MaxOrMinIdentity returns MAX or LOWEST value of the given type.
+template <TypeCategory CAT, int KIND, bool IS_MAXVAL, typename Enable = void>
+struct MaxOrMinIdentity {
+ using Type = CppTypeFor<CAT, KIND>;
+ static constexpr RT_API_ATTRS Type Value() {
+ return IS_MAXVAL ? std::numeric_limits<Type>::lowest()
+ : std::numeric_limits<Type>::max();
+ }
+};
+
+// std::numeric_limits<> may not know int128_t
+template <bool IS_MAXVAL>
+struct MaxOrMinIdentity<TypeCategory::Integer, 16, IS_MAXVAL> {
+ using Type = CppTypeFor<TypeCategory::Integer, 16>;
+ static constexpr RT_API_ATTRS Type Value() {
+ return IS_MAXVAL ? Type{1} << 127 : ~Type{0} >> 1;
+ }
+};
+
+#if HAS_FLOAT128
+// std::numeric_limits<> may not support __float128.
+//
+// Usage of GCC quadmath.h's FLT128_MAX is complicated by the fact that
+// even GCC complains about 'Q' literal suffix under -Wpedantic.
+// We just recreate FLT128_MAX ourselves.
+//
+// This specialization must engage only when
+// CppTypeFor<TypeCategory::Real, 16> is __float128.
+template <bool IS_MAXVAL>
+struct MaxOrMinIdentity<TypeCategory::Real, 16, IS_MAXVAL,
+ typename std::enable_if_t<
+ std::is_same_v<CppTypeFor<TypeCategory::Real, 16>, __float128>>> {
+ using Type = __float128;
+ static RT_API_ATTRS Type Value() {
+ // Create a buffer to store binary representation of __float128 constant.
+ constexpr std::size_t alignment =
+ std::max(alignof(Type), alignof(std::uint64_t));
+ alignas(alignment) char data[sizeof(Type)];
+
+ // First, verify that our interpretation of __float128 format is correct,
+ // e.g. by checking at least one known constant.
+ *reinterpret_cast<Type *>(data) = Type(1.0);
+ if (*reinterpret_cast<std::uint64_t *>(data) != 0 ||
+ *(reinterpret_cast<std::uint64_t *>(data) + 1) != 0x3FFF000000000000) {
+ Terminator terminator{__FILE__, __LINE__};
+ terminator.Crash("not yet implemented: no full support for __float128");
+ }
+
+ // Recreate FLT128_MAX.
+ *reinterpret_cast<std::uint64_t *>(data) = 0xFFFFFFFFFFFFFFFF;
+ *(reinterpret_cast<std::uint64_t *>(data) + 1) = 0x7FFEFFFFFFFFFFFF;
+ Type max = *reinterpret_cast<Type *>(data);
+ return IS_MAXVAL ? -max : max;
+ }
+};
+#endif // HAS_FLOAT128
+
+// Minimum finite representable value.
+// For floating-point types, returns minimum positive normalized value.
+template <typename T> struct MinValue {
+ static RT_API_ATTRS T get() { return std::numeric_limits<T>::min(); }
+};
+
+#if HAS_FLOAT128
+template <> struct MinValue<CppTypeFor<TypeCategory::Real, 16>> {
+ using Type = CppTypeFor<TypeCategory::Real, 16>;
+ static RT_API_ATTRS Type get() {
+ // Create a buffer to store binary representation of __float128 constant.
+ constexpr std::size_t alignment =
+ std::max(alignof(Type), alignof(std::uint64_t));
+ alignas(alignment) char data[sizeof(Type)];
+
+ // First, verify that our interpretation of __float128 format is correct,
+ // e.g. by checking at least one known constant.
+ *reinterpret_cast<Type *>(data) = Type(1.0);
+ if (*reinterpret_cast<std::uint64_t *>(data) != 0 ||
+ *(reinterpret_cast<std::uint64_t *>(data) + 1) != 0x3FFF000000000000) {
+ Terminator terminator{__FILE__, __LINE__};
+ terminator.Crash("not yet implemented: no full support for __float128");
+ }
+
+ // Recreate FLT128_MIN.
+ *reinterpret_cast<std::uint64_t *>(data) = 0;
+ *(reinterpret_cast<std::uint64_t *>(data) + 1) = 0x1000000000000;
+ return *reinterpret_cast<Type *>(data);
+ }
+};
+#endif // HAS_FLOAT128
+
+template <typename T> struct ABSTy {
+ static constexpr RT_API_ATTRS T compute(T x) { return std::abs(x); }
+};
+
+template <typename T> struct FREXPTy {
+ static constexpr RT_API_ATTRS T compute(T x, int *e) {
+ return std::frexp(x, e);
+ }
+};
+
+template <typename T> struct ILOGBTy {
+ static constexpr RT_API_ATTRS int compute(T x) { return std::ilogb(x); }
+};
+
+template <typename T> struct ISINFTy {
+ static constexpr RT_API_ATTRS bool compute(T x) { return std::isinf(x); }
+};
+
+template <typename T> struct ISNANTy {
+ static constexpr RT_API_ATTRS bool compute(T x) { return std::isnan(x); }
+};
+
+template <typename T> struct LDEXPTy {
+ template <typename ET> static constexpr RT_API_ATTRS T compute(T x, ET e) {
+ return std::ldexp(x, e);
+ }
+};
+
+template <typename T> struct MAXTy {
+ static constexpr RT_API_ATTRS T compute() {
+ return std::numeric_limits<T>::max();
+ }
+};
+
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+template <> struct MAXTy<CppTypeFor<TypeCategory::Real, 16>> {
+ static CppTypeFor<TypeCategory::Real, 16> compute() {
+ return MaxOrMinIdentity<TypeCategory::Real, 16, true>::Value();
+ }
+};
+#endif
+
+template <typename T> struct MINTy {
+ static constexpr RT_API_ATTRS T compute() { return MinValue<T>::get(); }
+};
+
+template <typename T> struct QNANTy {
+ static constexpr RT_API_ATTRS T compute() {
+ return std::numeric_limits<T>::quiet_NaN();
+ }
+};
+
+template <typename T> struct SQRTTy {
+ static constexpr RT_API_ATTRS T compute(T x) { return std::sqrt(x); }
+};
+
+// EXPONENT (16.9.75)
+template <typename RESULT, typename ARG>
+inline RT_API_ATTRS RESULT Exponent(ARG x) {
+ if (ISINFTy<ARG>::compute(x) || ISNANTy<ARG>::compute(x)) {
+ return MAXTy<RESULT>::compute(); // +/-Inf, NaN -> HUGE(0)
+ } else if (x == 0) {
+ return 0; // 0 -> 0
+ } else {
+ return ILOGBTy<ARG>::compute(x) + 1;
+ }
+}
+
+// Suppress the warnings about calling __host__-only std::frexp,
+// defined in C++ STD header files, from __device__ code.
+RT_DIAG_PUSH
+RT_DIAG_DISABLE_CALL_HOST_FROM_DEVICE_WARN
+
+// FRACTION (16.9.80)
+template <typename T> inline RT_API_ATTRS T Fraction(T x) {
+ if (ISNANTy<T>::compute(x)) {
+ return x; // NaN -> same NaN
+ } else if (ISINFTy<T>::compute(x)) {
+ return QNANTy<T>::compute(); // +/-Inf -> NaN
+ } else if (x == 0) {
+ return x; // 0 -> same 0
+ } else {
+ int ignoredExp;
+ return FREXPTy<T>::compute(x, &ignoredExp);
+ }
+}
+
+RT_DIAG_POP
+
+// SET_EXPONENT (16.9.171)
+template <typename T> inline RT_API_ATTRS T SetExponent(T x, std::int64_t p) {
+ if (ISNANTy<T>::compute(x)) {
+ return x; // NaN -> same NaN
+ } else if (ISINFTy<T>::compute(x)) {
+ return QNANTy<T>::compute(); // +/-Inf -> NaN
+ } else if (x == 0) {
+ return x; // return negative zero if x is negative zero
+ } else {
+ int expo{ILOGBTy<T>::compute(x) + 1};
+ auto ip{static_cast<int>(p - expo)};
+ if (ip != p - expo) {
+ ip = p < 0 ? std::numeric_limits<int>::min()
+ : std::numeric_limits<int>::max();
+ }
+ return LDEXPTy<T>::compute(x, ip); // x*2**(p-e)
+ }
+}
+
+// MOD & MODULO (16.9.135, .136)
+template <bool IS_MODULO, typename T>
+inline RT_API_ATTRS T RealMod(
+ T a, T p, const char *sourceFile, int sourceLine) {
+ if (p == 0) {
+ Terminator{sourceFile, sourceLine}.Crash(
+ IS_MODULO ? "MODULO with P==0" : "MOD with P==0");
+ }
+ if (ISNANTy<T>::compute(a) || ISNANTy<T>::compute(p) ||
+ ISINFTy<T>::compute(a)) {
+ return QNANTy<T>::compute();
+ } else if (ISINFTy<T>::compute(p)) {
+ return a;
+ }
+ T aAbs{ABSTy<T>::compute(a)};
+ T pAbs{ABSTy<T>::compute(p)};
+ if (aAbs <= static_cast<T>(std::numeric_limits<std::int64_t>::max()) &&
+ pAbs <= static_cast<T>(std::numeric_limits<std::int64_t>::max())) {
+ if (auto aInt{static_cast<std::int64_t>(a)}; a == aInt) {
+ if (auto pInt{static_cast<std::int64_t>(p)}; p == pInt) {
+ // Fast exact case for integer operands
+ auto mod{aInt - (aInt / pInt) * pInt};
+ if (IS_MODULO && (aInt > 0) != (pInt > 0)) {
+ mod += pInt;
+ }
+ return static_cast<T>(mod);
+ }
+ }
+ }
+ if constexpr (std::is_same_v<T, float> || std::is_same_v<T, double> ||
+ std::is_same_v<T, long double>) {
+ // std::fmod() semantics on signed operands seems to match
+ // the requirements of MOD(). MODULO() needs adjustment.
+ T result{std::fmod(a, p)};
+ if constexpr (IS_MODULO) {
+ if ((a < 0) != (p < 0)) {
+ if (result == 0.) {
+ result = -result;
+ } else {
+ result += p;
+ }
+ }
+ }
+ return result;
+ } else {
+ // The standard defines MOD(a,p)=a-AINT(a/p)*p and
+ // MODULO(a,p)=a-FLOOR(a/p)*p, but those definitions lose
+ // precision badly due to cancellation when ABS(a) is
+ // much larger than ABS(p).
+ // Insights:
+ // - MOD(a,p)=MOD(a-n*p,p) when a>0, p>0, integer n>0, and a>=n*p
+ // - when n is a power of two, n*p is exact
+ // - as a>=n*p, a-n*p does not round.
+ // So repeatedly reduce a by all n*p in decreasing order of n;
+ // what's left is the desired remainder. This is basically
+ // the same algorithm as arbitrary precision binary long division,
+ // discarding the quotient.
+ T tmp{aAbs};
+ for (T adj{SetExponent(pAbs, Exponent<int>(aAbs))}; tmp >= pAbs; adj /= 2) {
+ if (tmp >= adj) {
+ tmp -= adj;
+ if (tmp == 0) {
+ break;
+ }
+ }
+ }
+ if (a < 0) {
+ tmp = -tmp;
+ }
+ if constexpr (IS_MODULO) {
+ if ((a < 0) != (p < 0)) {
+ tmp += p;
+ }
+ }
+ return tmp;
+ }
+}
+
+// RRSPACING (16.9.164)
+template <int PREC, typename T> inline RT_API_ATTRS T RRSpacing(T x) {
+ if (ISNANTy<T>::compute(x)) {
+ return x; // NaN -> same NaN
+ } else if (ISINFTy<T>::compute(x)) {
+ return QNANTy<T>::compute(); // +/-Inf -> NaN
+ } else if (x == 0) {
+ return 0; // 0 -> 0
+ } else {
+ return LDEXPTy<T>::compute(
+ ABSTy<T>::compute(x), PREC - (ILOGBTy<T>::compute(x) + 1));
+ }
+}
+
+// SPACING (16.9.180)
+template <int PREC, typename T> inline RT_API_ATTRS T Spacing(T x) {
+ if (ISNANTy<T>::compute(x)) {
+ return x; // NaN -> same NaN
+ } else if (ISINFTy<T>::compute(x)) {
+ return QNANTy<T>::compute(); // +/-Inf -> NaN
+ } else if (x == 0) {
+ // The standard-mandated behavior seems broken, since TINY() can't be
+ // subnormal.
+ return MINTy<T>::compute(); // 0 -> TINY(x)
+ } else {
+ T result{LDEXPTy<T>::compute(
+ static_cast<T>(1.0), ILOGBTy<T>::compute(x) + 1 - PREC)}; // 2**(e-p)
+ return result == 0 ? /*TINY(x)*/ MINTy<T>::compute() : result;
+ }
+}
+
+} // namespace Fortran::runtime
+
+#endif // FORTRAN_RUNTIME_NUMERIC_TEMPLATES_H_
diff --git a/flang/runtime/numeric.cpp b/flang/runtime/numeric.cpp
index ede00d69f20e..d61f32e1d5b8 100644
--- a/flang/runtime/numeric.cpp
+++ b/flang/runtime/numeric.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "flang/Runtime/numeric.h"
+#include "numeric-templates.h"
#include "terminator.h"
#include "flang/Common/float128.h"
#include <cfloat>
@@ -68,58 +69,6 @@ inline RT_API_ATTRS RESULT Floor(ARG x) {
return std::floor(x);
}
-// EXPONENT (16.9.75)
-template <typename RESULT, typename ARG>
-inline RT_API_ATTRS RESULT Exponent(ARG x) {
- if (std::isinf(x) || std::isnan(x)) {
- return std::numeric_limits<RESULT>::max(); // +/-Inf, NaN -> HUGE(0)
- } else if (x == 0) {
- return 0; // 0 -> 0
- } else {
- return std::ilogb(x) + 1;
- }
-}
-
-// Suppress the warnings about calling __host__-only std::frexp,
-// defined in C++ STD header files, from __device__ code.
-RT_DIAG_PUSH
-RT_DIAG_DISABLE_CALL_HOST_FROM_DEVICE_WARN
-
-// FRACTION (16.9.80)
-template <typename T> inline RT_API_ATTRS T Fraction(T x) {
- if (std::isnan(x)) {
- return x; // NaN -> same NaN
- } else if (std::isinf(x)) {
- return std::numeric_limits<T>::quiet_NaN(); // +/-Inf -> NaN
- } else if (x == 0) {
- return x; // 0 -> same 0
- } else {
- int ignoredExp;
- return std::frexp(x, &ignoredExp);
- }
-}
-
-RT_DIAG_POP
-
-// SET_EXPONENT (16.9.171)
-template <typename T> inline RT_API_ATTRS T SetExponent(T x, std::int64_t p) {
- if (std::isnan(x)) {
- return x; // NaN -> same NaN
- } else if (std::isinf(x)) {
- return std::numeric_limits<T>::quiet_NaN(); // +/-Inf -> NaN
- } else if (x == 0) {
- return x; // return negative zero if x is negative zero
- } else {
- int expo{std::ilogb(x) + 1};
- auto ip{static_cast<int>(p - expo)};
- if (ip != p - expo) {
- ip = p < 0 ? std::numeric_limits<int>::min()
- : std::numeric_limits<int>::max();
- }
- return std::ldexp(x, ip); // x*2**(p-e)
- }
-}
-
// MOD & MODULO (16.9.135, .136)
template <bool IS_MODULO, typename T>
inline RT_API_ATTRS T IntMod(T x, T p, const char *sourceFile, int sourceLine) {
@@ -133,94 +82,6 @@ inline RT_API_ATTRS T IntMod(T x, T p, const char *sourceFile, int sourceLine) {
}
return mod;
}
-template <bool IS_MODULO, typename T>
-inline RT_API_ATTRS T RealMod(
- T a, T p, const char *sourceFile, int sourceLine) {
- if (p == 0) {
- Terminator{sourceFile, sourceLine}.Crash(
- IS_MODULO ? "MODULO with P==0" : "MOD with P==0");
- }
- if (std::isnan(a) || std::isnan(p) || std::isinf(a)) {
- return std::numeric_limits<T>::quiet_NaN();
- } else if (std::isinf(p)) {
- return a;
- }
- T aAbs{std::abs(a)};
- T pAbs{std::abs(p)};
- if (aAbs <= static_cast<T>(std::numeric_limits<std::int64_t>::max()) &&
- pAbs <= static_cast<T>(std::numeric_limits<std::int64_t>::max())) {
- if (auto aInt{static_cast<std::int64_t>(a)}; a == aInt) {
- if (auto pInt{static_cast<std::int64_t>(p)}; p == pInt) {
- // Fast exact case for integer operands
- auto mod{aInt - (aInt / pInt) * pInt};
- if (IS_MODULO && (aInt > 0) != (pInt > 0)) {
- mod += pInt;
- }
- return static_cast<T>(mod);
- }
- }
- }
- if constexpr (std::is_same_v<T, float> || std::is_same_v<T, double> ||
- std::is_same_v<T, long double>) {
- // std::fmod() semantics on signed operands seems to match
- // the requirements of MOD(). MODULO() needs adjustment.
- T result{std::fmod(a, p)};
- if constexpr (IS_MODULO) {
- if ((a < 0) != (p < 0)) {
- if (result == 0.) {
- result = -result;
- } else {
- result += p;
- }
- }
- }
- return result;
- } else {
- // The standard defines MOD(a,p)=a-AINT(a/p)*p and
- // MODULO(a,p)=a-FLOOR(a/p)*p, but those definitions lose
- // precision badly due to cancellation when ABS(a) is
- // much larger than ABS(p).
- // Insights:
- // - MOD(a,p)=MOD(a-n*p,p) when a>0, p>0, integer n>0, and a>=n*p
- // - when n is a power of two, n*p is exact
- // - as a>=n*p, a-n*p does not round.
- // So repeatedly reduce a by all n*p in decreasing order of n;
- // what's left is the desired remainder. This is basically
- // the same algorithm as arbitrary precision binary long division,
- // discarding the quotient.
- T tmp{aAbs};
- for (T adj{SetExponent(pAbs, Exponent<int>(aAbs))}; tmp >= pAbs; adj /= 2) {
- if (tmp >= adj) {
- tmp -= adj;
- if (tmp == 0) {
- break;
- }
- }
- }
- if (a < 0) {
- tmp = -tmp;
- }
- if constexpr (IS_MODULO) {
- if ((a < 0) != (p < 0)) {
- tmp += p;
- }
- }
- return tmp;
- }
-}
-
-// RRSPACING (16.9.164)
-template <int PREC, typename T> inline RT_API_ATTRS T RRSpacing(T x) {
- if (std::isnan(x)) {
- return x; // NaN -> same NaN
- } else if (std::isinf(x)) {
- return std::numeric_limits<T>::quiet_NaN(); // +/-Inf -> NaN
- } else if (x == 0) {
- return 0; // 0 -> 0
- } else {
- return std::ldexp(std::abs(x), PREC - (std::ilogb(x) + 1));
- }
-}
// SCALE (16.9.166)
template <typename T> inline RT_API_ATTRS T Scale(T x, std::int64_t p) {
@@ -229,7 +90,7 @@ template <typename T> inline RT_API_ATTRS T Scale(T x, std::int64_t p) {
ip = p < 0 ? std::numeric_limits<int>::min()
: std::numeric_limits<int>::max();
}
- return std::ldexp(x, p); // x*2**p
+ return std::ldexp(x, ip); // x*2**p
}
// SELECTED_INT_KIND (16.9.169)
@@ -300,23 +161,6 @@ inline RT_API_ATTRS CppTypeFor<TypeCategory::Integer, 4> SelectedRealKind(
return error ? error : kind;
}
-// SPACING (16.9.180)
-template <int PREC, typename T> inline RT_API_ATTRS T Spacing(T x) {
- if (std::isnan(x)) {
- return x; // NaN -> same NaN
- } else if (std::isinf(x)) {
- return std::numeric_limits<T>::quiet_NaN(); // +/-Inf -> NaN
- } else if (x == 0) {
- // The standard-mandated behavior seems broken, since TINY() can't be
- // subnormal.
- return std::numeric_limits<T>::min(); // 0 -> TINY(x)
- } else {
- T result{
- std::ldexp(static_cast<T>(1.0), std::ilogb(x) + 1 - PREC)}; // 2**(e-p)
- return result == 0 ? /*TINY(x)*/ std::numeric_limits<T>::min() : result;
- }
-}
-
// NEAREST (16.9.139)
template <int PREC, typename T>
inline RT_API_ATTRS T Nearest(T x, bool positive) {
@@ -481,6 +325,7 @@ CppTypeFor<TypeCategory::Integer, 8> RTDEF(Exponent10_8)(
return Exponent<CppTypeFor<TypeCategory::Integer, 8>>(x);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Integer, 4> RTDEF(Exponent16_4)(
CppTypeFor<TypeCategory::Real, 16> x) {
return Exponent<CppTypeFor<TypeCategory::Integer, 4>>(x);
@@ -597,6 +442,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(Fraction10)(
return Fraction(x);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(Fraction16)(
CppTypeFor<TypeCategory::Real, 16> x) {
return Fraction(x);
@@ -684,6 +530,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(ModReal10)(
return RealMod<false>(x, p, sourceFile, sourceLine);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(ModReal16)(
CppTypeFor<TypeCategory::Real, 16> x, CppTypeFor<TypeCategory::Real, 16> p,
const char *sourceFile, int sourceLine) {
@@ -740,6 +587,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(ModuloReal10)(
return RealMod<true>(x, p, sourceFile, sourceLine);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(ModuloReal16)(
CppTypeFor<TypeCategory::Real, 16> x, CppTypeFor<TypeCategory::Real, 16> p,
const char *sourceFile, int sourceLine) {
@@ -761,6 +609,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(Nearest10)(
return Nearest<64>(x, positive);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(Nearest16)(
CppTypeFor<TypeCategory::Real, 16> x, bool positive) {
return Nearest<113>(x, positive);
@@ -873,6 +722,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(RRSpacing10)(
return RRSpacing<64>(x);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(RRSpacing16)(
CppTypeFor<TypeCategory::Real, 16> x) {
return RRSpacing<113>(x);
@@ -893,6 +743,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(SetExponent10)(
return SetExponent(x, p);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(SetExponent16)(
CppTypeFor<TypeCategory::Real, 16> x, std::int64_t p) {
return SetExponent(x, p);
@@ -913,6 +764,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(Scale10)(
return Scale(x, p);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(Scale16)(
CppTypeFor<TypeCategory::Real, 16> x, std::int64_t p) {
return Scale(x, p);
@@ -972,6 +824,7 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(Spacing10)(
return Spacing<64>(x);
}
#elif LDBL_MANT_DIG == 113
+// The __float128 implementation resides in FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(Spacing16)(
CppTypeFor<TypeCategory::Real, 16> x) {
return Spacing<113>(x);
diff --git a/flang/runtime/product.cpp b/flang/runtime/product.cpp
index a516bc51a959..4c3b8c33a12e 100644
--- a/flang/runtime/product.cpp
+++ b/flang/runtime/product.cpp
@@ -123,7 +123,8 @@ CppTypeFor<TypeCategory::Real, 10> RTDEF(ProductReal10)(const Descriptor &x,
NonComplexProductAccumulator<CppTypeFor<TypeCategory::Real, 10>>{x},
"PRODUCT");
}
-#elif LDBL_MANT_DIG == 113
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
CppTypeFor<TypeCategory::Real, 16> RTDEF(ProductReal16)(const Descriptor &x,
const char *source, int line, int dim, const Descriptor *mask) {
return GetTotalReduction<TypeCategory::Real, 16>(x, source, line, dim, mask,
@@ -154,7 +155,8 @@ void RTDEF(CppProductComplex10)(CppTypeFor<TypeCategory::Complex, 10> &result,
mask, ComplexProductAccumulator<CppTypeFor<TypeCategory::Real, 10>>{x},
"PRODUCT");
}
-#elif LDBL_MANT_DIG == 113
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
void RTDEF(CppProductComplex16)(CppTypeFor<TypeCategory::Complex, 16> &result,
const Descriptor &x, const char *source, int line, int dim,
const Descriptor *mask) {
diff --git a/flang/runtime/reduction-templates.h b/flang/runtime/reduction-templates.h
index 7d0f82d59a08..5b793deb2a12 100644
--- a/flang/runtime/reduction-templates.h
+++ b/flang/runtime/reduction-templates.h
@@ -1,4 +1,4 @@
-//===-- runtime/reduction-templates.h -------------------------------------===//
+//===-- runtime/reduction-templates.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -21,10 +21,12 @@
#ifndef FORTRAN_RUNTIME_REDUCTION_TEMPLATES_H_
#define FORTRAN_RUNTIME_REDUCTION_TEMPLATES_H_
+#include "numeric-templates.h"
#include "terminator.h"
#include "tools.h"
#include "flang/Runtime/cpp-type.h"
#include "flang/Runtime/descriptor.h"
+#include <algorithm>
namespace Fortran::runtime {
@@ -332,5 +334,104 @@ template <typename ACCUMULATOR> struct PartialLocationHelper {
};
};
+// NORM2 templates
+
+RT_VAR_GROUP_BEGIN
+
+// Use at least double precision for accumulators.
+// Don't use __float128, it doesn't work with abs() or sqrt() yet.
+static constexpr RT_CONST_VAR_ATTRS int Norm2LargestLDKind {
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+ 16
+#elif LDBL_MANT_DIG == 64
+ 10
+#else
+ 8
+#endif
+};
+
+RT_VAR_GROUP_END
+
+template <TypeCategory CAT, int KIND, typename ACCUMULATOR>
+inline RT_API_ATTRS void DoMaxMinNorm2(Descriptor &result, const Descriptor &x,
+ int dim, const Descriptor *mask, const char *intrinsic,
+ Terminator &terminator) {
+ using Type = CppTypeFor<CAT, KIND>;
+ ACCUMULATOR accumulator{x};
+ if (dim == 0 || x.rank() == 1) {
+ // Total reduction
+
+ // Element size of the destination descriptor is the same
+ // as the element size of the source.
+ result.Establish(x.type(), x.ElementBytes(), nullptr, 0, nullptr,
+ CFI_attribute_allocatable);
+ if (int stat{result.Allocate()}) {
+ terminator.Crash(
+ "%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
+ }
+ DoTotalReduction<Type>(x, dim, mask, accumulator, intrinsic, terminator);
+ accumulator.GetResult(result.OffsetElement<Type>());
+ } else {
+ // Partial reduction
+
+ // Element size of the destination descriptor is the same
+ // as the element size of the source.
+ PartialReduction<ACCUMULATOR, CAT, KIND>(result, x, x.ElementBytes(), dim,
+ mask, terminator, intrinsic, accumulator);
+ }
+}
+
+// The data type used by Norm2Accumulator.
+template <int KIND>
+using Norm2AccumType =
+ CppTypeFor<TypeCategory::Real, std::clamp(KIND, 8, Norm2LargestLDKind)>;
+
+template <int KIND> class Norm2Accumulator {
+public:
+ using Type = CppTypeFor<TypeCategory::Real, KIND>;
+ using AccumType = Norm2AccumType<KIND>;
+ explicit RT_API_ATTRS Norm2Accumulator(const Descriptor &array)
+ : array_{array} {}
+ RT_API_ATTRS void Reinitialize() { max_ = sum_ = 0; }
+ template <typename A>
+ RT_API_ATTRS void GetResult(A *p, int /*zeroBasedDim*/ = -1) const {
+ // m * sqrt(1 + sum((others(:)/m)**2))
+ *p = static_cast<Type>(max_ * SQRTTy<AccumType>::compute(1 + sum_));
+ }
+ RT_API_ATTRS bool Accumulate(Type x) {
+ auto absX{ABSTy<AccumType>::compute(static_cast<AccumType>(x))};
+ if (!max_) {
+ max_ = absX;
+ } else if (absX > max_) {
+ auto t{max_ / absX}; // < 1.0
+ auto tsq{t * t};
+ sum_ *= tsq; // scale sum to reflect change to the max
+ sum_ += tsq; // include a term for the previous max
+ max_ = absX;
+ } else { // absX <= max_
+ auto t{absX / max_};
+ sum_ += t * t;
+ }
+ return true;
+ }
+ template <typename A>
+ RT_API_ATTRS bool AccumulateAt(const SubscriptValue at[]) {
+ return Accumulate(*array_.Element<A>(at));
+ }
+
+private:
+ const Descriptor &array_;
+ AccumType max_{0}; // value (m) with largest magnitude
+ AccumType sum_{0}; // sum((others(:)/m)**2)
+};
+
+template <int KIND> struct Norm2Helper {
+ RT_API_ATTRS void operator()(Descriptor &result, const Descriptor &x, int dim,
+ const Descriptor *mask, Terminator &terminator) const {
+ DoMaxMinNorm2<TypeCategory::Real, KIND, Norm2Accumulator<KIND>>(
+ result, x, dim, mask, "NORM2", terminator);
+ }
+};
+
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_REDUCTION_TEMPLATES_H_
diff --git a/flang/runtime/sum.cpp b/flang/runtime/sum.cpp
index 048399737c85..d2495e3e956f 100644
--- a/flang/runtime/sum.cpp
+++ b/flang/runtime/sum.cpp
@@ -175,7 +175,8 @@ void RTDEF(CppSumComplex10)(CppTypeFor<TypeCategory::Complex, 10> &result,
result = GetTotalReduction<TypeCategory::Complex, 10>(
x, source, line, dim, mask, ComplexSumAccumulator<long double>{x}, "SUM");
}
-#elif LDBL_MANT_DIG == 113
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
void RTDEF(CppSumComplex16)(CppTypeFor<TypeCategory::Complex, 16> &result,
const Descriptor &x, const char *source, int line, int dim,
const Descriptor *mask) {
diff --git a/flang/runtime/tools.h b/flang/runtime/tools.h
index 89e506999574..c1f89cadca06 100644
--- a/flang/runtime/tools.h
+++ b/flang/runtime/tools.h
@@ -266,7 +266,8 @@ inline RT_API_ATTRS RESULT ApplyIntegerKind(
}
}
-template <template <int KIND> class FUNC, typename RESULT, typename... A>
+template <template <int KIND> class FUNC, typename RESULT,
+ bool NEEDSMATH = false, typename... A>
inline RT_API_ATTRS RESULT ApplyFloatingPointKind(
int kind, Terminator &terminator, A &&...x) {
switch (kind) {
@@ -287,7 +288,13 @@ inline RT_API_ATTRS RESULT ApplyFloatingPointKind(
break;
case 16:
if constexpr (HasCppTypeFor<TypeCategory::Real, 16>) {
- return FUNC<16>{}(std::forward<A>(x)...);
+ // If FUNC implemenation relies on FP math functions,
+ // then we should not be here. The compiler should have
+ // generated a call to an entry in FortranFloat128Math
+ // library.
+ if constexpr (!NEEDSMATH) {
+ return FUNC<16>{}(std::forward<A>(x)...);
+ }
}
break;
}
diff --git a/flang/runtime/unit.cpp b/flang/runtime/unit.cpp
index 58ca313d9e44..09782d2f8492 100644
--- a/flang/runtime/unit.cpp
+++ b/flang/runtime/unit.cpp
@@ -1001,25 +1001,30 @@ int ExternalFileUnit::GetAsynchronousId(IoErrorHandler &handler) {
if (!mayAsynchronous()) {
handler.SignalError(IostatBadAsynchronous);
return -1;
- } else if (auto least{asyncIdAvailable_.LeastElement()}) {
- asyncIdAvailable_.reset(*least);
- return static_cast<int>(*least);
} else {
+ for (int j{0}; 64 * j < maxAsyncIds; ++j) {
+ if (auto least{asyncIdAvailable_[j].LeastElement()}) {
+ asyncIdAvailable_[j].reset(*least);
+ return 64 * j + static_cast<int>(*least);
+ }
+ }
handler.SignalError(IostatTooManyAsyncOps);
return -1;
}
}
bool ExternalFileUnit::Wait(int id) {
- if (static_cast<std::size_t>(id) >= asyncIdAvailable_.size() ||
- asyncIdAvailable_.test(id)) {
+ if (static_cast<std::size_t>(id) >= maxAsyncIds ||
+ asyncIdAvailable_[id / 64].test(id % 64)) {
return false;
} else {
if (id == 0) { // means "all IDs"
- asyncIdAvailable_.set();
- asyncIdAvailable_.reset(0);
+ for (int j{0}; 64 * j < maxAsyncIds; ++j) {
+ asyncIdAvailable_[j].set();
+ }
+ asyncIdAvailable_[0].reset(0);
} else {
- asyncIdAvailable_.set(id);
+ asyncIdAvailable_[id / 64].set(id % 64);
}
return true;
}
diff --git a/flang/runtime/unit.h b/flang/runtime/unit.h
index 140fda3c4d2a..e3c8757645bb 100644
--- a/flang/runtime/unit.h
+++ b/flang/runtime/unit.h
@@ -36,10 +36,14 @@ class ExternalFileUnit : public ConnectionState,
public OpenFile,
public FileFrame<ExternalFileUnit> {
public:
+ static constexpr int maxAsyncIds{64 * 16};
+
explicit ExternalFileUnit(int unitNumber) : unitNumber_{unitNumber} {
isUTF8 = executionEnvironment.defaultUTF8;
- asyncIdAvailable_.set();
- asyncIdAvailable_.reset(0);
+ for (int j{0}; 64 * j < maxAsyncIds; ++j) {
+ asyncIdAvailable_[j].set();
+ }
+ asyncIdAvailable_[0].reset(0);
}
~ExternalFileUnit() {}
@@ -150,7 +154,7 @@ private:
std::size_t recordOffsetInFrame_{0}; // of currentRecordNumber
bool swapEndianness_{false};
bool createdForInternalChildIo_{false};
- common::BitSet<64> asyncIdAvailable_;
+ common::BitSet<64> asyncIdAvailable_[maxAsyncIds / 64];
// When a synchronous I/O statement is in progress on this unit, holds its
// state.
diff --git a/flang/test/Driver/flang-experimental-polymorphism-flag.f90 b/flang/test/Driver/flang-experimental-polymorphism-flag.f90
index 106e898149a1..095c1cc929e6 100644
--- a/flang/test/Driver/flang-experimental-polymorphism-flag.f90
+++ b/flang/test/Driver/flang-experimental-polymorphism-flag.f90
@@ -1,10 +1,10 @@
! Test -flang-experimental-hlfir flag
! RUN: %flang_fc1 -flang-experimental-polymorphism -emit-fir -o - %s | FileCheck %s
-! RUN: not %flang_fc1 -emit-fir -o - %s 2>&1 | FileCheck %s --check-prefix NO-POLYMORPHISM
+! RUN: %flang_fc1 -emit-fir -o - %s 2>&1 | FileCheck %s --check-prefix NO-POLYMORPHISM
! CHECK: func.func @_QPtest(%{{.*}}: !fir.class<none> {fir.bindc_name = "poly"})
subroutine test(poly)
class(*) :: poly
end subroutine test
-! NO-POLYMORPHISM: not yet implemented: support for polymorphic types
+! NO-POLYMORPHISM: func.func @_QPtest
diff --git a/flang/test/HLFIR/all-lowering.fir b/flang/test/HLFIR/all-lowering.fir
index dfd1ace947d6..e83378eacf9c 100644
--- a/flang/test/HLFIR/all-lowering.fir
+++ b/flang/test/HLFIR/all-lowering.fir
@@ -34,6 +34,7 @@ func.func @_QPall2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?x!fir.logical<4>>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>>
// CHECK: %[[ARG2:.*]]: !fir.ref<i32>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
@@ -55,7 +56,6 @@ func.func @_QPall2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x!fir.logical<4>>>, i1) -> !hlfir.expr<?x!fir.logical<4>>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[EXPR]]
@@ -79,6 +79,7 @@ func.func @_QPall3(%arg0: !fir.ref<!fir.array<2x!fir.logical<4>>> {fir.bindc_nam
}
// CHECK-LABEL: func.func @_QPall3(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2x!fir.logical<4>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[RET_BOX:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.logical<4>>>>
// CHECK-DAG: %[[RET_ADDR:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.logical<4>>>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -101,7 +102,6 @@ func.func @_QPall3(%arg0: !fir.ref<!fir.array<2x!fir.logical<4>>> {fir.bindc_nam
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x!fir.logical<4>>>, i1) -> !hlfir.expr<?x!fir.logical<4>>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]
// CHECK: hlfir.destroy %[[EXPR]]
@@ -125,6 +125,7 @@ func.func @_QPall4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?x!fir.logical<4>>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>>
// CHECK: %[[ARG2:.*]]: !fir.ref<!fir.box<!fir.ptr<i32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[DIM_ARG:.*]]:2 = hlfir.declare %[[ARG2]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
@@ -149,9 +150,8 @@ func.func @_QPall4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x!fir.logical<4>>>, i1) -> !hlfir.expr<?x!fir.logical<4>>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]
// CHECK: hlfir.destroy %[[EXPR]]
// CHECK-NEXT: return
-// CHECK-NEXT: } \ No newline at end of file
+// CHECK-NEXT: }
diff --git a/flang/test/HLFIR/any-lowering.fir b/flang/test/HLFIR/any-lowering.fir
index ef8b89502931..039146727d3f 100644
--- a/flang/test/HLFIR/any-lowering.fir
+++ b/flang/test/HLFIR/any-lowering.fir
@@ -36,6 +36,7 @@ func.func @_QPany2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?x!fir.logical<4>>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>>
// CHECK: %[[ARG2:.*]]: !fir.ref<i32>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
@@ -57,7 +58,6 @@ func.func @_QPany2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x!fir.logical<4>>>, i1) -> !hlfir.expr<?x!fir.logical<4>>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[EXPR]]
@@ -82,6 +82,7 @@ func.func @_QPany3(%arg0: !fir.ref<!fir.array<2x!fir.logical<4>>> {fir.bindc_nam
}
// CHECK-LABEL: func.func @_QPany3(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2x!fir.logical<4>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[RET_BOX:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.logical<4>>>>
// CHECK-DAG: %[[RET_ADDR:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.logical<4>>>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -104,7 +105,6 @@ func.func @_QPany3(%arg0: !fir.ref<!fir.array<2x!fir.logical<4>>> {fir.bindc_nam
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x!fir.logical<4>>>, i1) -> !hlfir.expr<?x!fir.logical<4>>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]
// CHECK: hlfir.destroy %[[EXPR]]
@@ -129,6 +129,7 @@ func.func @_QPany4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?x!fir.logical<4>>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>>
// CHECK: %[[ARG2:.*]]: !fir.ref<!fir.box<!fir.ptr<i32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[DIM_ARG:.*]]:2 = hlfir.declare %[[ARG2]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
@@ -153,7 +154,6 @@ func.func @_QPany4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_n
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x!fir.logical<4>>>, i1) -> !hlfir.expr<?x!fir.logical<4>>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]
// CHECK: hlfir.destroy %[[EXPR]]
diff --git a/flang/test/HLFIR/count-lowering-default-int-kinds.fir b/flang/test/HLFIR/count-lowering-default-int-kinds.fir
index ea66c435e6a8..68bc7fdbaad8 100644
--- a/flang/test/HLFIR/count-lowering-default-int-kinds.fir
+++ b/flang/test/HLFIR/count-lowering-default-int-kinds.fir
@@ -2,9 +2,9 @@
// RUN: fir-opt %s -lower-hlfir-intrinsics | FileCheck %s
module attributes {fir.defaultkind = "a1c4d8i8l4r4", fir.kindmap = ""} {
- func.func @test_i8(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) {
+ func.func @test_i8(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) -> !hlfir.expr<?xi64> {
%4 = hlfir.count %arg0 dim %arg1 : (!fir.box<!fir.array<?x?x!fir.logical<4>>>, i64) -> !hlfir.expr<?xi64>
- return
+ return %4 : !hlfir.expr<?xi64>
}
}
// CHECK-LABEL: func.func @test_i8
@@ -12,9 +12,9 @@ module attributes {fir.defaultkind = "a1c4d8i8l4r4", fir.kindmap = ""} {
// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32) -> none
module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = ""} {
- func.func @test_i4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) {
+ func.func @test_i4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) -> !hlfir.expr<?xi32> {
%4 = hlfir.count %arg0 dim %arg1 : (!fir.box<!fir.array<?x?x!fir.logical<4>>>, i64) -> !hlfir.expr<?xi32>
- return
+ return %4 : !hlfir.expr<?xi32>
}
}
// CHECK-LABEL: func.func @test_i4
@@ -22,9 +22,9 @@ module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = ""} {
// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32) -> none
module attributes {fir.defaultkind = "a1c4d8i2l4r4", fir.kindmap = ""} {
- func.func @test_i2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) {
+ func.func @test_i2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) -> !hlfir.expr<?xi16> {
%4 = hlfir.count %arg0 dim %arg1 : (!fir.box<!fir.array<?x?x!fir.logical<4>>>, i64) -> !hlfir.expr<?xi16>
- return
+ return %4 : !hlfir.expr<?xi16>
}
}
// CHECK-LABEL: func.func @test_i2
@@ -32,9 +32,9 @@ module attributes {fir.defaultkind = "a1c4d8i2l4r4", fir.kindmap = ""} {
// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32) -> none
module attributes {fir.defaultkind = "a1c4d8i1l4r4", fir.kindmap = ""} {
- func.func @test_i1(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) {
+ func.func @test_i1(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc_name = "x"}, %arg1: i64) -> !hlfir.expr<?xi8> {
%4 = hlfir.count %arg0 dim %arg1 : (!fir.box<!fir.array<?x?x!fir.logical<4>>>, i64) -> !hlfir.expr<?xi8>
- return
+ return %4 : !hlfir.expr<?xi8>
}
}
// CHECK-LABEL: func.func @test_i1
diff --git a/flang/test/HLFIR/count-lowering.fir b/flang/test/HLFIR/count-lowering.fir
index da0f250dceef..c3309724981a 100644
--- a/flang/test/HLFIR/count-lowering.fir
+++ b/flang/test/HLFIR/count-lowering.fir
@@ -34,6 +34,7 @@ func.func @_QPcount2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?x!fir.logical<4>>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>
// CHECK: %[[ARG2:.*]]: !fir.ref<i32>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[KIND:.*]] = arith.constant 4 : i32
// CHECK-DAG: %[[MASK:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
@@ -56,7 +57,6 @@ func.func @_QPcount2(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[EXPR]]
@@ -80,6 +80,7 @@ func.func @_QPcount3(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"})
}
// CHECK-LABEL: func.func @_QPcount3(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2xi32>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[RET_BOX:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-DAG: %[[KIND:.*]] = arith.constant 4 : i32
// CHECK-DAG: %[[RET_ADDR:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
@@ -104,7 +105,6 @@ func.func @_QPcount3(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"})
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]
// CHECK: hlfir.destroy %[[EXPR]]
@@ -133,6 +133,7 @@ func.func @_QPcount4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?x!fir.logical<4>>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>
// CHECK: %[[ARG2:.*]]: !fir.ref<i32>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
@@ -155,7 +156,6 @@ func.func @_QPcount4(%arg0: !fir.box<!fir.array<?x?x!fir.logical<4>>> {fir.bindc
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi64>>, i1) -> !hlfir.expr<?xi64>
// CHECK-NEXT: %[[OUT_SHAPE:.*]] = hlfir.shape_of %[[EXPR]]
// CHECK-NEXT: %[[OUT:.*]] = hlfir.elemental %[[OUT_SHAPE]] : (!fir.shape<1>) -> !hlfir.expr<?xi32>
diff --git a/flang/test/HLFIR/dot_product-lowering.fir b/flang/test/HLFIR/dot_product-lowering.fir
index e4f91eabfc09..64d65665433f 100644
--- a/flang/test/HLFIR/dot_product-lowering.fir
+++ b/flang/test/HLFIR/dot_product-lowering.fir
@@ -96,7 +96,6 @@ func.func @_QPdot_product4(%arg0: !fir.box<!fir.array<?x!fir.logical<4>>> {fir.b
// CHECK: %[[VAL_2:.*]] = fir.alloca !fir.logical<4>
// CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFdot_product2Elhs"} : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> (!fir.box<!fir.array<?x!fir.logical<4>>>, !fir.box<!fir.array<?x!fir.logical<4>>>)
// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFdot_product2Erhs"} : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> (!fir.box<!fir.array<?x!fir.logical<4>>>, !fir.box<!fir.array<?x!fir.logical<4>>>)
-// CHECK: %[[VAL_5:.*]] = fir.absent !fir.box<!fir.logical<4>>
// CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_3]]#1 : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> !fir.box<none>
// CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_4]]#1 : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> !fir.box<none>
// CHECK: %[[VAL_12:.*]] = fir.call @_FortranADotProductLogical(%[[VAL_9]], %[[VAL_10]], %{{.*}}, %{{.*}}) fastmath<contract> : (!fir.box<none>, !fir.box<none>, !fir.ref<i8>, i32) -> i1
diff --git a/flang/test/HLFIR/extents-of-shape-of.f90 b/flang/test/HLFIR/extents-of-shape-of.f90
index d807f8b70302..1168004597d1 100644
--- a/flang/test/HLFIR/extents-of-shape-of.f90
+++ b/flang/test/HLFIR/extents-of-shape-of.f90
@@ -31,18 +31,17 @@ end subroutine
! CHECK-HLFIR-NEXT: hlfir.destroy %[[MUL]]
! ...
+! CHECK-FIR-DAG: %[[C0:.*]] = arith.constant 0 : index
+! CHECK-FIR-DAG: %[[C1:.*]] = arith.constant 1 : index
+! CHECK-FIR-DAG: %[[C2:.*]] = arith.constant 2 : index
! CHECK-FIR: fir.call @_FortranAMatmul
! CHECK-FIR-NEXT: %[[MUL:.*]] = fir.load %[[MUL_BOX:.*]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
-! CHECK-FIR-NEXT: %[[C0:.*]] = arith.constant 0 : index
! CHECK-FIR-NEXT: %[[DIMS0:.*]]:3 = fir.box_dims %[[MUL]], %[[C0]]
-! CHECK-FIR-NEXT: %[[C1:.*]] = arith.constant 1 : index
! CHECK-FIR-NEXT: %[[DIMS1:.*]]:3 = fir.box_dims %[[MUL]], %[[C1]]
! ...
! CHECK-FIR: %[[SHAPE:.*]] = fir.shape %[[DIMS0]]#1, %[[DIMS1]]#1
-! CHECK-FIR-NEXT: %[[C2:.*]] = arith.constant 2 : index
-! CHECK-FIR-NEXT: %[[C1_1:.*]] = arith.constant 1 : index
-! CHECK-FIR-NEXT: fir.do_loop %[[ARG2:.*]] = %[[C1_1]] to %[[DIMS1]]#1 step %[[C1_1]] unordered {
-! CHECK-FIR-NEXT: fir.do_loop %[[ARG3:.*]] = %[[C1_1]] to %[[C2]] step %[[C1_1]] unordered {
+! CHECK-FIR-NEXT: fir.do_loop %[[ARG2:.*]] = %[[C1]] to %[[DIMS1]]#1 step %[[C1]] unordered {
+! CHECK-FIR-NEXT: fir.do_loop %[[ARG3:.*]] = %[[C1]] to %[[C2]] step %[[C1]] unordered {
! ...
! CHECK-ALL: return
diff --git a/flang/test/HLFIR/matmul-lowering.fir b/flang/test/HLFIR/matmul-lowering.fir
index ee921da9a525..85a73dd45160 100644
--- a/flang/test/HLFIR/matmul-lowering.fir
+++ b/flang/test/HLFIR/matmul-lowering.fir
@@ -14,6 +14,7 @@ func.func @_QPmatmul1(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "lh
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "lhs"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "rhs"}
// CHECK: %[[ARG2:.*]]: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "res"}
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[LHS_VAR:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[RHS_VAR:.*]]:2 = hlfir.declare %[[ARG1]]
// CHECK-DAG: %[[RES_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
@@ -37,7 +38,6 @@ func.func @_QPmatmul1(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "lh
// TODO: fix alias analysis in hlfir.assign bufferization
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
// TODO: add shape information from original intrinsic op
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[ASEXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x?xi32>>, i1) -> !hlfir.expr<?x?xi32>
// CHECK: hlfir.assign %[[ASEXPR]] to %[[RES_VAR]]#0
// CHECK: hlfir.destroy %[[ASEXPR]]
diff --git a/flang/test/HLFIR/maxloc-lowering.fir b/flang/test/HLFIR/maxloc-lowering.fir
index 9e52a074a6e2..a51c9b483fa0 100644
--- a/flang/test/HLFIR/maxloc-lowering.fir
+++ b/flang/test/HLFIR/maxloc-lowering.fir
@@ -13,29 +13,28 @@ func.func @_QPmaxloc1(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK-LABEL: func.func @_QPmaxloc1(
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFmaxloc1Ea"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFmaxloc1Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V3:.*]] = fir.absent !fir.box<i1>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V4:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]] = fir.embox %[[V4]](%[[V5]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V6]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box<i1>) -> !fir.box<none>
-// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %c4_i32, {{.*}}, {{.*}}, %[[V12]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V17:.*]] = fir.shape_shift %[[V15]]#0, %[[V15]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V18:.*]]:2 = hlfir.declare %[[V16]](%[[V17]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V19]] to %[[V2]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V19]] : !hlfir.expr<?xi32>
@@ -55,32 +54,31 @@ func.func @_QPmaxloc2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
// CHECK: %[[ARG2:.*]]: !fir.ref<index>
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFmaxloc2Ea"} : (!fir.box<!fir.array<?x?xi32>>) -> (!fir.box<!fir.array<?x?xi32>>, !fir.box<!fir.array<?x?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFmaxloc2Ed"} : (!fir.ref<index>) -> (!fir.ref<index>, !fir.ref<index>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFmaxloc2Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V4:.*]] = fir.load %[[V2]]#0 : !fir.ref<index>
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V5:.*]] = fir.convert %[[V4]] : (index) -> i32
// CHECK-NEXT: %[[V6:.*]] = fir.absent !fir.box<i1>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V7:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V8:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V8:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V9:.*]] = fir.embox %[[V7]](%[[V8]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V9]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V11:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V12:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?x?xi32>>) -> !fir.box<none>
// CHECK: %[[V15:.*]] = fir.convert %[[V6]] : (!fir.box<i1>) -> !fir.box<none>
-// CHECK-NEXT: %[[V16:.*]] = fir.call @_FortranAMaxlocDim(%[[V11]], %[[V12]], %c4_i32, %[[V5]], {{.*}}, {{.*}}, %[[V15]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V16:.*]] = fir.call @_FortranAMaxlocDim(%[[V11]], %[[V12]], %[[C4]], %[[V5]], {{.*}}, {{.*}}, %[[V15]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V17:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V18:.*]]:3 = fir.box_dims %[[V17]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V18:.*]]:3 = fir.box_dims %[[V17]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V19:.*]] = fir.box_addr %[[V17]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V20:.*]] = fir.shape_shift %[[V18]]#0, %[[V18]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V21:.*]]:2 = hlfir.declare %[[V19]](%[[V20]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V22:.*]] = hlfir.as_expr %[[V21]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V22:.*]] = hlfir.as_expr %[[V21]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V22]] to %[[V3]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V22]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -100,30 +98,29 @@ func.func @_QPmaxloc3(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
// CHECK: %[[ARG2:.*]]: !fir.ref<!fir.logical<4>>
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFmaxloc3Ea"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFmaxloc3Em"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFmaxloc3Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V4:.*]] = fir.embox %[[V2]]#1 : (!fir.ref<!fir.logical<4>>) -> !fir.box<!fir.logical<4>>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V5:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V6:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V6:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V7:.*]] = fir.embox %[[V5]](%[[V6]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V7]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V9:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V10:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V13:.*]] = fir.convert %[[V4]] : (!fir.box<!fir.logical<4>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V14:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V9]], %[[V10]], %c4_i32, {{.*}}, {{.*}}, %[[V13]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V14:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V9]], %[[V10]], %[[C4]], {{.*}}, {{.*}}, %[[V13]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V15:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V16:.*]]:3 = fir.box_dims %[[V15]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V16:.*]]:3 = fir.box_dims %[[V15]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V15]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V18:.*]] = fir.shape_shift %[[V16]]#0, %[[V16]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V19:.*]]:2 = hlfir.declare %[[V17]](%[[V18]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V20:.*]] = hlfir.as_expr %[[V19]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V20:.*]] = hlfir.as_expr %[[V19]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V20]] to %[[V3]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V20]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -143,29 +140,28 @@ func.func @_QPmaxloc4(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
// CHECK: %[[ARG2:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>>
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFmaxloc4Ea"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFmaxloc4Em"} : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> (!fir.box<!fir.array<?x!fir.logical<4>>>, !fir.box<!fir.array<?x!fir.logical<4>>>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFmaxloc4Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V4:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]] = fir.embox %[[V4]](%[[V5]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V6]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V12:.*]] = fir.convert %[[V2]]#1 : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %c4_i32, {{.*}}, {{.*}}, %[[V12]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V17:.*]] = fir.shape_shift %[[V15]]#0, %[[V15]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V18:.*]]:2 = hlfir.declare %[[V16]](%[[V17]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V19]] to %[[V3]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V19]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -205,42 +201,38 @@ func.func @_QPmaxloc5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"})
}
// CHECK-LABEL: func.func @_QPmaxloc5(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]] = fir.alloca !fir.logical<4>
// CHECK-NEXT: %[[V2:.*]] = fir.address_of(@_QFmaxloc5Ea) : !fir.ref<!fir.array<2x2xi32>>
-// CHECK-NEXT: %c2 = arith.constant 2 : index
-// CHECK-NEXT: %c2_0 = arith.constant 2 : index
-// CHECK-NEXT: %[[V3:.*]] = fir.shape %c2, %c2_0 : (index, index) -> !fir.shape<2>
+// CHECK-NEXT: %[[V3:.*]] = fir.shape %[[C2]], %[[C2]] : (index, index) -> !fir.shape<2>
// CHECK-NEXT: %[[V4:.*]]:2 = hlfir.declare %[[V2]](%[[V3]]) {uniq_name = "_QFmaxloc5Ea"} : (!fir.ref<!fir.array<2x2xi32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<2x2xi32>>, !fir.ref<!fir.array<2x2xi32>>)
-// CHECK-NEXT: %c2_1 = arith.constant 2 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c2_1 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C2]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]]:2 = hlfir.declare %[[ARG0]](%[[V5]]) {uniq_name = "_QFmaxloc5Es"} : (!fir.ref<!fir.array<2xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<2xi32>>, !fir.ref<!fir.array<2xi32>>)
-// CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
-// CHECK-NEXT: %[[V7:.*]] = fir.shape %c2, %c2_0 : (index, index) -> !fir.shape<2>
+// CHECK-NEXT: %[[V7:.*]] = fir.shape %[[C2]], %[[C2]] : (index, index) -> !fir.shape<2>
// CHECK-NEXT: %[[V8:.*]] = fir.embox %[[V4]]#1(%[[V7]]) : (!fir.ref<!fir.array<2x2xi32>>, !fir.shape<2>) -> !fir.box<!fir.array<2x2xi32>>
-// CHECK-NEXT: %[[V9:.*]] = fir.convert %true : (i1) -> !fir.logical<4>
+// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[TRUE]] : (i1) -> !fir.logical<4>
// CHECK-NEXT: fir.store %[[V9]] to %[[V1]] : !fir.ref<!fir.logical<4>>
// CHECK-NEXT: %[[V10:.*]] = fir.embox %[[V1]] : (!fir.ref<!fir.logical<4>>) -> !fir.box<!fir.logical<4>>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V11:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V12:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V12:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V13:.*]] = fir.embox %[[V11]](%[[V12]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V13]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V15:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V16:.*]] = fir.convert %[[V8]] : (!fir.box<!fir.array<2x2xi32>>) -> !fir.box<none>
// CHECK: %[[V19:.*]] = fir.convert %[[V10]] : (!fir.box<!fir.logical<4>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V20:.*]] = fir.call @_FortranAMaxlocDim(%[[V15]], %[[V16]], %c4_i32, %c1_i32, {{.*}}, {{.*}}, %[[V19]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V20:.*]] = fir.call @_FortranAMaxlocDim(%[[V15]], %[[V16]], %[[C4]], %[[C1]], {{.*}}, {{.*}}, %[[V19]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V21:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_2 = arith.constant 0 : index
-// CHECK-NEXT: %[[V22:.*]]:3 = fir.box_dims %[[V21]], %c0_2 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V22:.*]]:3 = fir.box_dims %[[V21]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V23:.*]] = fir.box_addr %[[V21]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V24:.*]] = fir.shape_shift %[[V22]]#0, %[[V22]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V25:.*]]:2 = hlfir.declare %[[V23]](%[[V24]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true_3 = arith.constant true
-// CHECK-NEXT: %[[V26:.*]] = hlfir.as_expr %[[V25]]#0 move %true_3 : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V26:.*]] = hlfir.as_expr %[[V25]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V26]] to %[[V6]]#0 : !hlfir.expr<?xi32>, !fir.ref<!fir.array<2xi32>>
// CHECK-NEXT: hlfir.destroy %[[V26]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -258,29 +250,28 @@ func.func @_QPmaxloc6(%arg0: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_n
// CHECK-LABEL: func.func @_QPmaxloc6(
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFmaxloc6Ea"} : (!fir.box<!fir.array<?x!fir.char<1,?>>>) -> (!fir.box<!fir.array<?x!fir.char<1,?>>>, !fir.box<!fir.array<?x!fir.char<1,?>>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFmaxloc4Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V3:.*]] = fir.absent !fir.box<i1>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V4:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]] = fir.embox %[[V4]](%[[V5]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V6]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?x!fir.char<1,?>>>) -> !fir.box<none>
// CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box<i1>) -> !fir.box<none>
-// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocCharacter(%[[V8]], %[[V9]], %c4_i32, {{.*}}, {{.*}}, %[[V12]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocCharacter(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V17:.*]] = fir.shape_shift %[[V15]]#0, %[[V15]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V18:.*]]:2 = hlfir.declare %[[V16]](%[[V17]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V19]] to %[[V2]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V19]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -304,22 +295,22 @@ func.func @_QPmaxloc7(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK: %[[ARG2:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>> {fir.bindc_name = "m"}
// CHECK: %[[ARG3:.*]]: !fir.ref<!fir.logical<4>> {fir.bindc_name = "b"}
// CHECK: %[[ARG4:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<i32>>
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<i32>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFFtestEa"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG3]] {uniq_name = "_QFFtestEb"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFFtestEd"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
// CHECK-NEXT: %[[V4:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFFtestEm"} : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> (!fir.box<!fir.array<?x!fir.logical<4>>>, !fir.box<!fir.array<?x!fir.logical<4>>>)
// CHECK-NEXT: %[[V5:.*]]:2 = hlfir.declare %[[ARG4]] {uniq_name = "_QFFtestEs"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V6:.*]] = fir.load %[[V3]]#0 : !fir.ref<i32>
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V7:.*]] = fir.zero_bits !fir.heap<i32>
// CHECK-NEXT: %[[V8:.*]] = fir.embox %[[V7]] : (!fir.heap<i32>) -> !fir.box<!fir.heap<i32>>
// CHECK-NEXT: fir.store %[[V8]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<i32>>>
// CHECK: %[[V10:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<i32>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V11:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V14:.*]] = fir.convert %[[V4]]#1 : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V15:.*]] = fir.call @_FortranAMaxlocDim(%[[V10]], %[[V11]], %c4_i32, %[[V6]], {{.*}}, {{.*}}, %[[V14]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V15:.*]] = fir.call @_FortranAMaxlocDim(%[[V10]], %[[V11]], %[[C4]], %[[V6]], {{.*}}, {{.*}}, %[[V14]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V16:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<i32>>>
// CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V16]] : (!fir.box<!fir.heap<i32>>) -> !fir.heap<i32>
// CHECK-NEXT: %[[V18:.*]] = fir.load %[[V17]] : !fir.heap<i32>
diff --git a/flang/test/HLFIR/maxval-lowering.fir b/flang/test/HLFIR/maxval-lowering.fir
index 3eb36cdef2cb..5a49ed5273ef 100644
--- a/flang/test/HLFIR/maxval-lowering.fir
+++ b/flang/test/HLFIR/maxval-lowering.fir
@@ -37,6 +37,7 @@ func.func @_QPmaxval2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xi32>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>>
// CHECK: %[[ARG2:.*]]: !fir.ref<index>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[ARRAY:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
@@ -63,7 +64,6 @@ func.func @_QPmaxval2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// TODO: fix alias analysis in hlfir.assign bufferization
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[ASEXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK: hlfir.assign %[[ASEXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[ASEXPR]]
@@ -190,6 +190,7 @@ func.func @_QPmaxval6(%arg0: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_n
// CHECK-LABEL: func.func @_QPmaxval6(
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x!fir.char<1,?>>>
// CHECK: %[[ARG1:.*]]: !fir.boxchar<1>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[ARRAY:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[UNBOXED:.*]]:2 = fir.unboxchar %[[ARG1]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[UNBOXED]]#0 typeparams %[[UNBOXED]]#1
@@ -210,7 +211,6 @@ func.func @_QPmaxval6(%arg0: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_n
// CHECK: %[[BOX_ELESIZE:.*]] = fir.box_elesize %[[RET]]
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]] typeparams %[[BOX_ELESIZE]] {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[ASEXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.boxchar<1>, i1) -> !hlfir.expr<!fir.char<1,?>>
// CHECK: hlfir.assign %[[ASEXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[ASEXPR]]
diff --git a/flang/test/HLFIR/minloc-lowering.fir b/flang/test/HLFIR/minloc-lowering.fir
index fede0a195012..6f3cbd171445 100644
--- a/flang/test/HLFIR/minloc-lowering.fir
+++ b/flang/test/HLFIR/minloc-lowering.fir
@@ -13,29 +13,28 @@ func.func @_QPminloc1(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK-LABEL: func.func @_QPminloc1(
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFminloc1Ea"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFminloc1Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V3:.*]] = fir.absent !fir.box<i1>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V4:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]] = fir.embox %[[V4]](%[[V5]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V6]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box<i1>) -> !fir.box<none>
-// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %c4_i32, {{.*}}, {{.*}}, %[[V12]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V17:.*]] = fir.shape_shift %[[V15]]#0, %[[V15]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V18:.*]]:2 = hlfir.declare %[[V16]](%[[V17]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V19]] to %[[V2]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V19]] : !hlfir.expr<?xi32>
@@ -55,32 +54,31 @@ func.func @_QPminloc2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
// CHECK: %[[ARG2:.*]]: !fir.ref<index>
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFminloc2Ea"} : (!fir.box<!fir.array<?x?xi32>>) -> (!fir.box<!fir.array<?x?xi32>>, !fir.box<!fir.array<?x?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFminloc2Ed"} : (!fir.ref<index>) -> (!fir.ref<index>, !fir.ref<index>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFminloc2Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V4:.*]] = fir.load %[[V2]]#0 : !fir.ref<index>
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V5:.*]] = fir.convert %[[V4]] : (index) -> i32
// CHECK-NEXT: %[[V6:.*]] = fir.absent !fir.box<i1>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V7:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V8:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V8:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V9:.*]] = fir.embox %[[V7]](%[[V8]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V9]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V11:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V12:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?x?xi32>>) -> !fir.box<none>
// CHECK: %[[V15:.*]] = fir.convert %[[V6]] : (!fir.box<i1>) -> !fir.box<none>
-// CHECK-NEXT: %[[V16:.*]] = fir.call @_FortranAMinlocDim(%[[V11]], %[[V12]], %c4_i32, %[[V5]], {{.*}}, {{.*}}, %[[V15]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V16:.*]] = fir.call @_FortranAMinlocDim(%[[V11]], %[[V12]], %[[C4]], %[[V5]], {{.*}}, {{.*}}, %[[V15]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V17:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V18:.*]]:3 = fir.box_dims %[[V17]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V18:.*]]:3 = fir.box_dims %[[V17]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V19:.*]] = fir.box_addr %[[V17]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V20:.*]] = fir.shape_shift %[[V18]]#0, %[[V18]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V21:.*]]:2 = hlfir.declare %[[V19]](%[[V20]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V22:.*]] = hlfir.as_expr %[[V21]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V22:.*]] = hlfir.as_expr %[[V21]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V22]] to %[[V3]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V22]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -100,30 +98,29 @@ func.func @_QPminloc3(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
// CHECK: %[[ARG2:.*]]: !fir.ref<!fir.logical<4>>
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFminloc3Ea"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFminloc3Em"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFminloc3Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V4:.*]] = fir.embox %[[V2]]#1 : (!fir.ref<!fir.logical<4>>) -> !fir.box<!fir.logical<4>>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V5:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V6:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V6:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V7:.*]] = fir.embox %[[V5]](%[[V6]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V7]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V9:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V10:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V13:.*]] = fir.convert %[[V4]] : (!fir.box<!fir.logical<4>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V14:.*]] = fir.call @_FortranAMinlocInteger4(%[[V9]], %[[V10]], %c4_i32, {{.*}}, {{.*}}, %[[V13]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V14:.*]] = fir.call @_FortranAMinlocInteger4(%[[V9]], %[[V10]], %[[C4]], {{.*}}, {{.*}}, %[[V13]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V15:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V16:.*]]:3 = fir.box_dims %[[V15]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V16:.*]]:3 = fir.box_dims %[[V15]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V15]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V18:.*]] = fir.shape_shift %[[V16]]#0, %[[V16]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V19:.*]]:2 = hlfir.declare %[[V17]](%[[V18]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V20:.*]] = hlfir.as_expr %[[V19]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V20:.*]] = hlfir.as_expr %[[V19]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V20]] to %[[V3]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V20]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -143,29 +140,28 @@ func.func @_QPminloc4(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
// CHECK: %[[ARG2:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>>
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFminloc4Ea"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFminloc4Em"} : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> (!fir.box<!fir.array<?x!fir.logical<4>>>, !fir.box<!fir.array<?x!fir.logical<4>>>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFminloc4Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V4:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]] = fir.embox %[[V4]](%[[V5]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V6]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V12:.*]] = fir.convert %[[V2]]#1 : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %c4_i32, {{.*}}, {{.*}}, %[[V12]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V17:.*]] = fir.shape_shift %[[V15]]#0, %[[V15]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V18:.*]]:2 = hlfir.declare %[[V16]](%[[V17]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V19]] to %[[V3]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V19]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -205,42 +201,38 @@ func.func @_QPminloc5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"})
}
// CHECK-LABEL: func.func @_QPminloc5(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : i32
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]] = fir.alloca !fir.logical<4>
// CHECK-NEXT: %[[V2:.*]] = fir.address_of(@_QFminloc5Ea) : !fir.ref<!fir.array<2x2xi32>>
-// CHECK-NEXT: %c2 = arith.constant 2 : index
-// CHECK-NEXT: %c2_0 = arith.constant 2 : index
-// CHECK-NEXT: %[[V3:.*]] = fir.shape %c2, %c2_0 : (index, index) -> !fir.shape<2>
+// CHECK-NEXT: %[[V3:.*]] = fir.shape %[[C2]], %[[C2]] : (index, index) -> !fir.shape<2>
// CHECK-NEXT: %[[V4:.*]]:2 = hlfir.declare %[[V2]](%[[V3]]) {uniq_name = "_QFminloc5Ea"} : (!fir.ref<!fir.array<2x2xi32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<2x2xi32>>, !fir.ref<!fir.array<2x2xi32>>)
-// CHECK-NEXT: %c2_1 = arith.constant 2 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c2_1 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C2]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]]:2 = hlfir.declare %[[ARG0]](%[[V5]]) {uniq_name = "_QFminloc5Es"} : (!fir.ref<!fir.array<2xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<2xi32>>, !fir.ref<!fir.array<2xi32>>)
-// CHECK-NEXT: %c1_i32 = arith.constant 1 : i32
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
-// CHECK-NEXT: %[[V7:.*]] = fir.shape %c2, %c2_0 : (index, index) -> !fir.shape<2>
+// CHECK-NEXT: %[[V7:.*]] = fir.shape %[[C2]], %[[C2]] : (index, index) -> !fir.shape<2>
// CHECK-NEXT: %[[V8:.*]] = fir.embox %[[V4]]#1(%[[V7]]) : (!fir.ref<!fir.array<2x2xi32>>, !fir.shape<2>) -> !fir.box<!fir.array<2x2xi32>>
-// CHECK-NEXT: %[[V9:.*]] = fir.convert %true : (i1) -> !fir.logical<4>
+// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[TRUE]] : (i1) -> !fir.logical<4>
// CHECK-NEXT: fir.store %[[V9]] to %[[V1]] : !fir.ref<!fir.logical<4>>
// CHECK-NEXT: %[[V10:.*]] = fir.embox %[[V1]] : (!fir.ref<!fir.logical<4>>) -> !fir.box<!fir.logical<4>>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V11:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V12:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V12:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V13:.*]] = fir.embox %[[V11]](%[[V12]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V13]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V15:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V16:.*]] = fir.convert %[[V8]] : (!fir.box<!fir.array<2x2xi32>>) -> !fir.box<none>
// CHECK: %[[V19:.*]] = fir.convert %[[V10]] : (!fir.box<!fir.logical<4>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V20:.*]] = fir.call @_FortranAMinlocDim(%[[V15]], %[[V16]], %c4_i32, %c1_i32, {{.*}}, {{.*}}, %[[V19]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V20:.*]] = fir.call @_FortranAMinlocDim(%[[V15]], %[[V16]], %[[C4]], %[[C1]], {{.*}}, {{.*}}, %[[V19]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V21:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_2 = arith.constant 0 : index
-// CHECK-NEXT: %[[V22:.*]]:3 = fir.box_dims %[[V21]], %c0_2 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V22:.*]]:3 = fir.box_dims %[[V21]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V23:.*]] = fir.box_addr %[[V21]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V24:.*]] = fir.shape_shift %[[V22]]#0, %[[V22]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V25:.*]]:2 = hlfir.declare %[[V23]](%[[V24]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true_3 = arith.constant true
-// CHECK-NEXT: %[[V26:.*]] = hlfir.as_expr %[[V25]]#0 move %true_3 : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V26:.*]] = hlfir.as_expr %[[V25]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V26]] to %[[V6]]#0 : !hlfir.expr<?xi32>, !fir.ref<!fir.array<2xi32>>
// CHECK-NEXT: hlfir.destroy %[[V26]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -258,29 +250,28 @@ func.func @_QPminloc6(%arg0: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_n
// CHECK-LABEL: func.func @_QPminloc6(
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_name = "a"}
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFminloc6Ea"} : (!fir.box<!fir.array<?x!fir.char<1,?>>>) -> (!fir.box<!fir.array<?x!fir.char<1,?>>>, !fir.box<!fir.array<?x!fir.char<1,?>>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFminloc4Es"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
// CHECK-NEXT: %[[V3:.*]] = fir.absent !fir.box<i1>
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V4:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
-// CHECK-NEXT: %c0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V5:.*]] = fir.shape %c0 : (index) -> !fir.shape<1>
+// CHECK-NEXT: %[[V5:.*]] = fir.shape %[[C0]] : (index) -> !fir.shape<1>
// CHECK-NEXT: %[[V6:.*]] = fir.embox %[[V4]](%[[V5]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-NEXT: fir.store %[[V6]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
// CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?x!fir.char<1,?>>>) -> !fir.box<none>
// CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box<i1>) -> !fir.box<none>
-// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocCharacter(%[[V8]], %[[V9]], %c4_i32, {{.*}}, {{.*}}, %[[V12]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocCharacter(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-// CHECK-NEXT: %c0_0 = arith.constant 0 : index
-// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %c0_0 : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+// CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
// CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
// CHECK-NEXT: %[[V17:.*]] = fir.shape_shift %[[V15]]#0, %[[V15]]#1 : (index, index) -> !fir.shapeshift<1>
// CHECK-NEXT: %[[V18:.*]]:2 = hlfir.declare %[[V16]](%[[V17]]) {uniq_name = ".tmp.intrinsic_result"} : (!fir.heap<!fir.array<?xi32>>, !fir.shapeshift<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-// CHECK-NEXT: %true = arith.constant true
-// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %true : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
+// CHECK-NEXT: %[[V19:.*]] = hlfir.as_expr %[[V18]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK-NEXT: hlfir.assign %[[V19]] to %[[V2]]#0 : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
// CHECK-NEXT: hlfir.destroy %[[V19]] : !hlfir.expr<?xi32>
// CHECK-NEXT: return
@@ -304,22 +295,22 @@ func.func @_QPminloc7(%arg0: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"},
// CHECK: %[[ARG2:.*]]: !fir.box<!fir.array<?x!fir.logical<4>>> {fir.bindc_name = "m"}
// CHECK: %[[ARG3:.*]]: !fir.ref<!fir.logical<4>> {fir.bindc_name = "b"}
// CHECK: %[[ARG4:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "s"}
-// CHECK-NEXT: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<i32>>
+// CHECK-DAG: %[[FALSE:.*]] = arith.constant false
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : i32
+// CHECK: %[[V0:.*]] = fir.alloca !fir.box<!fir.heap<i32>>
// CHECK-NEXT: %[[V1:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFFtestEa"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V2:.*]]:2 = hlfir.declare %[[ARG3]] {uniq_name = "_QFFtestEb"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
// CHECK-NEXT: %[[V3:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFFtestEd"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
// CHECK-NEXT: %[[V4:.*]]:2 = hlfir.declare %[[ARG2]] {uniq_name = "_QFFtestEm"} : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> (!fir.box<!fir.array<?x!fir.logical<4>>>, !fir.box<!fir.array<?x!fir.logical<4>>>)
// CHECK-NEXT: %[[V5:.*]]:2 = hlfir.declare %[[ARG4]] {uniq_name = "_QFFtestEs"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
// CHECK-NEXT: %[[V6:.*]] = fir.load %[[V3]]#0 : !fir.ref<i32>
-// CHECK-NEXT: %c4_i32 = arith.constant 4 : i32
-// CHECK-NEXT: %false = arith.constant false
// CHECK-NEXT: %[[V7:.*]] = fir.zero_bits !fir.heap<i32>
// CHECK-NEXT: %[[V8:.*]] = fir.embox %[[V7]] : (!fir.heap<i32>) -> !fir.box<!fir.heap<i32>>
// CHECK-NEXT: fir.store %[[V8]] to %[[V0]] : !fir.ref<!fir.box<!fir.heap<i32>>>
// CHECK: %[[V10:.*]] = fir.convert %[[V0]] : (!fir.ref<!fir.box<!fir.heap<i32>>>) -> !fir.ref<!fir.box<none>>
// CHECK-NEXT: %[[V11:.*]] = fir.convert %[[V1]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.box<none>
// CHECK: %[[V14:.*]] = fir.convert %[[V4]]#1 : (!fir.box<!fir.array<?x!fir.logical<4>>>) -> !fir.box<none>
-// CHECK-NEXT: %[[V15:.*]] = fir.call @_FortranAMinlocDim(%[[V10]], %[[V11]], %c4_i32, %[[V6]], {{.*}}, {{.*}}, %[[V14]], %false) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
+// CHECK-NEXT: %[[V15:.*]] = fir.call @_FortranAMinlocDim(%[[V10]], %[[V11]], %[[C4]], %[[V6]], {{.*}}, {{.*}}, %[[V14]], %[[FALSE]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, i32, !fir.ref<i8>, i32, !fir.box<none>, i1) -> none
// CHECK-NEXT: %[[V16:.*]] = fir.load %[[V0]] : !fir.ref<!fir.box<!fir.heap<i32>>>
// CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V16]] : (!fir.box<!fir.heap<i32>>) -> !fir.heap<i32>
// CHECK-NEXT: %[[V18:.*]] = fir.load %[[V17]] : !fir.heap<i32>
diff --git a/flang/test/HLFIR/minval-lowering.fir b/flang/test/HLFIR/minval-lowering.fir
index fc8fe92dc08a..d03dec155230 100644
--- a/flang/test/HLFIR/minval-lowering.fir
+++ b/flang/test/HLFIR/minval-lowering.fir
@@ -37,6 +37,7 @@ func.func @_QPminval2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xi32>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>>
// CHECK: %[[ARG2:.*]]: !fir.ref<index>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[ARRAY:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
@@ -63,7 +64,6 @@ func.func @_QPminval2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// TODO: fix alias analysis in hlfir.assign bufferization
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[ASEXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK: hlfir.assign %[[ASEXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[ASEXPR]]
@@ -151,6 +151,7 @@ func.func @_QPminval5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"})
}
// CHECK-LABEL: func.func @_QPminval5(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2xi32>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[RET_BOX:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-DAG: %[[RET_ADDR:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -161,7 +162,6 @@ func.func @_QPminval5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"})
// CHECK-DAG: %[[RES_VAR:.*]] = hlfir.declare %[[ARG0]](%[[RES_SHAPE:.*]])
// CHECK-DAG: %[[MASK_ALLOC:.*]] = fir.alloca !fir.logical<4>
-// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK_VAL:.*]] = fir.convert %[[TRUE]] : (i1) -> !fir.logical<4>
// CHECK-DAG: fir.store %[[MASK_VAL]] to %[[MASK_ALLOC]] : !fir.ref<!fir.logical<4>>
// CHECK-DAG: %[[MASK_BOX:.*]] = fir.embox %[[MASK_ALLOC]]
@@ -190,6 +190,7 @@ func.func @_QPminval6(%arg0: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_n
// CHECK-LABEL: func.func @_QPminval6(
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x!fir.char<1,?>>>
// CHECK: %[[ARG1:.*]]: !fir.boxchar<1>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[ARRAY:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[UNBOXED:.*]]:2 = fir.unboxchar %[[ARG1]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[UNBOXED]]#0 typeparams %[[UNBOXED]]#1
@@ -210,7 +211,6 @@ func.func @_QPminval6(%arg0: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_n
// CHECK: %[[BOX_ELESIZE:.*]] = fir.box_elesize %[[RET]]
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]] typeparams %[[BOX_ELESIZE]] {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[ASEXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.boxchar<1>, i1) -> !hlfir.expr<!fir.char<1,?>>
// CHECK: hlfir.assign %[[ASEXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[ASEXPR]]
diff --git a/flang/test/HLFIR/mul_transpose.f90 b/flang/test/HLFIR/mul_transpose.f90
index a4ec704547b9..378ecfe4886a 100644
--- a/flang/test/HLFIR/mul_transpose.f90
+++ b/flang/test/HLFIR/mul_transpose.f90
@@ -35,24 +35,22 @@ endsubroutine
! CHECK-LOWERING: %[[TRANSPOSE_RES_LD:.*]] = fir.load %[[TRANSPOSE_RES_BOX:.*]]
! CHECK-LOWERING: %[[TRANSPOSE_RES_ADDR:.*]] = fir.box_addr %[[TRANSPOSE_RES_LD]]
! CHECK-LOWERING: %[[TRANSPOSE_RES_VAR:.*]]:2 = hlfir.declare %[[TRANSPOSE_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
-! CHECK-LOWERING: %[[TRUE:.*]] = arith.constant true
-! CHECK-LOWERING: %[[TRANSPOSE_EXPR:.*]] = hlfir.as_expr %[[TRANSPOSE_RES_VAR]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x?xf32>>, i1) -> !hlfir.expr<?x?xf32>
+! CHECK-LOWERING: %[[TRANSPOSE_EXPR:.*]] = hlfir.as_expr %[[TRANSPOSE_RES_VAR]]#0 move {{.*}} : (!fir.box<!fir.array<?x?xf32>>, i1) -> !hlfir.expr<?x?xf32>
! CHECK-LOWERING: %[[TRANSPOSE_ASSOC:.*]]:3 = hlfir.associate %[[TRANSPOSE_EXPR]]({{.*}}) {adapt.valuebyref}
-! CHECK-LOWERING: (!hlfir.expr<?x?xf32>, !fir.shape<2>) -> (!fir.box<!fir.array<?x?xf32>>, !fir.ref<!fir.array<?x?xf32>>, i1)
+! CHECK-LOWERING: (!hlfir.expr<?x?xf32>, !fir.shape<2>) -> (!fir.ref<!fir.array<1x2xf32>>, !fir.ref<!fir.array<1x2xf32>>, i1)
! CHECK-LOWERING: %[[LHS_BOX:.*]] = fir.embox %[[TRANSPOSE_ASSOC]]#1
! CHECK-LOWERING: %[[B_BOX:.*]] = fir.embox %[[B_DECL]]#1(%{{.*}})
! CHECK-LOWERING: %[[MUL_CONV_RES:.*]] = fir.convert %[[MUL_RES_BOX:.*]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
-! CHECK-LOWERING: %[[LHS_CONV:.*]] = fir.convert %[[LHS_BOX]] : (!fir.box<!fir.array<?x?xf32>>) -> !fir.box<none>
+! CHECK-LOWERING: %[[LHS_CONV:.*]] = fir.convert %[[LHS_BOX]] : (!fir.box<!fir.array<1x2xf32>>) -> !fir.box<none>
! CHECK-LOWERING: %[[B_BOX_CONV:.*]] = fir.convert %[[B_BOX]] : (!fir.box<!fir.array<2x2xf32>>) -> !fir.box<none>
! CHECK-LOWERING: fir.call @_FortranAMatmul(%[[MUL_CONV_RES]], %[[LHS_CONV]], %[[B_BOX_CONV]], %[[LOC_STR2:.*]], %[[LOC_N2:.*]])
! CHECK-LOWERING: %[[MUL_RES_LD:.*]] = fir.load %[[MUL_RES_BOX:.*]]
! CHECK-LOWERING: %[[MUL_RES_ADDR:.*]] = fir.box_addr %[[MUL_RES_LD]]
! CHECK-LOWERING: %[[MUL_RES_VAR:.*]]:2 = hlfir.declare %[[MUL_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
-! CHECK-LOWERING: %[[TRUE2:.*]] = arith.constant true
-! CHECK-LOWERING: %[[MUL_EXPR:.*]] = hlfir.as_expr %[[MUL_RES_VAR]]#0 move %[[TRUE2]] : (!fir.box<!fir.array<?x?xf32>>, i1) -> !hlfir.expr<?x?xf32>
+! CHECK-LOWERING: %[[MUL_EXPR:.*]] = hlfir.as_expr %[[MUL_RES_VAR]]#0 move {{.*}} : (!fir.box<!fir.array<?x?xf32>>, i1) -> !hlfir.expr<?x?xf32>
-! CHECK-LOWERING: hlfir.end_associate %[[TRANSPOSE_ASSOC]]#1, %[[TRANSPOSE_ASSOC]]#2 : !fir.ref<!fir.array<?x?xf32>>, i1
+! CHECK-LOWERING: hlfir.end_associate %[[TRANSPOSE_ASSOC]]#1, %[[TRANSPOSE_ASSOC]]#2 : !fir.ref<!fir.array<1x2xf32>>, i1
! CHECK-LOWERING-NEXT: hlfir.assign %[[MUL_EXPR]] to %[[RES_DECL]]#0 : !hlfir.expr<?x?xf32>, !fir.ref<!fir.array<1x2xf32>>
! CHECK-LOWERING-NEXT: hlfir.destroy %[[MUL_EXPR]]
! CHECK-LOWERING-NEXT: hlfir.destroy %[[TRANSPOSE_EXPR]]
@@ -66,8 +64,7 @@ endsubroutine
! CHECK-LOWERING-OPT: %[[MUL_RES_LD:.*]] = fir.load %[[MUL_RES_BOX:.*]]
! CHECK-LOWERING-OPT: %[[MUL_RES_ADDR:.*]] = fir.box_addr %[[MUL_RES_LD]]
! CHECK-LOWERING-OPT: %[[MUL_RES_VAR:.*]]:2 = hlfir.declare %[[MUL_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
-! CHECK-LOWERING-OPT: %[[TRUE2:.*]] = arith.constant true
-! CHECK-LOWERING-OPT: %[[MUL_EXPR:.*]] = hlfir.as_expr %[[MUL_RES_VAR]]#0 move %[[TRUE2]] : (!fir.box<!fir.array<?x?xf32>>, i1) -> !hlfir.expr<?x?xf32>
+! CHECK-LOWERING-OPT: %[[MUL_EXPR:.*]] = hlfir.as_expr %[[MUL_RES_VAR]]#0 move {{.*}} : (!fir.box<!fir.array<?x?xf32>>, i1) -> !hlfir.expr<?x?xf32>
! CHECK-LOWERING-OPT: hlfir.assign %[[MUL_EXPR]] to %[[RES_DECL]]#0 : !hlfir.expr<?x?xf32>, !fir.ref<!fir.array<1x2xf32>>
! CHECK-LOWERING-OPT: hlfir.destroy %[[MUL_EXPR]]
@@ -76,25 +73,23 @@ endsubroutine
! CHECK-BUFFERING: %[[TRANSPOSE_RES_LD:.*]] = fir.load %[[TRANSPOSE_RES_BOX:.*]]
! CHECK-BUFFERING: %[[TRANSPOSE_RES_ADDR:.*]] = fir.box_addr %[[TRANSPOSE_RES_LD]]
! CHECK-BUFFERING: %[[TRANSPOSE_RES_VAR:.*]]:2 = hlfir.declare %[[TRANSPOSE_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
-! CHECK-BUFFERING: %[[TRUE:.*]] = arith.constant true
! CHECK-BUFFERING: %[[TUPLE0:.*]] = fir.undefined tuple<!fir.box<!fir.array<?x?xf32>>, i1>
-! CHECK-BUFFERING: %[[TUPLE1:.*]] = fir.insert_value %[[TUPLE0]], %[[TRUE]], [1 : index]
+! CHECK-BUFFERING: %[[TUPLE1:.*]] = fir.insert_value %[[TUPLE0]], {{.*}}, [1 : index]
! CHECK-BUFFERING: %[[TUPLE2:.*]] = fir.insert_value %[[TUPLE1]], %[[TRANSPOSE_RES_VAR]]#0, [0 : index]
-! CHECK-BUFFERING: %[[TRANSPOSE_RES_REF:.*]] = fir.convert %[[TRANSPOSE_RES_VAR]]#1 : (!fir.heap<!fir.array<?x?xf32>>) -> !fir.ref<!fir.array<?x?xf32>>
+! CHECK-BUFFERING: %[[TRANSPOSE_RES_REF:.*]] = fir.convert %[[TRANSPOSE_RES_VAR]]#1 : (!fir.heap<!fir.array<?x?xf32>>) -> !fir.ref<!fir.array<1x2xf32>>
! CHECK-BUFFERING: %[[TRANSPOSE_RES_BOX:.*]] = fir.embox %[[TRANSPOSE_RES_REF]]({{.*}})
-! CHECK-BUFFERING: %[[LHS_CONV:.*]] = fir.convert %[[TRANSPOSE_RES_BOX]] : (!fir.box<!fir.array<?x?xf32>>) -> !fir.box<none>
+! CHECK-BUFFERING: %[[LHS_CONV:.*]] = fir.convert %[[TRANSPOSE_RES_BOX]] : (!fir.box<!fir.array<1x2xf32>>) -> !fir.box<none>
! [argument handling unchanged]
! CHECK-BUFFERING: fir.call @_FortranAMatmul(
! CHECK-BUFFERING: %[[MUL_RES_LD:.*]] = fir.load %[[MUL_RES_BOX:.*]]
! CHECK-BUFFERING: %[[MUL_RES_ADDR:.*]] = fir.box_addr %[[MUL_RES_LD]]
! CHECK-BUFFERING: %[[MUL_RES_VAR:.*]]:2 = hlfir.declare %[[MUL_RES_ADDR]]({{.*}}) {uniq_name = ".tmp.intrinsic_result"}
-! CHECK-BUFFERING: %[[TRUE2:.*]] = arith.constant true
! CHECK-BUFFERING: %[[TUPLE3:.*]] = fir.undefined tuple<!fir.box<!fir.array<?x?xf32>>, i1>
-! CHECK-BUFFERING: %[[TUPLE4:.*]] = fir.insert_value %[[TUPLE3]], %[[TRUE2]], [1 : index]
+! CHECK-BUFFERING: %[[TUPLE4:.*]] = fir.insert_value %[[TUPLE3]], {{.*}}, [1 : index]
! CHECK-BUFFERING: %[[TUPLE5:.*]] = fir.insert_value %[[TUPLE4]], %[[MUL_RES_VAR]]#0, [0 : index]
-! CHECK-BUFFERING: %[[TRANSPOSE_RES_HEAP:.*]] = fir.convert %[[TRANSPOSE_RES_REF]] : (!fir.ref<!fir.array<?x?xf32>>) -> !fir.heap<!fir.array<?x?xf32>>
+! CHECK-BUFFERING: %[[TRANSPOSE_RES_HEAP:.*]] = fir.convert %[[TRANSPOSE_RES_REF]] : (!fir.ref<!fir.array<1x2xf32>>) -> !fir.heap<!fir.array<1x2xf32>>
! CHECK-BUFFERING-NEXT: fir.freemem %[[TRANSPOSE_RES_HEAP]]
! CHECK-BUFFERING-NEXT: hlfir.assign %[[MUL_RES_VAR]]#0 to %[[RES_DECL]]#0 : !fir.box<!fir.array<?x?xf32>>, !fir.ref<!fir.array<1x2xf32>>
! CHECK-BUFFERING-NEXT: %[[MUL_RES_HEAP:.*]] = fir.box_addr %[[MUL_RES_VAR]]#0 : (!fir.box<!fir.array<?x?xf32>>) -> !fir.heap<!fir.array<?x?xf32>>
diff --git a/flang/test/HLFIR/product-lowering.fir b/flang/test/HLFIR/product-lowering.fir
index 337b5fc3d73d..dd3506937cac 100644
--- a/flang/test/HLFIR/product-lowering.fir
+++ b/flang/test/HLFIR/product-lowering.fir
@@ -39,6 +39,7 @@ func.func @_QPproduct2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xi32>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>>
// CHECK: %[[ARG2:.*]]: !fir.ref<index>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[ARRAY:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
@@ -64,7 +65,6 @@ func.func @_QPproduct2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a
// CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]]
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[EXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK: hlfir.assign %[[EXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[EXPR]]
@@ -141,6 +141,7 @@ func.func @_QPproduct5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"}
// CHECK-LABEL: func.func @_QPproduct5(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2xi32>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[RET_BOX:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-DAG: %[[RET_ADDR:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -151,7 +152,6 @@ func.func @_QPproduct5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"}
// CHECK-DAG: %[[RES_VAR:.*]] = hlfir.declare %[[ARG0]](%[[RES_SHAPE:.*]])
// CHECK-DAG: %[[MASK_ALLOC:.*]] = fir.alloca !fir.logical<4>
-// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK_VAL:.*]] = fir.convert %[[TRUE]] : (i1) -> !fir.logical<4>
// CHECK-DAG: fir.store %[[MASK_VAL]] to %[[MASK_ALLOC]] : !fir.ref<!fir.logical<4>>
// CHECK-DAG: %[[MASK_BOX:.*]] = fir.embox %[[MASK_ALLOC]]
diff --git a/flang/test/HLFIR/sum-lowering.fir b/flang/test/HLFIR/sum-lowering.fir
index e33b9bc028ae..d4a79d278acc 100644
--- a/flang/test/HLFIR/sum-lowering.fir
+++ b/flang/test/HLFIR/sum-lowering.fir
@@ -37,6 +37,7 @@ func.func @_QPsum2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"},
// CHECK: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xi32>>
// CHECK: %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>>
// CHECK: %[[ARG2:.*]]: !fir.ref<index>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[ARRAY:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[RES:.*]]:2 = hlfir.declare %[[ARG1]]
// CHECK-DAG: %[[DIM_VAR:.*]]:2 = hlfir.declare %[[ARG2]]
@@ -63,7 +64,6 @@ func.func @_QPsum2(%arg0: !fir.box<!fir.array<?x?xi32>> {fir.bindc_name = "a"},
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// TODO: fix alias analysis in hlfir.assign bufferization
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[ASEXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?xi32>>, i1) -> !hlfir.expr<?xi32>
// CHECK: hlfir.assign %[[ASEXPR]] to %[[RES]]#0
// CHECK: hlfir.destroy %[[ASEXPR]]
@@ -151,6 +151,7 @@ func.func @_QPsum5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"}) {
}
// CHECK-LABEL: func.func @_QPsum5(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<2xi32>>
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[RET_BOX:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>>
// CHECK-DAG: %[[RET_ADDR:.*]] = fir.zero_bits !fir.heap<!fir.array<?xi32>>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -161,7 +162,6 @@ func.func @_QPsum5(%arg0: !fir.ref<!fir.array<2xi32>> {fir.bindc_name = "s"}) {
// CHECK-DAG: %[[RES_VAR:.*]] = hlfir.declare %[[ARG0]](%[[RES_SHAPE:.*]])
// CHECK-DAG: %[[MASK_ALLOC:.*]] = fir.alloca !fir.logical<4>
-// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[MASK_VAL:.*]] = fir.convert %[[TRUE]] : (i1) -> !fir.logical<4>
// CHECK-DAG: fir.store %[[MASK_VAL]] to %[[MASK_ALLOC]] : !fir.ref<!fir.logical<4>>
// CHECK-DAG: %[[MASK_BOX:.*]] = fir.embox %[[MASK_ALLOC]]
diff --git a/flang/test/HLFIR/transpose-lowering.fir b/flang/test/HLFIR/transpose-lowering.fir
index 733e6f1a6109..9afe8a058b8b 100644
--- a/flang/test/HLFIR/transpose-lowering.fir
+++ b/flang/test/HLFIR/transpose-lowering.fir
@@ -18,6 +18,7 @@ func.func @_QPtranspose1(%arg0: !fir.ref<!fir.array<1x2xi32>> {fir.bindc_name =
// CHECK-LABEL: func.func @_QPtranspose1(
// CHECK: %[[ARG0:.*]]: !fir.ref<!fir.array<1x2xi32>> {fir.bindc_name = "m"}
// CHECK: %[[ARG1:.*]]: !fir.ref<!fir.array<2x1xi32>> {fir.bindc_name = "res"}
+// CHECK-DAG: %[[TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[M_VAR:.*]]:2 = hlfir.declare %[[ARG0]]
// CHECK-DAG: %[[RES_VAR:.*]]:2 = hlfir.declare %[[ARG1]]
@@ -40,7 +41,6 @@ func.func @_QPtranspose1(%arg0: !fir.ref<!fir.array<1x2xi32>> {fir.bindc_name =
// CHECK-NEXT: %[[SHIFT:.*]] = fir.shape_shift %[[BOX_DIMS]]#0, %[[BOX_DIMS]]#1
// TODO: fix alias analysis in hlfir.assign bufferization
// CHECK-NEXT: %[[TMP:.*]]:2 = hlfir.declare %[[ADDR]](%[[SHIFT]]) {uniq_name = ".tmp.intrinsic_result"}
-// CHECK: %[[TRUE:.*]] = arith.constant true
// CHECK: %[[ASEXPR:.*]] = hlfir.as_expr %[[TMP]]#0 move %[[TRUE]] : (!fir.box<!fir.array<?x?xi32>>, i1) -> !hlfir.expr<?x?xi32>
// CHECK: hlfir.assign %[[ASEXPR]] to %[[RES_VAR]]#0
// CHECK: hlfir.destroy %[[ASEXPR]]
diff --git a/flang/test/Integration/OpenMP/copyprivate.f90 b/flang/test/Integration/OpenMP/copyprivate.f90
new file mode 100644
index 000000000000..d32319a18c28
--- /dev/null
+++ b/flang/test/Integration/OpenMP/copyprivate.f90
@@ -0,0 +1,97 @@
+!===----------------------------------------------------------------------===!
+! This directory can be used to add Integration tests involving multiple
+! stages of the compiler (for eg. from Fortran to LLVM IR). It should not
+! contain executable tests. We should only add tests here sparingly and only
+! if there is no other way to test. Repeat this message in each test that is
+! added to this directory and sub-directories.
+!===----------------------------------------------------------------------===!
+
+!RUN: %flang_fc1 -emit-llvm -fopenmp %s -o - | FileCheck %s
+
+!CHECK-DAG: define void @_copy_box_Uxi32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_10xi32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_i64(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_box_Uxi64(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_f32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_2x3xf32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_z32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_10xz32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_l32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_5xl32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_c8x8(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_10xc8x8(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_c16x5(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_rec__QFtest_typesTdt(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_box_heap_Uxi32(ptr %{{.*}}, ptr %{{.*}})
+!CHECK-DAG: define void @_copy_box_ptr_Uxc8x9(ptr %{{.*}}, ptr %{{.*}})
+
+!CHECK-LABEL: define void @_copy_i32(
+!CHECK-SAME: ptr %[[DST:.*]], ptr %[[SRC:.*]]){{.*}} {
+!CHECK-NEXT: %[[SRC_VAL:.*]] = load i32, ptr %[[SRC]]
+!CHECK-NEXT: store i32 %[[SRC_VAL]], ptr %[[DST]]
+!CHECK-NEXT: ret void
+!CHECK-NEXT: }
+
+!CHECK-LABEL: define internal void @test_scalar_..omp_par({{.*}})
+!CHECK: %[[I:.*]] = alloca i32, i64 1
+!CHECK: %[[J:.*]] = alloca i32, i64 1
+!CHECK: %[[DID_IT:.*]] = alloca i32
+!CHECK: store i32 0, ptr %[[DID_IT]]
+!CHECK: %[[THREAD_NUM1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[LOC:.*]])
+!CHECK: %[[RET:.*]] = call i32 @__kmpc_single({{.*}})
+!CHECK: %[[NOT_ZERO:.*]] = icmp ne i32 %[[RET]], 0
+!CHECK: br i1 %[[NOT_ZERO]], label %[[OMP_REGION_BODY:.*]], label %[[OMP_REGION_END:.*]]
+
+!CHECK: [[OMP_REGION_END]]:
+!CHECK: %[[THREAD_NUM2:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[LOC:.*]])
+!CHECK: %[[DID_IT_VAL:.*]] = load i32, ptr %[[DID_IT]]
+!CHECK: call void @__kmpc_copyprivate(ptr @[[LOC]], i32 %[[THREAD_NUM2]], i64 0, ptr %[[I]], ptr @_copy_i32, i32 %[[DID_IT_VAL]])
+!CHECK: %[[THREAD_NUM3:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[LOC]])
+!CHECK: %[[DID_IT_VAL2:.*]] = load i32, ptr %[[DID_IT]]
+!CHECK: call void @__kmpc_copyprivate(ptr @[[LOC]], i32 %[[THREAD_NUM3]], i64 0, ptr %[[J]], ptr @_copy_i32, i32 %[[DID_IT_VAL2]])
+
+!CHECK: [[OMP_REGION_BODY]]:
+!CHECK: br label %[[OMP_SINGLE_REGION:.*]]
+!CHECK: [[OMP_SINGLE_REGION]]:
+!CHECK: store i32 11, ptr %[[I]]
+!CHECK: store i32 22, ptr %[[J]]
+!CHECK: br label %[[OMP_REGION_CONT3:.*]]
+!CHECK: [[OMP_REGION_CONT3:.*]]:
+!CHECK: store i32 1, ptr %[[DID_IT]]
+!CHECK: call void @__kmpc_end_single(ptr @[[LOC]], i32 %[[THREAD_NUM1]])
+!CHECK: br label %[[OMP_REGION_END]]
+subroutine test_scalar()
+ integer :: i, j
+
+ !$omp parallel private(i, j)
+ !$omp single
+ i = 11
+ j = 22
+ !$omp end single copyprivate(i, j)
+ !$omp end parallel
+end subroutine
+
+subroutine test_types(a, n)
+ integer :: a(:), n
+ integer(4) :: i4, i4a(10)
+ integer(8) :: i8, i8a(n)
+ real :: r, ra(2, 3)
+ complex :: z, za(10)
+ logical :: l, la(5)
+ character(kind=1, len=8) :: c1, c1a(10)
+ character(kind=2, len=5) :: c2
+
+ type dt
+ integer :: i
+ real :: r
+ end type
+ type(dt) :: t
+
+ integer, allocatable :: aloc(:)
+ character(kind=1, len=9), pointer :: ptr(:)
+
+ !$omp parallel private(a, i4, i4a, i8, i8a, r, ra, z, za, l, la, c1, c1a, c2, t, aloc, ptr)
+ !$omp single
+ !$omp end single copyprivate(a, i4, i4a, i8, i8a, r, ra, z, za, l, la, c1, c1a, c2, t, aloc, ptr)
+ !$omp end parallel
+end subroutine
diff --git a/flang/test/Lower/HLFIR/allocatable-end-of-scope-dealloc.f90 b/flang/test/Lower/HLFIR/allocatable-end-of-scope-dealloc.f90
index ad4b015ef944..05cae6e5ba6c 100644
--- a/flang/test/Lower/HLFIR/allocatable-end-of-scope-dealloc.f90
+++ b/flang/test/Lower/HLFIR/allocatable-end-of-scope-dealloc.f90
@@ -224,7 +224,7 @@ contains
allocate(x)
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFno_dealloc_host_assocPinternal
+! CHECK-LABEL: func.func private @_QFno_dealloc_host_assocPinternal
! CHECK-NOT: freemem
! CHECK-NOT: Deallocate
! CHECK: return
diff --git a/flang/test/Lower/HLFIR/bindc_internal_proc.f90 b/flang/test/Lower/HLFIR/bindc_internal_proc.f90
index 027c94f95a32..00e24c7016f1 100644
--- a/flang/test/Lower/HLFIR/bindc_internal_proc.f90
+++ b/flang/test/Lower/HLFIR/bindc_internal_proc.f90
@@ -3,7 +3,7 @@
! internal procedures.
! RUN: bbc -emit-hlfir %s -o - | FileCheck %s
-!CHECK: func.func @_QFsub1Pfoo(%{{.*}}: i32
+!CHECK: func.func private @_QFsub1Pfoo(%{{.*}}: i32
subroutine sub1()
call foo(42)
contains
@@ -13,7 +13,7 @@ contains
end subroutine
end subroutine
-!CHECK: func.func @_QFsub2Pfoo(%{{.*}}: i64
+!CHECK: func.func private @_QFsub2Pfoo(%{{.*}}: i64
subroutine sub2()
call foo(42_8)
contains
diff --git a/flang/test/Lower/HLFIR/internal-procedures-2.f90 b/flang/test/Lower/HLFIR/internal-procedures-2.f90
index bb05545bef1a..f1c4780954b2 100644
--- a/flang/test/Lower/HLFIR/internal-procedures-2.f90
+++ b/flang/test/Lower/HLFIR/internal-procedures-2.f90
@@ -23,7 +23,7 @@ contains
end forall
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFhost_procedurePinternal_procedure(
+! CHECK-LABEL: func.func private @_QFhost_procedurePinternal_procedure(
! CHECK: fir.address_of(@_QMmodule_used_by_hostEindexed_by_var) : !fir.ref<!fir.array<2xi32>>
! CHECK: fir.address_of(@_QMmodule_used_by_hostEref_in_forall) : !fir.ref<!fir.array<2xi32>>
! CHECK: fir.address_of(@_QMmodule_used_by_hostEref_in_implied_do) : !fir.ref<i32>
diff --git a/flang/test/Lower/HLFIR/internal-procedures-polymorphic.f90 b/flang/test/Lower/HLFIR/internal-procedures-polymorphic.f90
new file mode 100644
index 000000000000..8645488290d7
--- /dev/null
+++ b/flang/test/Lower/HLFIR/internal-procedures-polymorphic.f90
@@ -0,0 +1,81 @@
+! Test lowering of internal procedure capturing OPTIONAL polymorphic
+! objects.
+! RUN: bbc -emit-hlfir --polymorphic-type -o - %s -I nw | FileCheck %s
+
+
+module captured_optional_polymorphic
+ type sometype
+ end type
+contains
+subroutine test(x, y)
+ class(sometype), optional :: x
+ class(sometype), optional :: y(2:)
+ call internal()
+contains
+ subroutine internal()
+ if (present(x).and.present(y)) then
+ print *, same_type_as(x, y)
+ end if
+ end subroutine
+end
+end module
+
+! CHECK-LABEL: func.func @_QMcaptured_optional_polymorphicPtest(
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare{{.*}}Ex
+! CHECK: %[[VAL_3:.*]] = arith.constant 2 : i64
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i64) -> index
+! CHECK: %[[VAL_5:.*]] = fir.shift %[[VAL_4]] : (index) -> !fir.shift<1>
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare{{.*}}Ey
+! CHECK: %[[VAL_7:.*]] = fir.alloca tuple<!fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>, !fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>>
+! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_9:.*]] = fir.coordinate_of %[[VAL_7]], %[[VAL_8]]
+! CHECK: %[[VAL_10:.*]] = fir.is_present %[[VAL_2]]#1 : (!fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>) -> i1
+! CHECK: fir.if %[[VAL_10]] {
+! CHECK: fir.store %[[VAL_2]]#1 to %[[VAL_9]] : !fir.ref<!fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>>
+! CHECK: } else {
+! CHECK: %[[VAL_11:.*]] = fir.zero_bits !fir.ref<!fir.type<_QMcaptured_optional_polymorphicTsometype>>
+! CHECK: %[[VAL_12:.*]] = fir.embox %[[VAL_11]] : (!fir.ref<!fir.type<_QMcaptured_optional_polymorphicTsometype>>) -> !fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>
+! CHECK: fir.store %[[VAL_12]] to %[[VAL_9]] : !fir.ref<!fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>>
+! CHECK: }
+! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_14:.*]] = fir.coordinate_of %[[VAL_7]], %[[VAL_13]]
+! CHECK: %[[VAL_15:.*]] = fir.is_present %[[VAL_6]]#1 : (!fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>) -> i1
+! CHECK: fir.if %[[VAL_15]] {
+! CHECK: %[[VAL_16:.*]] = fir.shift %[[VAL_4]] : (index) -> !fir.shift<1>
+! CHECK: %[[VAL_17:.*]] = fir.rebox %[[VAL_6]]#1(%[[VAL_16]]) : (!fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>, !fir.shift<1>) -> !fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>
+! CHECK: fir.store %[[VAL_17]] to %[[VAL_14]] : !fir.ref<!fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>>
+! CHECK: } else {
+! CHECK: %[[VAL_18:.*]] = fir.type_desc !fir.type<_QMcaptured_optional_polymorphicTsometype>
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_14]] : (!fir.ref<!fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>>) -> !fir.ref<!fir.box<none>>
+! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_18]] : (!fir.tdesc<!fir.type<_QMcaptured_optional_polymorphicTsometype>>) -> !fir.ref<none>
+! CHECK: %[[VAL_21:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_22:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_23:.*]] = fir.call @_FortranAPointerNullifyDerived(%[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.ref<none>, i32, i32) -> none
+! CHECK: }
+! CHECK: fir.call @_QMcaptured_optional_polymorphicFtestPinternal(%[[VAL_7]])
+
+! CHECK-LABEL: func.func{{.*}} @_QMcaptured_optional_polymorphicFtestPinternal(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<{{.*}}>>
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]]
+! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>>
+! CHECK: %[[VAL_4:.*]] = fir.box_addr %[[VAL_3]] : (!fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>) -> !fir.ref<!fir.type<_QMcaptured_optional_polymorphicTsometype>>
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (!fir.ref<!fir.type<_QMcaptured_optional_polymorphicTsometype>>) -> i64
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_7:.*]] = arith.cmpi ne, %[[VAL_5]], %[[VAL_6]] : i64
+! CHECK: %[[VAL_8:.*]] = fir.absent !fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>
+! CHECK: %[[VAL_9:.*]] = arith.select %[[VAL_7]], %[[VAL_3]], %[[VAL_8]] : !fir.class<!fir.type<_QMcaptured_optional_polymorphicTsometype>>
+! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_9]] {fortran_attrs = #fir.var_attrs<optional, host_assoc>, {{.*}}Ex
+! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_12:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_11]]
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]] : !fir.ref<!fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>>
+! CHECK: %[[VAL_14:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_15:.*]]:3 = fir.box_dims %[[VAL_13]], %[[VAL_14]]
+! CHECK: %[[VAL_16:.*]] = fir.box_addr %[[VAL_13]]
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (!fir.ref<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>) -> i64
+! CHECK: %[[VAL_18:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_19:.*]] = arith.cmpi ne, %[[VAL_17]], %[[VAL_18]] : i64
+! CHECK: %[[VAL_20:.*]] = fir.absent !fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>
+! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_19]], %[[VAL_13]], %[[VAL_20]] : !fir.class<!fir.array<?x!fir.type<_QMcaptured_optional_polymorphicTsometype>>>
+! CHECK: %[[VAL_22:.*]] = fir.shift %[[VAL_15]]#0 : (index) -> !fir.shift<1>
+! CHECK: %[[VAL_23:.*]]:2 = hlfir.declare %[[VAL_21]](%[[VAL_22]]) {fortran_attrs = #fir.var_attrs<optional, host_assoc>, {{.*}}Ey
diff --git a/flang/test/Lower/HLFIR/internal-procedures.f90 b/flang/test/Lower/HLFIR/internal-procedures.f90
index d517cb4345af..c898903b6fbe 100644
--- a/flang/test/Lower/HLFIR/internal-procedures.f90
+++ b/flang/test/Lower/HLFIR/internal-procedures.f90
@@ -9,8 +9,8 @@ subroutine internal
call takes_array(x)
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFtest_explicit_shape_arrayPinternal(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.box<!fir.array<?xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func.func private @_QFtest_explicit_shape_arrayPinternal(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.box<!fir.array<?xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<?xf32>>>
! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.box<!fir.array<?xf32>>>
@@ -27,8 +27,8 @@ subroutine internal
call takes_array(x)
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFtest_assumed_shapePinternal(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.box<!fir.array<?xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func.func private @_QFtest_assumed_shapePinternal(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.box<!fir.array<?xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<?xf32>>>
! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.box<!fir.array<?xf32>>>
@@ -44,8 +44,8 @@ subroutine internal()
call bar(c)
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFtest_scalar_charPinternal(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.boxchar<1>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func.func private @_QFtest_scalar_charPinternal(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.boxchar<1>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<!fir.boxchar<1>>>, i32) -> !fir.ref<!fir.boxchar<1>>
! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref<!fir.boxchar<1>>
diff --git a/flang/test/Lower/Intrinsics/norm2.f90 b/flang/test/Lower/Intrinsics/norm2.f90
index f14cad59d5bd..0d125e36f665 100644
--- a/flang/test/Lower/Intrinsics/norm2.f90
+++ b/flang/test/Lower/Intrinsics/norm2.f90
@@ -76,3 +76,19 @@ subroutine norm2_test_dim_3(a,r)
! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
! CHECK-DAG: fir.freemem %[[addr]]
end subroutine norm2_test_dim_3
+
+! CHECK-LABEL: func @_QPnorm2_test_real16(
+! CHECK-SAME: %[[arg0:.*]]: !fir.box<!fir.array<?x?x?xf128>>{{.*}}, %[[arg1:.*]]: !fir.box<!fir.array<?x?xf128>>{{.*}})
+subroutine norm2_test_real16(a,r)
+ real(16) :: a(:,:,:)
+ real(16) :: r(:,:)
+ ! CHECK-DAG: %[[dim:.*]] = arith.constant 3 : i32
+ ! CHECK-DAG: %[[r:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xf128>>>
+ ! CHECK-DAG: %[[res:.*]] = fir.convert %[[r]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf128>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: %[[arr:.*]] = fir.convert %[[arg0]] : (!fir.box<!fir.array<?x?x?xf128>>) -> !fir.box<none>
+ r = norm2(a,dim=3)
+ ! CHECK: %{{.*}} = fir.call @_FortranANorm2DimReal16(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32) -> none
+ ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf128>>>>
+ ! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box<!fir.heap<!fir.array<?x?xf128>>>) -> !fir.heap<!fir.array<?x?xf128>>
+ ! CHECK-DAG: fir.freemem %[[addr]]
+end subroutine norm2_test_real16
diff --git a/flang/test/Lower/Intrinsics/random.f90 b/flang/test/Lower/Intrinsics/random.f90
index ca194befd027..4fb1a9a5da27 100644
--- a/flang/test/Lower/Intrinsics/random.f90
+++ b/flang/test/Lower/Intrinsics/random.f90
@@ -45,7 +45,7 @@ subroutine random_test_2
call foo(size)
call bar(size, get)
contains
- ! CHECK-LABEL: func @_QFrandom_test_2Pfoo
+ ! CHECK-LABEL: func private @_QFrandom_test_2Pfoo
subroutine foo(size, put, get)
! CHECK: [[s1:%[0-9]+]] = fir.is_present %arg0
! CHECK: [[s2:%[0-9]+]] = fir.embox %arg0
@@ -70,7 +70,7 @@ contains
print*, size
end subroutine
- ! CHECK-LABEL: func @_QFrandom_test_2Pbar
+ ! CHECK-LABEL: func private @_QFrandom_test_2Pbar
subroutine bar(size, get, put)
integer, optional :: size
! CHECK: [[p1:%[0-9]+]] = fir.is_present %arg2
diff --git a/flang/test/Lower/Intrinsics/ubound01.f90 b/flang/test/Lower/Intrinsics/ubound01.f90
index 4ebe3870cf0b..df51d79eb6af 100644
--- a/flang/test/Lower/Intrinsics/ubound01.f90
+++ b/flang/test/Lower/Intrinsics/ubound01.f90
@@ -16,7 +16,7 @@ contains
End Subroutine
end
-! CHECK-LABEL: func.func @_QFPs2
+! CHECK-LABEL: func.func private @_QFPs2
! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?x?xf32>>
! CHECK: %[[BOX:.*]] = fir.rebox %[[ARG0]](%{{.*}}) : (!fir.box<!fir.array<?x?xf32>>, !fir.shift<2>) -> !fir.box<!fir.array<?x?xf32>>
! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[BOX]] : (!fir.box<!fir.array<?x?xf32>>) -> !fir.box<none>
diff --git a/flang/test/Lower/OpenACC/acc-routine04.f90 b/flang/test/Lower/OpenACC/acc-routine04.f90
index b5f5aa2ca488..2339c23eaaf8 100644
--- a/flang/test/Lower/OpenACC/acc-routine04.f90
+++ b/flang/test/Lower/OpenACC/acc-routine04.f90
@@ -31,4 +31,4 @@ end program
! CHECK: acc.routine @acc_routine_0 func(@_QMdummy_modPsub1) seq
! CHECK: func.func @_QMdummy_modPsub1(%arg0: !fir.ref<i32> {fir.bindc_name = "i"}) attributes {acc.routine_info = #acc.routine_info<[@acc_routine_0]>}
! CHECK: func.func @_QQmain() attributes {fir.bindc_name = "test_acc_routine"}
-! CHECK: func.func @_QFPsub2() attributes {acc.routine_info = #acc.routine_info<[@acc_routine_1]>}
+! CHECK: func.func private @_QFPsub2() attributes {acc.routine_info = #acc.routine_info<[@acc_routine_1]>, llvm.linkage = #llvm.linkage<internal>}
diff --git a/flang/test/Lower/OpenMP/FIR/delayed-privatization-firstprivate.f90 b/flang/test/Lower/OpenMP/FIR/delayed-privatization-firstprivate.f90
new file mode 100644
index 000000000000..122542345f10
--- /dev/null
+++ b/flang/test/Lower/OpenMP/FIR/delayed-privatization-firstprivate.f90
@@ -0,0 +1,29 @@
+! Test delayed privatization for the `private` clause.
+
+! RUN: bbc -emit-fir -hlfir=false -fopenmp --openmp-enable-delayed-privatization -o - %s 2>&1 | FileCheck %s
+
+subroutine delayed_privatization_firstprivate
+ implicit none
+ integer :: var1
+
+!$OMP PARALLEL FIRSTPRIVATE(var1)
+ var1 = 10
+!$OMP END PARALLEL
+end subroutine
+
+! CHECK-LABEL: omp.private {type = firstprivate}
+! CHECK-SAME: @[[VAR1_PRIVATIZER_SYM:.*]] : !fir.ref<i32> alloc {
+! CHECK-NEXT: ^bb0(%[[PRIV_ARG:.*]]: !fir.ref<i32>):
+! CHECK-NEXT: %[[PRIV_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var1", pinned, uniq_name = "_QFdelayed_privatization_firstprivateEvar1"}
+! CHECK-NEXT: omp.yield(%[[PRIV_ALLOC]] : !fir.ref<i32>)
+! CHECK: } copy {
+! CHECK: ^bb0(%[[PRIV_ORIG_ARG:.*]]: !fir.ref<i32>, %[[PRIV_PRIV_ARG:.*]]: !fir.ref<i32>):
+! CHECK: %[[ORIG_VAL:.*]] = fir.load %[[PRIV_ORIG_ARG]] : !fir.ref<i32>
+! CHECK: fir.store %[[ORIG_VAL]] to %[[PRIV_PRIV_ARG]] : !fir.ref<i32>
+! CHECK: omp.yield(%[[PRIV_PRIV_ARG]] : !fir.ref<i32>)
+! CHECK: }
+
+! CHECK-LABEL: @_QPdelayed_privatization_firstprivate
+! CHECK: omp.parallel private(@[[VAR1_PRIVATIZER_SYM]] %{{.*}} -> %{{.*}} : !fir.ref<i32>) {
+! CHECK: omp.terminator
+
diff --git a/flang/test/Lower/OpenMP/FIR/delayed-privatization-private.f90 b/flang/test/Lower/OpenMP/FIR/delayed-privatization-private.f90
new file mode 100644
index 000000000000..2e9995ea1fd4
--- /dev/null
+++ b/flang/test/Lower/OpenMP/FIR/delayed-privatization-private.f90
@@ -0,0 +1,38 @@
+! Test delayed privatization for the `private` clause.
+
+! RUN: bbc -emit-fir -hlfir=false -fopenmp --openmp-enable-delayed-privatization -o - %s 2>&1 | FileCheck %s
+
+subroutine delayed_privatization_private
+ implicit none
+ integer :: var1
+
+!$OMP PARALLEL PRIVATE(var1)
+ var1 = 10
+!$OMP END PARALLEL
+
+!$OMP PARALLEL PRIVATE(var1)
+ var1 = 20
+!$OMP END PARALLEL
+
+end subroutine
+
+! CHECK-LABEL: omp.private {type = private}
+! CHECK-SAME: @[[PRIVATIZER_SYM:.*]] : !fir.ref<i32> alloc {
+! CHECK-NEXT: ^bb0(%[[PRIV_ARG:.*]]: !fir.ref<i32>):
+! CHECK-NEXT: %[[PRIV_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var1", pinned, uniq_name = "_QFdelayed_privatization_privateEvar1"}
+! CHECK-NEXT: omp.yield(%[[PRIV_ALLOC]] : !fir.ref<i32>)
+! CHECK-NOT: } copy {
+
+! CHECK-LABEL: @_QPdelayed_privatization_private
+! CHECK: %[[ORIG_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var1", uniq_name = "_QFdelayed_privatization_privateEvar1"}
+! CHECK: omp.parallel private(@[[PRIVATIZER_SYM]] %[[ORIG_ALLOC]] -> %[[PAR_ARG:.*]] : !fir.ref<i32>) {
+! CHECK: %[[C10:.*]] = arith.constant 10 : i32
+! CHECK: fir.store %[[C10]] to %[[PAR_ARG]] : !fir.ref<i32>
+! CHECK: omp.terminator
+
+! Test that the same privatizer is used if the a variable with the same type and
+! name was previously privatized.
+! CHECK: omp.parallel private(@[[PRIVATIZER_SYM]] %[[ORIG_ALLOC]] -> %[[PAR_ARG:.*]] : !fir.ref<i32>) {
+! CHECK: %[[C20:.*]] = arith.constant 20 : i32
+! CHECK: fir.store %[[C20]] to %[[PAR_ARG]] : !fir.ref<i32>
+! CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/FIR/threadprivate-use-association-2.f90 b/flang/test/Lower/OpenMP/FIR/threadprivate-use-association-2.f90
index 14c0dff8da4b..6db5735c21f1 100644
--- a/flang/test/Lower/OpenMP/FIR/threadprivate-use-association-2.f90
+++ b/flang/test/Lower/OpenMP/FIR/threadprivate-use-association-2.f90
@@ -17,7 +17,7 @@ end
! CHECK: return
! CHECK: }
!
-! CHECK-LABEL: func.func @_QMm2FtestPinternal_test() {
+! CHECK-LABEL: func.func private @_QMm2FtestPinternal_test() {{.*}} {
! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMmEx) : !fir.ref<i32>
! CHECK: %[[VAL_1:.*]] = omp.threadprivate %[[VAL_0]] : !fir.ref<i32> -> !fir.ref<i32>
! CHECK: fir.call @_QPbar(%[[VAL_1]]) {{.*}}: (!fir.ref<i32>) -> ()
diff --git a/flang/test/Lower/OpenMP/delayed-privatization-firstprivate.f90 b/flang/test/Lower/OpenMP/delayed-privatization-firstprivate.f90
new file mode 100644
index 000000000000..e3d2a5a8af26
--- /dev/null
+++ b/flang/test/Lower/OpenMP/delayed-privatization-firstprivate.f90
@@ -0,0 +1,29 @@
+! Test delayed privatization for the `firstprivate` clause.
+
+! RUN: bbc -emit-hlfir -fopenmp --openmp-enable-delayed-privatization -o - %s 2>&1 | FileCheck %s
+
+subroutine delayed_privatization_firstprivate
+ implicit none
+ integer :: var1
+
+!$omp parallel firstprivate(var1)
+ var1 = 10
+!$omp end parallel
+end subroutine
+
+! CHECK-LABEL: omp.private {type = firstprivate}
+! CHECK-SAME: @[[VAR1_PRIVATIZER_SYM:.*]] : !fir.ref<i32> alloc {
+! CHECK-NEXT: ^bb0(%[[PRIV_ARG:.*]]: !fir.ref<i32>):
+! CHECK-NEXT: %[[PRIV_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var1", pinned, uniq_name = "_QFdelayed_privatization_firstprivateEvar1"}
+! CHECK-NEXT: %[[PRIV_DECL:.*]]:2 = hlfir.declare %[[PRIV_ALLOC]] {uniq_name = "_QFdelayed_privatization_firstprivateEvar1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK-NEXT: omp.yield(%[[PRIV_DECL]]#0 : !fir.ref<i32>)
+! CHECK: } copy {
+! CHECK: ^bb0(%[[PRIV_ORIG_ARG:.*]]: !fir.ref<i32>, %[[PRIV_PRIV_ARG:.*]]: !fir.ref<i32>):
+! CHECK: %[[ORIG_VAL:.*]] = fir.load %[[PRIV_ORIG_ARG]] : !fir.ref<i32>
+! CHECK: hlfir.assign %[[ORIG_VAL]] to %[[PRIV_PRIV_ARG]] temporary_lhs : i32, !fir.ref<i32>
+! CHECK: omp.yield(%[[PRIV_PRIV_ARG]] : !fir.ref<i32>)
+! CHECK: }
+
+! CHECK-LABEL: @_QPdelayed_privatization_firstprivate
+! CHECK: omp.parallel private(@[[VAR1_PRIVATIZER_SYM]] %{{.*}} -> %{{.*}} : !fir.ref<i32>) {
+! CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/delayed-privatization-private-firstprivate.f90 b/flang/test/Lower/OpenMP/delayed-privatization-private-firstprivate.f90
new file mode 100644
index 000000000000..46eef6eb3bcf
--- /dev/null
+++ b/flang/test/Lower/OpenMP/delayed-privatization-private-firstprivate.f90
@@ -0,0 +1,34 @@
+! Test delayed privatization for both `private` and `firstprivate` clauses.
+
+! RUN: bbc -emit-hlfir -fopenmp --openmp-enable-delayed-privatization -o - %s 2>&1 | FileCheck %s
+
+subroutine delayed_privatization_private_firstprivate
+ implicit none
+ integer :: var1
+ integer :: var2
+
+!$omp parallel private(var1) firstprivate(var2)
+ var1 = 10
+ var2 = var1 + var2
+!$omp end parallel
+end subroutine
+
+! CHECK-LABEL: omp.private {type = firstprivate}
+! CHECK-SAME: @[[VAR2_PRIVATIZER_SYM:.*]] : !fir.ref<i32> alloc {
+! CHECK: } copy {
+! CHECK: }
+
+! CHECK-LABEL: omp.private {type = private}
+! CHECK-SAME: @[[VAR1_PRIVATIZER_SYM:.*]] : !fir.ref<i32> alloc {
+! CHECK: }
+
+! CHECK-LABEL: func.func @_QPdelayed_privatization_private_firstprivate() {
+! CHECK: %[[VAR1_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var1"
+! CHECK: %[[VAR1_DECL:.*]]:2 = hlfir.declare %[[VAR1_ALLOC]]
+
+! CHECK: %[[VAR2_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var2"
+! CHECK: %[[VAR2_DECL:.*]]:2 = hlfir.declare %[[VAR2_ALLOC]]
+
+! CHECK: omp.parallel private(
+! CHECK-SAME: @[[VAR1_PRIVATIZER_SYM]] %[[VAR1_DECL]]#0 -> %{{.*}} : !fir.ref<i32>,
+! CHECK-SAME: @[[VAR2_PRIVATIZER_SYM]] %[[VAR2_DECL]]#0 -> %{{.*}} : !fir.ref<i32>) {
diff --git a/flang/test/Lower/OpenMP/delayed-privatization-private.f90 b/flang/test/Lower/OpenMP/delayed-privatization-private.f90
new file mode 100644
index 000000000000..240e0e71bfcd
--- /dev/null
+++ b/flang/test/Lower/OpenMP/delayed-privatization-private.f90
@@ -0,0 +1,28 @@
+! Test delayed privatization for the `private` clause.
+
+! RUN: bbc -emit-hlfir -fopenmp --openmp-enable-delayed-privatization -o - %s 2>&1 | FileCheck %s
+
+subroutine delayed_privatization_private
+ implicit none
+ integer :: var1
+
+!$omp parallel private(var1)
+ var1 = 10
+!$omp end parallel
+end subroutine
+
+! CHECK-LABEL: omp.private {type = private}
+! CHECK-SAME: @[[PRIVATIZER_SYM:.*]] : !fir.ref<i32> alloc {
+! CHECK-NEXT: ^bb0(%[[PRIV_ARG:.*]]: !fir.ref<i32>):
+! CHECK-NEXT: %[[PRIV_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var1", pinned, uniq_name = "_QFdelayed_privatization_privateEvar1"}
+! CHECK-NEXT: %[[PRIV_DECL:.*]]:2 = hlfir.declare %[[PRIV_ALLOC]] {uniq_name = "_QFdelayed_privatization_privateEvar1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK-NEXT: omp.yield(%[[PRIV_DECL]]#0 : !fir.ref<i32>)
+! CHECK-NOT: } copy {
+
+! CHECK-LABEL: @_QPdelayed_privatization_private
+! CHECK: %[[ORIG_ALLOC:.*]] = fir.alloca i32 {bindc_name = "var1", uniq_name = "_QFdelayed_privatization_privateEvar1"}
+! CHECK: %[[ORIG_DECL:.*]]:2 = hlfir.declare %[[ORIG_ALLOC]] {uniq_name = "_QFdelayed_privatization_privateEvar1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: omp.parallel private(@[[PRIVATIZER_SYM]] %[[ORIG_DECL]]#0 -> %[[PAR_ARG:.*]] : !fir.ref<i32>) {
+! CHECK: %[[PAR_ARG_DECL:.*]]:2 = hlfir.declare %[[PAR_ARG]] {uniq_name = "_QFdelayed_privatization_privateEvar1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: hlfir.assign %{{.*}} to %[[PAR_ARG_DECL]]#0 : i32, !fir.ref<i32>
+! CHECK: omp.terminator
diff --git a/flang/test/Lower/OpenMP/delayed-privatization-reduction.f90 b/flang/test/Lower/OpenMP/delayed-privatization-reduction.f90
new file mode 100644
index 000000000000..c61f352b9b05
--- /dev/null
+++ b/flang/test/Lower/OpenMP/delayed-privatization-reduction.f90
@@ -0,0 +1,30 @@
+! Test that reductions and delayed privatization work properly togehter. Since
+! both types of clauses add block arguments to the OpenMP region, we make sure
+! that the block arguments are added in the proper order (reductions first and
+! then delayed privatization.
+
+! RUN: bbc -emit-hlfir -fopenmp --openmp-enable-delayed-privatization -o - %s 2>&1 | FileCheck %s
+
+subroutine red_and_delayed_private
+ integer :: red
+ integer :: prv
+
+ red = 0
+ prv = 10
+
+ !$omp parallel reduction(+:red) private(prv)
+ red = red + 1
+ prv = 20
+ !$omp end parallel
+end subroutine
+
+! CHECK-LABEL: omp.private {type = private}
+! CHECK-SAME: @[[PRIVATIZER_SYM:.*]] : !fir.ref<i32> alloc {
+
+! CHECK-LABEL: omp.reduction.declare
+! CHECK-SAME: @[[REDUCTION_SYM:.*]] : i32 init
+
+! CHECK-LABEL: _QPred_and_delayed_private
+! CHECK: omp.parallel
+! CHECK-SAME: reduction(@[[REDUCTION_SYM]] %{{.*}} -> %arg0 : !fir.ref<i32>)
+! CHECK-SAME: private(@[[PRIVATIZER_SYM]] %{{.*}} -> %arg1 : !fir.ref<i32>) {
diff --git a/flang/test/Lower/OpenMP/threadprivate-commonblock-use.f90 b/flang/test/Lower/OpenMP/threadprivate-commonblock-use.f90
index 28616f7595a0..71f1c7608a2c 100644
--- a/flang/test/Lower/OpenMP/threadprivate-commonblock-use.f90
+++ b/flang/test/Lower/OpenMP/threadprivate-commonblock-use.f90
@@ -15,7 +15,7 @@ contains
subroutine ss1
use m0
contains
-!CHECK-LABEL: func @_QMm1Fss1Pss2
+!CHECK-LABEL: func private @_QMm1Fss1Pss2
!CHECK: %[[CMN:.*]] = fir.address_of(@cmn_) : !fir.ref<!fir.array<4xi8>>
!CHECK: omp.parallel
!CHECK: %{{.*}} = omp.threadprivate %[[CMN]] : !fir.ref<!fir.array<4xi8>> -> !fir.ref<!fir.array<4xi8>>
diff --git a/flang/test/Lower/OpenMP/threadprivate-use-association-2-hlfir.f90 b/flang/test/Lower/OpenMP/threadprivate-use-association-2-hlfir.f90
index 722f023fbefc..79a1ce9897f2 100644
--- a/flang/test/Lower/OpenMP/threadprivate-use-association-2-hlfir.f90
+++ b/flang/test/Lower/OpenMP/threadprivate-use-association-2-hlfir.f90
@@ -19,7 +19,7 @@ end
! CHECK: return
! CHECK: }
-! CHECK-LABEL: func.func @_QMm2FtestPinternal_test() {
+! CHECK-LABEL: func.func private @_QMm2FtestPinternal_test() {{.*}} {
! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMmEx) : !fir.ref<i32>
! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QMmEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_2:.*]] = omp.threadprivate %[[VAL_1]]#1 : !fir.ref<i32> -> !fir.ref<i32>
diff --git a/flang/test/Lower/PowerPC/ppc-vector-types.f90 b/flang/test/Lower/PowerPC/ppc-vector-types.f90
index be293f873ecb..4745e4567b2d 100644
--- a/flang/test/Lower/PowerPC/ppc-vector-types.f90
+++ b/flang/test/Lower/PowerPC/ppc-vector-types.f90
@@ -44,7 +44,7 @@
! CHECK-LLVM-NEXT: store <512 x i1> %[[RESQ]], ptr @_QFEvq2, align 64
contains
- ! CHECK-LLVM-LABEL: define <4 x i32> @_QFPtest_vec_integer_assign
+ ! CHECK-LLVM-LABEL: define internal <4 x i32> @_QFPtest_vec_integer_assign
function test_vec_integer_assign(arg1)
! CHECK-LLVM: %[[FUNC_RES:.*]] = alloca <4 x i32>, i64 1, align 16
vector(integer(4)) :: arg1, test_vec_integer_assign
@@ -58,7 +58,7 @@
! CHECK-LLVM-NEXT: ret <4 x i32> %[[RET]]
end function test_vec_integer_assign
- ! CHECK-LLVM-LABEL: define <2 x double> @_QFPtest_vec_real_assign
+ ! CHECK-LLVM-LABEL: define internal <2 x double> @_QFPtest_vec_real_assign
function test_vec_real_assign(arg1)
! CHECK-LLVM: %[[FUNC_RES:.*]] = alloca <2 x double>, i64 1, align 16
vector(real(8)) :: arg1, test_vec_real_assign
@@ -72,7 +72,7 @@
! CHECK-LLVM-NEXT: ret <2 x double> %[[RET]]
end function test_vec_real_assign
- ! CHECK-LLVM-LABEL: define <8 x i16> @_QFPtest_vec_unsigned_assign
+ ! CHECK-LLVM-LABEL: define internal <8 x i16> @_QFPtest_vec_unsigned_assign
function test_vec_unsigned_assign(arg1)
! CHECK-LLVM: %[[FUNC_RES:.*]] = alloca <8 x i16>, i64 1, align 16
vector(unsigned(2)) :: arg1, test_vec_unsigned_assign
@@ -86,7 +86,7 @@
! CHECK-LLVM-NEXT: ret <8 x i16> %[[RET]]
end function test_vec_unsigned_assign
- ! CHECK-LLVM-LABEL: define <256 x i1> @_QFPtest_vec_pair_assign
+ ! CHECK-LLVM-LABEL: define internal <256 x i1> @_QFPtest_vec_pair_assign
function test_vec_pair_assign(arg1)
! CHECK-LLVM: %[[FUNC_RES:.*]] = alloca <256 x i1>, i64 1, align 32
__vector_pair :: arg1, test_vec_pair_assign
@@ -100,7 +100,7 @@
! CHECK-LLVM-NEXT: ret <256 x i1> %[[RET]]
end function test_vec_pair_assign
- ! CHECK-LLVM-LABEL: define <512 x i1> @_QFPtest_vec_quad_assign
+ ! CHECK-LLVM-LABEL: define internal <512 x i1> @_QFPtest_vec_quad_assign
function test_vec_quad_assign(arg1)
! CHECK-LLVM: %[[FUNC_RES:.*]] = alloca <512 x i1>, i64 1, align 64
__vector_quad :: arg1, test_vec_quad_assign
diff --git a/flang/test/Lower/array-temp.f90 b/flang/test/Lower/array-temp.f90
index 347d4cef78bc..10c5ee91d44b 100644
--- a/flang/test/Lower/array-temp.f90
+++ b/flang/test/Lower/array-temp.f90
@@ -404,7 +404,7 @@ subroutine tt1
! CHECK-NEXT: fir.call @_FortranAioEndIoStatement
print*, [(r([7.0]),i=1,3)]
contains
- ! CHECK-LABEL: func @_QFtt1Pr
+ ! CHECK-LABEL: func private @_QFtt1Pr
function r(x)
real x(:)
r = x(1)
diff --git a/flang/test/Lower/convert.f90 b/flang/test/Lower/convert.f90
index 1ab93dcc1732..b7c8b8dc20cc 100755
--- a/flang/test/Lower/convert.f90
+++ b/flang/test/Lower/convert.f90
@@ -34,8 +34,8 @@ end
! ALL: fir.has_value %[[VAL_0]] : !fir.char<1,[[OPT_STR_LEN]]>
! ALL: fir.global linkonce @_QQEnvironmentDefaults.list constant : tuple<i[[int_size:.*]], !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>> {
-! ALL: %[[VAL_0:.*]] = fir.undefined tuple<i[[int_size]], !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>>
! ALL: %[[VAL_1:.*]] = arith.constant 1 : i[[int_size]]
+! ALL: %[[VAL_0:.*]] = fir.undefined tuple<i[[int_size]], !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>>
! ALL: %[[VAL_2:.*]] = fir.insert_value %[[VAL_0]], %[[VAL_1]], [0 : index] : (tuple<i[[int_size]], !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>>, i[[int_size]]) -> tuple<i[[int_size]], !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>>
! ALL: %[[VAL_3:.*]] = fir.address_of(@_QQEnvironmentDefaults.items) : !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>
! ALL: %[[VAL_4:.*]] = fir.insert_value %[[VAL_2]], %[[VAL_3]], [1 : index] : (tuple<i[[int_size]], !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>>, !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>) -> tuple<i[[int_size]], !fir.ref<!fir.array<1xtuple<!fir.ref<i8>, !fir.ref<i8>>>>>
diff --git a/flang/test/Lower/dummy-arguments.f90 b/flang/test/Lower/dummy-arguments.f90
index 43d8e3c1e5d4..331e089a60fa 100644
--- a/flang/test/Lower/dummy-arguments.f90
+++ b/flang/test/Lower/dummy-arguments.f90
@@ -9,7 +9,7 @@ program test1
call foo(10)
contains
-! CHECK-LABEL: func @_QFPfoo
+! CHECK-LABEL: func private @_QFPfoo
subroutine foo(avar1)
integer :: avar1
! integer :: my_data, my_data2
diff --git a/flang/test/Lower/dummy-procedure-character.f90 b/flang/test/Lower/dummy-procedure-character.f90
index cecd839287ed..72d548513fb2 100644
--- a/flang/test/Lower/dummy-procedure-character.f90
+++ b/flang/test/Lower/dummy-procedure-character.f90
@@ -213,7 +213,7 @@ subroutine host(f)
! CHECK: fir.call @_QFhostPintern(%[[VAL_1]])
call intern()
contains
-! CHECK-LABEL: func @_QFhostPintern(
+! CHECK-LABEL: func private @_QFhostPintern(
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>> {fir.host_assoc})
subroutine intern()
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
@@ -242,7 +242,7 @@ subroutine host2(f)
! CHECK: fir.call @_QFhost2Pintern(%[[VAL_1]])
call intern()
contains
-! CHECK-LABEL: func @_QFhost2Pintern(
+! CHECK-LABEL: func private @_QFhost2Pintern(
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>> {fir.host_assoc})
subroutine intern()
! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.char<1,42> {bindc_name = ".result"}
diff --git a/flang/test/Lower/equivalence-with-host-assoc.f90 b/flang/test/Lower/equivalence-with-host-assoc.f90
index ec84fb506314..0ffb1bc5bf9e 100644
--- a/flang/test/Lower/equivalence-with-host-assoc.f90
+++ b/flang/test/Lower/equivalence-with-host-assoc.f90
@@ -10,7 +10,7 @@ contains
i1 = j1
end subroutine inner
end subroutine test1
-! FIR-LABEL: func.func @_QFtest1Pinner() attributes {fir.internal_proc} {
+! FIR-LABEL: func.func private @_QFtest1Pinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! FIR: %[[VAL_0:.*]] = fir.address_of(@_QFtest1Ei1) : !fir.ref<!fir.array<1xi32>>
! FIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.array<1xi32>>) -> !fir.ref<!fir.array<4xi8>>
! FIR: %[[VAL_2:.*]] = arith.constant 0 : index
@@ -24,7 +24,7 @@ end subroutine test1
! FIR: return
! FIR: }
-! HLFIR-LABEL: func.func @_QFtest1Pinner() attributes {fir.internal_proc} {
+! HLFIR-LABEL: func.func private @_QFtest1Pinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! HLFIR: %[[VAL_0:.*]] = fir.address_of(@_QFtest1Ei1) : !fir.ref<!fir.array<1xi32>>
! HLFIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.array<1xi32>>) -> !fir.ref<!fir.array<4xi8>>
! HLFIR: %[[VAL_2:.*]] = arith.constant 0 : index
@@ -54,7 +54,7 @@ contains
end subroutine inner
end subroutine host
end module test2
-! FIR-LABEL: func.func @_QMtest2FhostPinner() attributes {fir.internal_proc} {
+! FIR-LABEL: func.func private @_QMtest2FhostPinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! FIR: %[[VAL_0:.*]] = fir.address_of(@_QMtest2FhostEf1) : !fir.ref<!fir.array<1xi32>>
! FIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.array<1xi32>>) -> !fir.ref<!fir.array<4xi8>>
! FIR: %[[VAL_2:.*]] = arith.constant 0 : index
@@ -68,7 +68,7 @@ end module test2
! FIR: return
! FIR: }
-! HLFIR-LABEL: func.func @_QMtest2FhostPinner() attributes {fir.internal_proc} {
+! HLFIR-LABEL: func.func private @_QMtest2FhostPinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! HLFIR: %[[VAL_0:.*]] = fir.address_of(@_QMtest2FhostEf1) : !fir.ref<!fir.array<1xi32>>
! HLFIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.array<1xi32>>) -> !fir.ref<!fir.array<4xi8>>
! HLFIR: %[[VAL_2:.*]] = arith.constant 0 : index
@@ -94,7 +94,7 @@ contains
i1 = j1 + k1
end subroutine inner
end subroutine test3
-! FIR-LABEL: func.func @_QFtest3Pinner() attributes {fir.internal_proc} {
+! FIR-LABEL: func.func private @_QFtest3Pinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! FIR: %[[VAL_0:.*]] = fir.address_of(@blk_) : !fir.ref<tuple<i32>>
! FIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<tuple<i32>>) -> !fir.ref<!fir.array<?xi8>>
! FIR: %[[VAL_2:.*]] = arith.constant 0 : index
@@ -115,7 +115,7 @@ end subroutine test3
! FIR: return
! FIR: }
-! HLFIR-LABEL: func.func @_QFtest3Pinner() attributes {fir.internal_proc} {
+! HLFIR-LABEL: func.func private @_QFtest3Pinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! HLFIR: %[[VAL_0:.*]] = fir.address_of(@blk_) : !fir.ref<tuple<i32>>
! HLFIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<tuple<i32>>) -> !fir.ref<!fir.array<?xi8>>
! HLFIR: %[[VAL_2:.*]] = arith.constant 0 : index
@@ -149,7 +149,7 @@ contains
i1 = j1 + k1
end subroutine inner
end subroutine test4
-! FIR-LABEL: func.func @_QFtest4Pinner() attributes {fir.internal_proc} {
+! FIR-LABEL: func.func private @_QFtest4Pinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! FIR: %[[VAL_0:.*]] = fir.address_of(@blk_) : !fir.ref<tuple<i32>>
! FIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<tuple<i32>>) -> !fir.ref<!fir.array<?xi8>>
! FIR: %[[VAL_2:.*]] = arith.constant 0 : index
@@ -170,7 +170,7 @@ end subroutine test4
! FIR: return
! FIR: }
-! HLFIR-LABEL: func.func @_QFtest4Pinner() attributes {fir.internal_proc} {
+! HLFIR-LABEL: func.func private @_QFtest4Pinner() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! HLFIR: %[[VAL_0:.*]] = fir.address_of(@blk_) : !fir.ref<tuple<i32>>
! HLFIR: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<tuple<i32>>) -> !fir.ref<!fir.array<?xi8>>
! HLFIR: %[[VAL_2:.*]] = arith.constant 0 : index
diff --git a/flang/test/Lower/explicit-interface-results-2.f90 b/flang/test/Lower/explicit-interface-results-2.f90
index 4e4b035bae7e..86aae720e7fc 100644
--- a/flang/test/Lower/explicit-interface-results-2.f90
+++ b/flang/test/Lower/explicit-interface-results-2.f90
@@ -69,8 +69,8 @@ subroutine host4()
integer :: n
call internal_proc_a()
contains
-! CHECK-LABEL: func @_QFhost4Pinternal_proc_a
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFhost4Pinternal_proc_a
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine internal_proc_a()
call takes_array(return_array())
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
@@ -94,7 +94,7 @@ subroutine host5()
implicit none
call internal_proc_a()
contains
-! CHECK-LABEL: func @_QFhost5Pinternal_proc_a() attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFhost5Pinternal_proc_a() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine internal_proc_a()
call takes_array(return_array())
! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMsome_moduleEn_module) : !fir.ref<i32>
@@ -115,7 +115,7 @@ subroutine host6()
implicit none
call internal_proc_a()
contains
-! CHECK-LABEL: func @_QFhost6Pinternal_proc_a
+! CHECK-LABEL: func private @_QFhost6Pinternal_proc_a
subroutine internal_proc_a()
call takes_array(return_array())
! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMsome_moduleEn_module) : !fir.ref<i32>
@@ -187,7 +187,7 @@ subroutine host9()
common /mycom/ n_common
call internal_proc_a()
contains
-! CHECK-LABEL: func @_QFhost9Pinternal_proc_a
+! CHECK-LABEL: func private @_QFhost9Pinternal_proc_a
subroutine internal_proc_a()
! CHECK: %[[VAL_0:.*]] = arith.constant 0 : index
! CHECK: %[[VAL_1:.*]] = fir.address_of(@mycom_) : !fir.ref<!fir.array<4xi8>>
@@ -213,7 +213,7 @@ subroutine host10()
implicit none
call internal_proc_a()
contains
-! CHECK-LABEL: func @_QFhost10Pinternal_proc_a
+! CHECK-LABEL: func private @_QFhost10Pinternal_proc_a
subroutine internal_proc_a()
call takes_array(return_array())
! CHECK: %[[VAL_0:.*]] = arith.constant 0 : index
diff --git a/flang/test/Lower/forall/array-constructor.f90 b/flang/test/Lower/forall/array-constructor.f90
index 083a71ba479a..ad21ed33fba2 100644
--- a/flang/test/Lower/forall/array-constructor.f90
+++ b/flang/test/Lower/forall/array-constructor.f90
@@ -114,8 +114,8 @@ end subroutine ac1
! CHECK: return
! CHECK: }
-! CHECK-LABEL: func @_QFac1Pfunc(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}) -> i32 {
+! CHECK-LABEL: func private @_QFac1Pfunc(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}) -> i32 {{.*}} {
! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "func", uniq_name = "_QFac1FfuncEfunc"}
! CHECK: %[[VAL_2:.*]] = arith.constant 1 : i64
! CHECK: %[[VAL_3:.*]] = arith.constant 1 : i64
@@ -259,8 +259,8 @@ end subroutine ac2
! CHECK: return
! CHECK: }
-! CHECK-LABEL: func @_QFac2Pfunc(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}) -> !fir.array<3xi32> {
+! CHECK-LABEL: func private @_QFac2Pfunc(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}) -> !fir.array<3xi32> {{.*}} {
! CHECK: %[[VAL_1:.*]] = arith.constant 3 : index
! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.array<3xi32> {bindc_name = "func", uniq_name = "_QFac2FfuncEfunc"}
! CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1>
diff --git a/flang/test/Lower/forall/character-1.f90 b/flang/test/Lower/forall/character-1.f90
index e5c40a16420a..e97c3f36b0b1 100644
--- a/flang/test/Lower/forall/character-1.f90
+++ b/flang/test/Lower/forall/character-1.f90
@@ -17,7 +17,7 @@ contains
end subroutine sub
end program test
-! CHECK-LABEL: define void @_QFPsub(
+! CHECK-LABEL: define internal void @_QFPsub(
! CHECK-SAME: ptr %[[arg:.*]])
! CHECK: %[[extent:.*]] = getelementptr { {{.*}}, [1 x [3 x i64]] }, ptr %[[arg]], i32 0, i32 7, i64 0, i32 1
! CHECK: %[[extval:.*]] = load i64, ptr %[[extent]]
diff --git a/flang/test/Lower/global-initialization.f90 b/flang/test/Lower/global-initialization.f90
index dd60a6fd8b9f..ff208ecc3c89 100644
--- a/flang/test/Lower/global-initialization.f90
+++ b/flang/test/Lower/global-initialization.f90
@@ -4,16 +4,19 @@ program bar
! CHECK: fir.address_of(@[[name1:.*]]my_data)
integer, save :: my_data = 1
print *, my_data
+ call foo()
+ call foo2()
+ call foo3()
contains
-! CHECK-LABEL: func @_QFPfoo
+! CHECK-LABEL: func private @_QFPfoo
subroutine foo()
! CHECK: fir.address_of(@[[name2:.*foo.*my_data]])
integer, save :: my_data = 2
print *, my_data + 1
end subroutine
-! CHECK-LABEL: func @_QFPfoo2
+! CHECK-LABEL: func private @_QFPfoo2
subroutine foo2()
! CHECK: fir.address_of(@[[name3:.*foo2.*my_data]])
integer, save :: my_data
@@ -21,7 +24,7 @@ subroutine foo2()
print *, my_data
end subroutine
-! CHECK-LABEL: func @_QFPfoo3
+! CHECK-LABEL: func private @_QFPfoo3
subroutine foo3()
! CHECK-DAG: fir.address_of(@[[name4:.*foo3.*idata]]){{.*}}fir.array<5xi32>
! CHECK-DAG: fir.address_of(@[[name5:.*foo3.*rdata]]){{.*}}fir.array<3xf16>
diff --git a/flang/test/Lower/host-associated-functions.f90 b/flang/test/Lower/host-associated-functions.f90
index 77a51490950f..78d081748c2f 100644
--- a/flang/test/Lower/host-associated-functions.f90
+++ b/flang/test/Lower/host-associated-functions.f90
@@ -19,8 +19,8 @@ subroutine capture_char_func_dummy(char_func_dummy, n)
! CHECK: fir.call @_QFcapture_char_func_dummyPinternal(%[[VAL_2]]) {{.*}}: (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>, !fir.ref<i32>>>) -> ()
call internal()
contains
- ! CHECK-LABEL: func @_QFcapture_char_func_dummyPinternal(
- ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>, !fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFcapture_char_func_dummyPinternal(
+ ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>, !fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine internal()
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>, !fir.ref<i32>>>, i32) -> !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
@@ -55,8 +55,8 @@ subroutine capture_char_func_assumed_dummy(char_func_dummy)
! CHECK: fir.call @_QFcapture_char_func_assumed_dummyPinternal(%[[VAL_1]]) {{.*}}: (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>>) -> ()
call internal()
contains
-! CHECK-LABEL: func @_QFcapture_char_func_assumed_dummyPinternal(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFcapture_char_func_assumed_dummyPinternal(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine internal()
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>>, i32) -> !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
@@ -84,7 +84,7 @@ subroutine capture_char_func(n)
! CHECK: fir.call @_QFcapture_char_funcPinternal(%[[VAL_1]]) {{.*}}: (!fir.ref<tuple<!fir.ref<i32>>>) -> ()
call internal()
contains
-! CHECK-LABEL: func @_QFcapture_char_funcPinternal(
+! CHECK-LABEL: func private @_QFcapture_char_funcPinternal(
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc})
subroutine internal()
print *, char_func()
@@ -109,8 +109,8 @@ subroutine capture_array_func(n)
call internal()
contains
subroutine internal()
-! CHECK-LABEL: func @_QFcapture_array_funcPinternal(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFcapture_array_funcPinternal(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.llvm_ptr<!fir.ref<i32>>
@@ -146,7 +146,7 @@ subroutine use_module()
! CHECK: fir.call @_QFuse_modulePinternal() {{.*}}: () -> ()
call internal()
contains
-! CHECK-LABEL: func @_QFuse_modulePinternal() {
+! CHECK-LABEL: func private @_QFuse_modulePinternal() {{.*}} {
subroutine internal()
print *, return_char(42)
end subroutine
diff --git a/flang/test/Lower/host-associated-globals.f90 b/flang/test/Lower/host-associated-globals.f90
index bb22a3277542..fe612e777aea 100644
--- a/flang/test/Lower/host-associated-globals.f90
+++ b/flang/test/Lower/host-associated-globals.f90
@@ -18,7 +18,7 @@ contains
print *, j_in_equiv, not_in_equiv
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFmodule_varPbar()
+! CHECK-LABEL: func.func private @_QFmodule_varPbar()
! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMtest_mod_used_in_hostEi) : !fir.ref<!fir.array<4xi8>>
! CHECK: %[[VAL_1:.*]] = arith.constant 0 : index
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<!fir.array<4xi8>>, index) -> !fir.ref<i8>
@@ -37,7 +37,7 @@ contains
print *, j_in_equiv, not_in_equiv
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFtest_commonPbar() attributes {fir.internal_proc} {
+! CHECK-LABEL: func.func private @_QFtest_commonPbar() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_0:.*]] = fir.address_of(@x_) : !fir.ref<!fir.array<12xi8>>
! CHECK: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.array<12xi8>>) -> !fir.ref<!fir.array<?xi8>>
! CHECK: %[[VAL_2:.*]] = arith.constant 4 : index
@@ -59,7 +59,7 @@ contains
print *, j_in_equiv, not_in_equiv
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFsaved_equivPbar() attributes {fir.internal_proc} {
+! CHECK-LABEL: func.func private @_QFsaved_equivPbar() attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QFsaved_equivEi) : !fir.ref<!fir.array<8xi8>>
! CHECK: %[[VAL_1:.*]] = arith.constant 4 : index
! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<!fir.array<8xi8>>, index) -> !fir.ref<i8>
@@ -79,8 +79,8 @@ contains
call test(saved_j, j)
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QFmixed_capturePbar(
-! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func.func private @_QFmixed_capturePbar(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_1:.*]] = fir.address_of(@_QFmixed_captureEsaved_i) : !fir.ref<!fir.array<4xi8>>
! CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
! CHECK: %[[VAL_3:.*]] = fir.coordinate_of %[[VAL_1]], %[[VAL_2]] : (!fir.ref<!fir.array<4xi8>>, index) -> !fir.ref<i8>
diff --git a/flang/test/Lower/host-associated.f90 b/flang/test/Lower/host-associated.f90
index 25e637805e87..f88903c8af80 100644
--- a/flang/test/Lower/host-associated.f90
+++ b/flang/test/Lower/host-associated.f90
@@ -19,8 +19,8 @@ subroutine test1
call test1_internal
print *, i
contains
- ! CHECK-LABEL: func @_QFtest1Ptest1_internal(
- ! CHECK-SAME: %[[arg:[^:]*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFtest1Ptest1_internal(
+ ! CHECK-SAME: %[[arg:[^:]*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[iaddr:.*]] = fir.coordinate_of %[[arg]], %c0
! CHECK: %[[i:.*]] = fir.load %[[iaddr]] : !fir.llvm_ptr<!fir.ref<i32>>
! CHECK: %[[val:.*]] = fir.call @_QPifoo() {{.*}}: () -> i32
@@ -46,8 +46,8 @@ subroutine test2
call test2_internal
print *, a, b
contains
- ! CHECK-LABEL: func @_QFtest2Ptest2_internal(
- ! CHECK-SAME: %[[arg:[^:]*]]: !fir.ref<tuple<!fir.ref<f32>, !fir.ref<f32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFtest2Ptest2_internal(
+ ! CHECK-SAME: %[[arg:[^:]*]]: !fir.ref<tuple<!fir.ref<f32>, !fir.ref<f32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine test2_internal
! CHECK: %[[a:.*]] = fir.coordinate_of %[[arg]], %c0
! CHECK: %[[aa:.*]] = fir.load %[[a]] : !fir.llvm_ptr<!fir.ref<f32>>
@@ -61,8 +61,8 @@ contains
call test2_inner
end subroutine test2_internal
- ! CHECK-LABEL: func @_QFtest2Ptest2_inner(
- ! CHECK-SAME: %[[arg:[^:]*]]: !fir.ref<tuple<!fir.ref<f32>, !fir.ref<f32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFtest2Ptest2_inner(
+ ! CHECK-SAME: %[[arg:[^:]*]]: !fir.ref<tuple<!fir.ref<f32>, !fir.ref<f32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine test2_inner
! CHECK: %[[a:.*]] = fir.coordinate_of %[[arg]], %c0
! CHECK: %[[aa:.*]] = fir.load %[[a]] : !fir.llvm_ptr<!fir.ref<f32>>
@@ -95,8 +95,8 @@ subroutine test6(c)
print *, c
contains
- ! CHECK-LABEL: func @_QFtest6Ptest6_inner(
- ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxchar<1>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFtest6Ptest6_inner(
+ ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxchar<1>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine test6_inner
! CHECK: %[[coor:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.boxchar<1>>>, i32) -> !fir.ref<!fir.boxchar<1>>
! CHECK: %[[load:.*]] = fir.load %[[coor]] : !fir.ref<!fir.boxchar<1>>
@@ -137,8 +137,8 @@ subroutine test3(p,q,i)
end if
contains
- ! CHECK-LABEL: func @_QFtest3Ptest3_inner(
- ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFtest3Ptest3_inner(
+ ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine test3_inner
! CHECK: %[[pcoor:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<?xf32>>>
! CHECK: %[[p:.*]] = fir.load %[[pcoor]] : !fir.ref<!fir.box<!fir.array<?xf32>>>
@@ -184,8 +184,8 @@ subroutine test3a(p)
end if
contains
- ! CHECK: func @_QFtest3aPtest3a_inner(
- ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK: func private @_QFtest3aPtest3a_inner(
+ ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine test3a_inner
! CHECK: %[[pcoor:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<10xf32>>>
! CHECK: %[[p:.*]] = fir.load %[[pcoor]] : !fir.ref<!fir.box<!fir.array<10xf32>>>
@@ -228,8 +228,8 @@ subroutine test4
end if
contains
- ! CHECK-LABEL: func @_QFtest4Ptest4_inner(
- ! CHECK-SAME:%[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFtest4Ptest4_inner(
+ ! CHECK-SAME:%[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine test4_inner
! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<f32>>>>
! CHECK: %[[p:.*]] = fir.load %[[ptup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<f32>>>>
@@ -270,8 +270,8 @@ subroutine test5
end if
contains
- ! CHECK-LABEL: func @_QFtest5Ptest5_inner(
- ! CHECK-SAME:%[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+ ! CHECK-LABEL: func private @_QFtest5Ptest5_inner(
+ ! CHECK-SAME:%[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine test5_inner
! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>>
! CHECK: %[[p:.*]] = fir.load %[[ptup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>>
@@ -308,8 +308,8 @@ subroutine test7(j, k)
k = test7_inner(k)
contains
-! CHECK-LABEL: func @_QFtest7Ptest7_inner(
-! CHECK-SAME: %[[i:.*]]: !fir.ref<i32>{{.*}}, %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) -> i32 attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFtest7Ptest7_inner(
+! CHECK-SAME: %[[i:.*]]: !fir.ref<i32>{{.*}}, %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) -> i32 attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
elemental integer function test7_inner(i)
implicit none
integer, intent(in) :: i
@@ -329,8 +329,8 @@ subroutine issue990()
integer :: captured
call bar()
contains
-! CHECK-LABEL: func @_QFissue990Pbar(
-! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFissue990Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine bar()
integer :: stmt_func, i
stmt_func(i) = i + captured
@@ -351,8 +351,8 @@ subroutine issue990b()
captured_stmt_func(i) = i + captured
call bar()
contains
-! CHECK-LABEL: func @_QFissue990bPbar(
-! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFissue990bPbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine bar()
! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
! CHECK: %[[addr:.*]] = fir.load %[[tupAddr]] : !fir.llvm_ptr<!fir.ref<i32>>
@@ -372,8 +372,8 @@ subroutine test8(dummy_proc)
end interface
call bar()
contains
-! CHECK-LABEL: func @_QFtest8Pbar(
-! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxproc<() -> ()>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFtest8Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxproc<() -> ()>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine bar()
! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.boxproc<() -> ()>>>, i32) -> !fir.ref<!fir.boxproc<() -> ()>>
! CHECK: %[[dummyProc:.*]] = fir.load %[[tupAddr]] : !fir.ref<!fir.boxproc<() -> ()>>
@@ -392,8 +392,8 @@ subroutine test9(dummy_proc)
end interface
call bar()
contains
-! CHECK-LABEL: func @_QFtest9Pbar(
-! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxproc<() -> ()>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFtest9Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxproc<() -> ()>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine bar()
! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.boxproc<() -> ()>>>, i32) -> !fir.ref<!fir.boxproc<() -> ()>>
! CHECK: %[[dummyProc:.*]] = fir.load %[[tupAddr]] : !fir.ref<!fir.boxproc<() -> ()>>
@@ -415,8 +415,8 @@ subroutine test10(i)
! CHECK: fir.call @_QFtest10Pbar(%[[tup]]) {{.*}}: (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>>) -> ()
call bar()
contains
-! CHECK-LABEL: func @_QFtest10Pbar(
-! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func private @_QFtest10Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
subroutine bar()
! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>
! CHECK: fir.load %[[tupAddr]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>
@@ -433,9 +433,9 @@ end subroutine
! CHECK: %[[VAL_8:.*]] = fir.emboxproc %[[VAL_7]], %[[VAL_5]] : ((!fir.ref<i32>, !fir.ref<tuple<!fir.ref<i32>>>) -> (), !fir.ref<tuple<!fir.ref<i32>>>) -> !fir.boxproc<() -> ()>
! CHECK: fir.call @_QPtest_proc_dummy_other(%[[VAL_8]]) {{.*}}: (!fir.boxproc<() -> ()>) -> ()
-! CHECK-LABEL: func @_QFtest_proc_dummyPtest_proc_dummy_a(
+! CHECK-LABEL: func private @_QFtest_proc_dummyPtest_proc_dummy_a(
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "j"},
-! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_3:.*]] = fir.coordinate_of %[[VAL_1]], %[[VAL_2]] : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_3]] : !fir.llvm_ptr<!fir.ref<i32>>
@@ -525,10 +525,10 @@ end subroutine test_proc_dummy_other
! CHECK: return
! CHECK: }
-! CHECK-LABEL: func @_QFtest_proc_dummy_charPgen_message(
+! CHECK-LABEL: func private @_QFtest_proc_dummy_charPgen_message(
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.char<1,10>>,
! CHECK-SAME: %[[VAL_1:.*]]: index,
-! CHECK-SAME: %[[VAL_2:.*]]: !fir.ref<tuple<!fir.boxchar<1>>> {fir.host_assoc}) -> !fir.boxchar<1> attributes {fir.internal_proc} {
+! CHECK-SAME: %[[VAL_2:.*]]: !fir.ref<tuple<!fir.boxchar<1>>> {fir.host_assoc}) -> !fir.boxchar<1> attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i32
! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 10 : index
! CHECK-DAG: %[[VAL_5:.*]] = arith.constant false
diff --git a/flang/test/Lower/module-and-internal-proc.f90 b/flang/test/Lower/module-and-internal-proc.f90
index 1da5ce422939..0f4c6809581c 100644
--- a/flang/test/Lower/module-and-internal-proc.f90
+++ b/flang/test/Lower/module-and-internal-proc.f90
@@ -17,7 +17,7 @@ end subroutine
subroutine test2()
call test2internal()
contains
- ! CHECK-LABEL: func @_QMparentFtest2Ptest2internal()
+ ! CHECK-LABEL: func private @_QMparentFtest2Ptest2internal()
subroutine test2internal()
! CHECK: fir.address_of(@_QMparentEi) : !fir.ref<i32>
print *, i
@@ -31,7 +31,7 @@ subroutine test3()
use parent
call test3internal()
contains
- ! CHECK-LABEL: func @_QFtest3Ptest3internal()
+ ! CHECK-LABEL: func private @_QFtest3Ptest3internal()
subroutine test3internal()
! CHECK: fir.address_of(@_QMparentEi) : !fir.ref<i32>
print *, i
diff --git a/flang/test/Lower/parent-component.f90 b/flang/test/Lower/parent-component.f90
index ed1130a08493..c6bc53340643 100644
--- a/flang/test/Lower/parent-component.f90
+++ b/flang/test/Lower/parent-component.f90
@@ -29,20 +29,20 @@ contains
type(p), intent(in) :: a
print*, a
end subroutine
- ! CHECK-LABEL: func.func @_QFPprint_scalar(%{{.*}}: !fir.ref<!fir.type<_QFTp{a:i32}>> {fir.bindc_name = "a"})
+ ! CHECK-LABEL: func.func private @_QFPprint_scalar(%{{.*}}: !fir.ref<!fir.type<_QFTp{a:i32}>> {fir.bindc_name = "a"})
subroutine print_p(a)
type(p), intent(in) :: a(2)
print*, a
end subroutine
- ! CHECK-LABEL: func.func @_QFPprint_p(%{{.*}}: !fir.ref<!fir.array<2x!fir.type<_QFTp{a:i32}>>> {fir.bindc_name = "a"})
+ ! CHECK-LABEL: func.func private @_QFPprint_p(%{{.*}}: !fir.ref<!fir.array<2x!fir.type<_QFTp{a:i32}>>> {fir.bindc_name = "a"})
subroutine init_with_slice()
type(c) :: y(2) = [ c(11, 21), c(12, 22) ]
call print_p(y(:)%p)
print*,y(:)%p
end subroutine
- ! CHECK-LABEL: func.func @_QFPinit_with_slice()
+ ! CHECK-LABEL: func.func private @_QFPinit_with_slice()
! CHECK: %[[Y:.*]] = fir.address_of(@_QFFinit_with_sliceEy) : !fir.ref<!fir.array<2x!fir.type<_QFTc{a:i32,b:i32}>>>
! CHECK: %[[C2:.*]] = arith.constant 2 : index
! CHECK: %[[C1:.*]] = arith.constant 1 : index
@@ -78,7 +78,7 @@ contains
call print_p(y%p)
print*,y%p
end subroutine
- ! CHECK-LABEL: func.func @_QFPinit_no_slice()
+ ! CHECK-LABEL: func.func private @_QFPinit_no_slice()
! CHECK: %[[Y:.*]] = fir.address_of(@_QFFinit_no_sliceEy) : !fir.ref<!fir.array<2x!fir.type<_QFTc{a:i32,b:i32}>>>
! CHECK: %[[C2:.*]] = arith.constant 2 : index
! CHECK: %[[SHAPE:.*]] = fir.shape %[[C2]] : (index) -> !fir.shape<1>
@@ -106,7 +106,7 @@ contains
print*,y%p
end subroutine
- ! CHECK-LABEL: func.func @_QFPinit_allocatable()
+ ! CHECK-LABEL: func.func private @_QFPinit_allocatable()
! CHECK: %[[ALLOC:.*]] = fir.alloca !fir.heap<!fir.array<?x!fir.type<_QFTc{a:i32,b:i32}>>> {uniq_name = "_QFFinit_allocatableEy.addr"}
! CHECK: %[[LB0:.*]] = fir.alloca index {uniq_name = "_QFFinit_allocatableEy.lb0"}
! CHECK: %[[EXT0:.*]] = fir.alloca index {uniq_name = "_QFFinit_allocatableEy.ext0"}
@@ -139,7 +139,7 @@ contains
print*,s%p
end subroutine
- ! CHECK-LABEL: func.func @_QFPinit_scalar()
+ ! CHECK-LABEL: func.func private @_QFPinit_scalar()
! CHECK: %[[S:.*]] = fir.address_of(@_QFFinit_scalarEs) : !fir.ref<!fir.type<_QFTc{a:i32,b:i32}>>
! CHECK: %[[CAST:.*]] = fir.convert %[[S]] : (!fir.ref<!fir.type<_QFTc{a:i32,b:i32}>>) -> !fir.ref<!fir.type<_QFTp{a:i32}>>
! CHECK: fir.call @_QFPprint_scalar(%[[CAST]]) {{.*}}: (!fir.ref<!fir.type<_QFTp{a:i32}>>) -> ()
@@ -154,7 +154,7 @@ contains
print*,y%p
end subroutine
- ! CHECK-LABEL: func.func @_QFPinit_assumed(
+ ! CHECK-LABEL: func.func private @_QFPinit_assumed(
! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?x!fir.type<_QFTc{a:i32,b:i32}>>
! CHECK: %[[BOX:.*]] = fir.rebox %[[ARG0]] : (!fir.box<!fir.array<?x!fir.type<_QFTc{a:i32,b:i32}>>>) -> !fir.box<!fir.array<?x!fir.type<_QFTp{a:i32}>>>
@@ -167,7 +167,7 @@ contains
call print_p(y%c%p)
end subroutine
- ! CHECK-LABEL: func.func @_QFPinit_existing_field
+ ! CHECK-LABEL: func.func private @_QFPinit_existing_field
! CHECK: %[[C2:.*]] = arith.constant 2 : index
! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<2x!fir.type<_QFTz{k:i32,c:!fir.type<_QFTc{a:i32,b:i32}>}>> {bindc_name = "y", uniq_name = "_QFFinit_existing_fieldEy"}
! CHECK: %[[FIELD_C:.*]] = fir.field_index c, !fir.type<_QFTz{k:i32,c:!fir.type<_QFTc{a:i32,b:i32}>}>
@@ -183,7 +183,7 @@ contains
a%p = B
end subroutine
-! CHECK-LABEL: func.func @_QFPparent_comp_lhs()
+! CHECK-LABEL: func.func private @_QFPparent_comp_lhs()
! CHECK: %[[BOX:.*]] = fir.alloca !fir.box<!fir.type<_QFTp{a:i32}>>
! CHECK: %[[A:.*]] = fir.alloca !fir.type<_QFTc{a:i32,b:i32}> {bindc_name = "a", uniq_name = "_QFFparent_comp_lhsEa"}
! CHECK: %[[B:.*]] = fir.alloca !fir.type<_QFTp{a:i32}> {bindc_name = "b", uniq_name = "_QFFparent_comp_lhsEb"}
diff --git a/flang/test/Lower/polymorphic.f90 b/flang/test/Lower/polymorphic.f90
index a813eff690b7..15d8a86e4ef4 100644
--- a/flang/test/Lower/polymorphic.f90
+++ b/flang/test/Lower/polymorphic.f90
@@ -519,8 +519,8 @@ module polymorphic_test
end subroutine
end subroutine
-! CHECK-LABEL: func.func @_QMpolymorphic_testFhost_assocPinternal(
-! CHECK-SAME: %[[TUPLE:.*]]: !fir.ref<tuple<!fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>>> {fir.host_assoc}) attributes {fir.internal_proc} {
+! CHECK-LABEL: func.func private @_QMpolymorphic_testFhost_assocPinternal(
+! CHECK-SAME: %[[TUPLE:.*]]: !fir.ref<tuple<!fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>>> {fir.host_assoc}) attributes {fir.internal_proc, llvm.linkage = #llvm.linkage<internal>} {
! CHECK: %[[POS_IN_TUPLE:.*]] = arith.constant 0 : i32
! CHECK: %[[COORD_OF_CLASS:.*]] = fir.coordinate_of %[[TUPLE]], %[[POS_IN_TUPLE]] : (!fir.ref<tuple<!fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>>>, i32) -> !fir.ref<!fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>>
! CHECK: %[[CLASS:.*]] = fir.load %[[COORD_OF_CLASS]] : !fir.ref<!fir.class<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>>
diff --git a/flang/test/Lower/program-units-fir-mangling.f90 b/flang/test/Lower/program-units-fir-mangling.f90
index 36631979141a..002343c45f6e 100644
--- a/flang/test/Lower/program-units-fir-mangling.f90
+++ b/flang/test/Lower/program-units-fir-mangling.f90
@@ -44,12 +44,12 @@ end module
function foo2()
real(4) :: foo2
contains
- ! CHECK-LABEL: func @_QFfoo2Psub() {
+ ! CHECK-LABEL: func private @_QFfoo2Psub() {{.*}} {
subroutine sub()
! CHECK: }
end subroutine
- ! CHECK-LABEL: func @_QFfoo2Pfoo() {
+ ! CHECK-LABEL: func private @_QFfoo2Pfoo() {{.*}} {
subroutine foo()
! CHECK: }
end subroutine
@@ -58,12 +58,12 @@ end function
! CHECK-LABEL: func @_QPsub2()
subroutine sUb2()
contains
- ! CHECK-LABEL: func @_QFsub2Psub() {
+ ! CHECK-LABEL: func private @_QFsub2Psub() {{.*}} {
subroutine sub()
! CHECK: }
end subroutine
- ! CHECK-LABEL: func @_QFsub2Pfoo() {
+ ! CHECK-LABEL: func private @_QFsub2Pfoo() {{.*}} {
subroutine Foo()
! CHECK: }
end subroutine
@@ -74,7 +74,7 @@ contains
! CHECK-LABEL: func @_QMtestmod2Psub()
subroutine sub()
contains
- ! CHECK-LABEL: func @_QMtestmod2FsubPsubsub() {
+ ! CHECK-LABEL: func private @_QMtestmod2FsubPsubsub() {{.*}} {
subroutine subSub()
! CHECK: }
end subroutine
@@ -105,7 +105,7 @@ contains
! CHECK-LABEL: func @_QMcolor_pointsScolor_points_aSimplPfoo()
subroutine foo
contains
- ! CHECK-LABEL: func @_QMcolor_pointsScolor_points_aSimplFfooPbar() {
+ ! CHECK-LABEL: func private @_QMcolor_pointsScolor_points_aSimplFfooPbar() {{.*}} {
subroutine bar
! CHECK: }
end subroutine
@@ -128,7 +128,7 @@ end subroutine
program test
! CHECK: }
contains
-! CHECK-LABEL: func @_QFPshould_not_collide() {
+! CHECK-LABEL: func private @_QFPshould_not_collide() {{.*}} {
subroutine should_not_collide()
! CHECK: }
end subroutine
@@ -226,7 +226,7 @@ subroutine nest1
! CHECK: fir.call @_QFnest1Pinner()
call inner
contains
- ! CHECK-LABEL: func @_QFnest1Pinner
+ ! CHECK-LABEL: func private @_QFnest1Pinner
subroutine inner
! CHECK: %[[V_0:[0-9]+]] = fir.address_of(@_QFnest1FinnerEkk) : !fir.ref<i32>
integer, save :: kk = 1
@@ -239,7 +239,7 @@ subroutine nest2
! CHECK: fir.call @_QFnest2Pinner()
call inner
contains
- ! CHECK-LABEL: func @_QFnest2Pinner
+ ! CHECK-LABEL: func private @_QFnest2Pinner
subroutine inner
! CHECK: %[[V_0:[0-9]+]] = fir.address_of(@_QFnest2FinnerEkk) : !fir.ref<i32>
integer, save :: kk = 77
diff --git a/flang/test/Parser/assume-aligned.f90 b/flang/test/Parser/assume-aligned.f90
new file mode 100644
index 000000000000..c61c10d61d72
--- /dev/null
+++ b/flang/test/Parser/assume-aligned.f90
@@ -0,0 +1,57 @@
+! RUN: %flang_fc1 -fdebug-unparse-no-sema %s 2>&1 | FileCheck %s
+
+SUBROUTINE aa(a, nn)
+ IMPLICIT NONE
+ INTEGER, INTENT(IN) :: nn
+ COMPLEX(8), INTENT(INOUT), DIMENSION(1:nn) :: a
+ INTEGER :: i
+ !DIR$ assume_aligned a:16
+!CHECK: !DIR$ ASSUME_ALIGNED a:16
+ !DIR$ assume_aligned a (1):16
+!CHECK: !DIR$ ASSUME_ALIGNED a(1):16
+ !DIR$ assume_aligned a(1):16
+!CHECK: !DIR$ ASSUME_ALIGNED a(1):16
+ !DIR$ assume_aligned a(nn):16
+!CHECK: !DIR$ ASSUME_ALIGNED a(nn):16
+ !DIR$ assume_aligned a(44):16
+!CHECK: !DIR$ ASSUME_ALIGNED a(44):16
+ DO i=1,nn
+ a(i)=a(i)+1.5
+ END DO
+END SUBROUTINE aa
+
+SUBROUTINE bb(v, s, e)
+ IMPLICIT NONE
+ INTEGER, INTENT(IN) :: s(3), e(3)
+ INTEGER :: y,z
+ REAL(8), INTENT(IN) :: v(s(1):e(1),s(2):e(2),s(3):e(3))
+ !DIR$ assume_aligned v(s(1),y,z) :64
+!CHECK: !DIR$ ASSUME_ALIGNED v(s(1),y,z):64
+END SUBROUTINE bb
+
+SUBROUTINE f(n)
+ IMPLICIT NONE
+ TYPE node
+ REAL(KIND=8), POINTER :: a(:,:)
+ END TYPE NODE
+
+ TYPE(NODE), POINTER :: nodes
+ INTEGER :: i
+ INTEGER, INTENT(IN) :: n
+
+ ALLOCATE(nodes)
+ ALLOCATE(nodes%a(1000,1000))
+
+ !DIR$ ASSUME_ALIGNED nodes%a(1,1) : 16
+!CHECK: !DIR$ ASSUME_ALIGNED nodes%a(1,1):16
+ DO i=1,n
+ nodes%a(1,i) = nodes%a(1,i)+1
+ END DO
+END SUBROUTINE f
+
+SUBROUTINE g(a, b)
+ IMPLICIT NONE
+ INTEGER, INTENT(in) :: a(128), b(128)
+ !DIR$ ASSUME_ALIGNED a:32, b:64
+!CHECK: !DIR$ ASSUME_ALIGNED a:32, b:64
+END SUBROUTINE g
diff --git a/flang/test/Semantics/Inputs/dir1/modfile63a.mod b/flang/test/Semantics/Inputs/dir1/modfile63a.mod
new file mode 100644
index 000000000000..acaa125819b3
--- /dev/null
+++ b/flang/test/Semantics/Inputs/dir1/modfile63a.mod
@@ -0,0 +1,6 @@
+!mod$ v1 sum:cbe36d213d935559
+module modfile63a
+contains
+subroutine s1()
+end
+end
diff --git a/flang/test/Semantics/Inputs/dir1/modfile63b.mod b/flang/test/Semantics/Inputs/dir1/modfile63b.mod
new file mode 100644
index 000000000000..af5fec9e69bf
--- /dev/null
+++ b/flang/test/Semantics/Inputs/dir1/modfile63b.mod
@@ -0,0 +1,8 @@
+!mod$ v1 sum:ddea620dc2aa0520
+!need$ cbe36d213d935559 n modfile63a
+module modfile63b
+use modfile63a,only:s1
+contains
+subroutine s2()
+end
+end
diff --git a/flang/test/Semantics/Inputs/dir2/modfile63a.mod b/flang/test/Semantics/Inputs/dir2/modfile63a.mod
new file mode 100644
index 000000000000..8236d36c5758
--- /dev/null
+++ b/flang/test/Semantics/Inputs/dir2/modfile63a.mod
@@ -0,0 +1,6 @@
+!mod$ v1 sum:00761f8b3a4c5780
+module modfile63a
+contains
+subroutine s1a()
+end
+end
diff --git a/flang/test/Semantics/Inputs/dir2/modfile63b.mod b/flang/test/Semantics/Inputs/dir2/modfile63b.mod
new file mode 100644
index 000000000000..af5fec9e69bf
--- /dev/null
+++ b/flang/test/Semantics/Inputs/dir2/modfile63b.mod
@@ -0,0 +1,8 @@
+!mod$ v1 sum:ddea620dc2aa0520
+!need$ cbe36d213d935559 n modfile63a
+module modfile63b
+use modfile63a,only:s1
+contains
+subroutine s2()
+end
+end
diff --git a/flang/test/Semantics/assign04.f90 b/flang/test/Semantics/assign04.f90
index a00ca5213a7a..14d90a8d5a22 100644
--- a/flang/test/Semantics/assign04.f90
+++ b/flang/test/Semantics/assign04.f90
@@ -105,6 +105,13 @@ subroutine s6(x)
x(:) = [1, 2, 3]
!ERROR: Whole assumed-size array 'x' may not appear here without subscripts
x = [1, 2, 3]
+ associate (y => x) ! ok
+ !ERROR: Whole assumed-size array 'y' may not appear here without subscripts
+ y = [1, 2, 3]
+ end associate
+ !ERROR: Whole assumed-size array 'x' may not appear here without subscripts
+ associate (y => (x))
+ end associate
end
module m7
diff --git a/flang/test/Semantics/bind-c03.f90 b/flang/test/Semantics/bind-c03.f90
index 03a544b1954d..65d52e964ca4 100644
--- a/flang/test/Semantics/bind-c03.f90
+++ b/flang/test/Semantics/bind-c03.f90
@@ -13,7 +13,13 @@ module m
end
end interface
+ interface proc3
+ subroutine proc3() bind(c)
+ end
+ end interface
+
procedure(proc1), bind(c) :: pc1 ! no error
+ procedure(proc3), bind(c) :: pc4 ! no error
!ERROR: An interface name with BIND attribute must be specified if the BIND attribute is specified in a procedure declaration statement
procedure(proc2), bind(c) :: pc2
diff --git a/flang/test/Semantics/bind-c04.f90 b/flang/test/Semantics/bind-c04.f90
index a4aaffb239fd..27119e375ce0 100644
--- a/flang/test/Semantics/bind-c04.f90
+++ b/flang/test/Semantics/bind-c04.f90
@@ -19,7 +19,7 @@ subroutine sub(x, y)
end
end interface
- !Acceptable (as an extension)
+ !ERROR: A procedure declaration statement with a binding name may not declare multiple procedures
procedure(proc), bind(c, name="aaa") :: pc1, pc2
!ERROR: A procedure pointer may not have a BIND attribute with a name
diff --git a/flang/test/Semantics/call03.f90 b/flang/test/Semantics/call03.f90
index 2aca8de93acb..bb7a8cb05c5a 100644
--- a/flang/test/Semantics/call03.f90
+++ b/flang/test/Semantics/call03.f90
@@ -357,19 +357,19 @@ module m01
call valueassumedsize(b(::2)) ! ok
call valueassumedsize(c(::2)) ! ok
call valueassumedsize(d(::2)) ! ok
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatileassumedsize(b(::2))
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatilecontiguous(b(::2))
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatileassumedsize(c(::2))
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatilecontiguous(c(::2))
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatileassumedsize(d(::2))
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatilecontiguous(d(::2))
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatilecontiguous(assumedrank)
end subroutine
@@ -388,18 +388,66 @@ module m01
call valueassumedsize(b) ! ok
call valueassumedsize(c) ! ok
call valueassumedsize(d) ! ok
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatileassumedsize(b)
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatilecontiguous(b)
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatileassumedsize(c)
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatilecontiguous(c)
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatileassumedsize(d)
- !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous dummy argument 'x='
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
call volatilecontiguous(d)
end subroutine
+ subroutine explicitAsyncContig(x)
+ real, asynchronous, intent(in out), contiguous :: x(:)
+ end
+ subroutine implicitAsyncContig(x)
+ real, intent(in out), contiguous :: x(:)
+ read(1,id=id,asynchronous="yes") x
+ end
+ subroutine test17explicit(x)
+ real, asynchronous, intent(in out) :: x(:)
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call explicitAsyncContig(x)
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call implicitAsyncContig(x)
+ end
+ subroutine test17implicit(x)
+ real, intent(in out) :: x(:)
+ read(1,id=id,asynchronous="yes") x
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call explicitAsyncContig(x)
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call implicitAsyncContig(x)
+ end
+ subroutine test17block(x)
+ real, intent(in out) :: x(:)
+ call explicitAsyncContig(x) ! ok
+ call implicitAsyncContig(x) ! ok
+ block
+ read(1,id=id,asynchronous="yes") x
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call explicitAsyncContig(x)
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call implicitAsyncContig(x)
+ end block
+ end
+ subroutine test17internal(x)
+ real, intent(in out) :: x(:)
+ call explicitAsyncContig(x) ! ok
+ call implicitAsyncContig(x) ! ok
+ contains
+ subroutine internal
+ read(1,id=id,asynchronous="yes") x
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call explicitAsyncContig(x)
+ !ERROR: ASYNCHRONOUS or VOLATILE actual argument that is not simply contiguous may not be associated with a contiguous ASYNCHRONOUS or VOLATILE dummy argument 'x='
+ call implicitAsyncContig(x)
+ end
+ end
+
end module
diff --git a/flang/test/Semantics/data21.f90 b/flang/test/Semantics/data21.f90
new file mode 100644
index 000000000000..639f78440840
--- /dev/null
+++ b/flang/test/Semantics/data21.f90
@@ -0,0 +1,7 @@
+! RUN: %flang_fc1 -fdebug-dump-symbols %s 2>&1 | FileCheck %s
+! Ensure that DATA-like default component initializers work.
+! CHECK: j (InDataStmt) size=4 offset=0: ObjectEntity type: INTEGER(4) init:123_4
+type t
+ integer j/123/
+end type
+end
diff --git a/flang/test/Semantics/data22.f90 b/flang/test/Semantics/data22.f90
new file mode 100644
index 000000000000..365958dbe757
--- /dev/null
+++ b/flang/test/Semantics/data22.f90
@@ -0,0 +1,17 @@
+! RUN: %flang_fc1 -fdebug-dump-symbols %s 2>&1 | FileCheck %s
+! Ensure that implicitly typed DATA statement objects with derived
+! types get their symbols resolved by the end of the name resolution pass.
+! CHECK: x1 (Implicit, InDataStmt) size=4 offset=0: ObjectEntity type: TYPE(t1) shape: 1_8:1_8 init:[t1::t1(n=123_4)]
+! CHECK: x2 (InDataStmt) size=4 offset=4: ObjectEntity type: TYPE(t2) shape: 1_8:1_8 init:[t2::t2(m=456_4)]
+implicit type(t1)(x)
+type t1
+ integer n
+end type
+dimension x1(1), x2(1)
+data x1(1)%n /123/
+data x2(1)%m /456/
+type t2
+ integer m
+end type
+type(t2) x2
+end
diff --git a/flang/test/Semantics/getsymbols02.f90 b/flang/test/Semantics/getsymbols02.f90
index 25a4c30809fb..2605a593e814 100644
--- a/flang/test/Semantics/getsymbols02.f90
+++ b/flang/test/Semantics/getsymbols02.f90
@@ -11,4 +11,4 @@ ENDPROGRAM
! RUN: %flang_fc1 -fsyntax-only %S/Inputs/getsymbols02-b.f90
! RUN: %flang_fc1 -fget-symbols-sources %s 2>&1 | FileCheck %s
! CHECK: callget5: .{{[/\\]}}mm2b.mod,
-! CHECK: get5: .{{[/\\]}}mm2a.mod,
+! CHECK: get5: .{{[/\\]}}.{{[/\\]}}mm2a.mod,
diff --git a/flang/test/Semantics/implicit14.f90 b/flang/test/Semantics/implicit14.f90
new file mode 100644
index 000000000000..d688049a587f
--- /dev/null
+++ b/flang/test/Semantics/implicit14.f90
@@ -0,0 +1,54 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1
+module m
+ type dt
+ procedure(explicit), pointer, nopass :: p
+ end type
+ contains
+ integer function one()
+ one = 1
+ end
+ function onePtr()
+ procedure(one), pointer :: onePtr
+ onePtr => one
+ end
+ function explicit
+ character(:), allocatable :: explicit
+ explicit = "abc"
+ end
+end
+
+program test
+ use m
+ procedure(), pointer :: p0
+ procedure(one), pointer :: p1
+ procedure(integer), pointer :: p2
+ procedure(explicit), pointer :: p3
+ external implicit
+ type(dt) x
+ p0 => one ! ok
+ p0 => onePtr() ! ok
+ p0 => implicit ! ok
+ !ERROR: Procedure pointer 'p0' with implicit interface may not be associated with procedure designator 'explicit' with explicit interface that cannot be called via an implicit interface
+ p0 => explicit
+ p1 => one ! ok
+ p1 => onePtr() ! ok
+ p1 => implicit ! ok
+ !ERROR: Function pointer 'p1' associated with incompatible function designator 'explicit': function results have incompatible attributes
+ p1 => explicit
+ p2 => one ! ok
+ p2 => onePtr() ! ok
+ p2 => implicit ! ok
+ !ERROR: Function pointer 'p2' associated with incompatible function designator 'explicit': function results have incompatible attributes
+ p2 => explicit
+ !ERROR: Function pointer 'p3' associated with incompatible function designator 'one': function results have incompatible attributes
+ p3 => one
+ !ERROR: Procedure pointer 'p3' associated with result of reference to function 'oneptr' that is an incompatible procedure pointer: function results have incompatible attributes
+ p3 => onePtr()
+ p3 => explicit ! ok
+ !ERROR: Procedure pointer 'p3' with explicit interface that cannot be called via an implicit interface cannot be associated with procedure designator with an implicit interface
+ p3 => implicit
+ !ERROR: Procedure pointer 'p' with explicit interface that cannot be called via an implicit interface cannot be associated with procedure designator with an implicit interface
+ x = dt(implicit)
+ !ERROR: Procedure pointer 'p' with explicit interface that cannot be called via an implicit interface cannot be associated with procedure designator with an implicit interface
+ x%p => implicit
+end
diff --git a/flang/test/Semantics/intrinsics03.f90 b/flang/test/Semantics/intrinsics03.f90
new file mode 100644
index 000000000000..03109bc300ca
--- /dev/null
+++ b/flang/test/Semantics/intrinsics03.f90
@@ -0,0 +1,125 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1
+! Ensure that INDEX is a usable specific intrinsic procedure.
+
+program test
+ interface
+ pure integer function index1(string, substring)
+ character(*), intent(in) :: string, substring ! ok
+ end
+ pure integer function index2(x1, x2)
+ character(*), intent(in) :: x1, x2 ! ok
+ end
+ pure integer function index3(string, substring)
+ character, intent(in) :: string, substring ! not assumed length
+ end
+ pure integer function index4(string, substring, back)
+ character(*), intent(in) :: string, substring
+ logical, optional, intent(in) :: back ! not ok
+ end
+ subroutine s0(ix)
+ procedure(index) :: ix
+ end
+ subroutine s1(ix)
+ import index1
+ procedure(index1) :: ix
+ end
+ subroutine s2(ix)
+ import index2
+ procedure(index2) :: ix
+ end
+ subroutine s3(ix)
+ import index3
+ procedure(index3) :: ix
+ end
+ subroutine s4(ix)
+ import index4
+ procedure(index4) :: ix
+ end
+ end interface
+
+ procedure(index), pointer :: p0
+ procedure(index1), pointer :: p1
+ procedure(index2), pointer :: p2
+ procedure(index3), pointer :: p3
+ procedure(index4), pointer :: p4
+
+ p0 => index ! ok
+ p0 => index1 ! ok
+ p0 => index2 ! ok
+ !ERROR: Procedure pointer 'p0' associated with incompatible procedure designator 'index3': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ p0 => index3
+ !ERROR: Procedure pointer 'p0' associated with incompatible procedure designator 'index4': distinct numbers of dummy arguments
+ p0 => index4
+ p1 => index ! ok
+ p1 => index1 ! ok
+ p1 => index2 ! ok
+ !ERROR: Procedure pointer 'p1' associated with incompatible procedure designator 'index3': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ p1 => index3
+ !ERROR: Procedure pointer 'p1' associated with incompatible procedure designator 'index4': distinct numbers of dummy arguments
+ p1 => index4
+ p2 => index ! ok
+ p2 => index1 ! ok
+ p2 => index2 ! ok
+ !ERROR: Procedure pointer 'p2' associated with incompatible procedure designator 'index3': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ p2 => index3
+ !ERROR: Procedure pointer 'p2' associated with incompatible procedure designator 'index4': distinct numbers of dummy arguments
+ p2 => index4
+ !ERROR: Procedure pointer 'p3' associated with incompatible procedure designator 'index': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ p3 => index
+ !ERROR: Procedure pointer 'p3' associated with incompatible procedure designator 'index1': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ p3 => index1
+ !ERROR: Procedure pointer 'p3' associated with incompatible procedure designator 'index2': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ p3 => index2
+ p3 => index3 ! ok
+ !ERROR: Procedure pointer 'p3' associated with incompatible procedure designator 'index4': distinct numbers of dummy arguments
+ p3 => index4
+ !ERROR: Procedure pointer 'p4' associated with incompatible procedure designator 'index': distinct numbers of dummy arguments
+ p4 => index
+ !ERROR: Procedure pointer 'p4' associated with incompatible procedure designator 'index1': distinct numbers of dummy arguments
+ p4 => index1
+ !ERROR: Procedure pointer 'p4' associated with incompatible procedure designator 'index2': distinct numbers of dummy arguments
+ p4 => index2
+ !ERROR: Procedure pointer 'p4' associated with incompatible procedure designator 'index3': distinct numbers of dummy arguments
+ p4 => index3
+ p4 => index4 ! ok
+
+ call s0(index) ! ok
+ call s0(index1) ! ok
+ call s0(index2)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ call s0(index3)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s0(index4)
+ call s1(index) ! ok
+ call s1(index1) ! ok
+ call s1(index2) ! ok
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ call s1(index3)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s1(index4)
+ call s2(index) ! ok
+ call s2(index1) ! ok
+ call s2(index2) ! ok
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ call s2(index3)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s2(index4)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ call s3(index)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ call s3(index1)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': incompatible dummy argument #1: assumed-length character vs explicit-length character
+ call s3(index2)
+ call s3(index3) ! ok
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s3(index4)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s4(index)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s4(index1)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s4(index2)
+ !ERROR: Actual procedure argument has interface incompatible with dummy argument 'ix=': distinct numbers of dummy arguments
+ call s4(index3)
+ call s4(index4) ! ok
+end
diff --git a/flang/test/Semantics/modfile63.f90 b/flang/test/Semantics/modfile63.f90
new file mode 100644
index 000000000000..aaf1f7beaa48
--- /dev/null
+++ b/flang/test/Semantics/modfile63.f90
@@ -0,0 +1,19 @@
+! RUN: %flang_fc1 -fsyntax-only -I%S/Inputs/dir1 %s
+! RUN: not %flang_fc1 -fsyntax-only -I%S/Inputs/dir2 %s 2>&1 | FileCheck --check-prefix=ERROR %s
+! RUN: %flang_fc1 -Werror -fsyntax-only -I%S/Inputs/dir1 -I%S/Inputs/dir2 %s
+! RUN: not %flang_fc1 -Werror -fsyntax-only -I%S/Inputs/dir2 -I%S/Inputs/dir1 %s 2>&1 | FileCheck --check-prefix=WARNING %s
+
+! Inputs/dir1 and Inputs/dir2 each have identical copies of modfile63b.mod.
+! modfile63b.mod depends on Inputs/dir1/modfile63a.mod - the version in
+! Inputs/dir2/modfile63a.mod has a distinct checksum and should be
+! ignored with a warning.
+
+! If it becomes necessary to recompile those modules, just use the
+! module files as Fortran source.
+
+use modfile63b
+call s2
+end
+
+! ERROR: Could not find a module file for 'modfile63a' in the module search path with the expected checksum
+! WARNING: Module file for 'modfile63a' appears later in the module search path than conflicting modules with different checksums
diff --git a/flang/test/Semantics/resolve61.f90 b/flang/test/Semantics/resolve61.f90
index d6499f07b860..32bf9091a856 100644
--- a/flang/test/Semantics/resolve61.f90
+++ b/flang/test/Semantics/resolve61.f90
@@ -114,7 +114,7 @@ subroutine p12
type(t2) :: x2
type(t3) :: x3
pointer(a, x1)
- !ERROR: Type of Cray pointee 'x2' is a derived type that is neither SEQUENCE nor BIND(C)
+ !WARNING: Type of Cray pointee 'x2' is a derived type that is neither SEQUENCE nor BIND(C)
pointer(b, x2)
pointer(c, x3)
end
diff --git a/flang/test/Semantics/resolve91.f90 b/flang/test/Semantics/resolve91.f90
index 9873c5a351a4..2b0c4b6aa57e 100644
--- a/flang/test/Semantics/resolve91.f90
+++ b/flang/test/Semantics/resolve91.f90
@@ -4,7 +4,7 @@ module m
procedure(real), pointer :: p
!ERROR: EXTERNAL attribute was already specified on 'p'
!ERROR: POINTER attribute was already specified on 'p'
- !ERROR: The interface for procedure 'p' has already been declared
+ !ERROR: The type of 'p' has already been declared
procedure(integer), pointer :: p
end
@@ -82,3 +82,10 @@ module m8
!ERROR: The type of 'pvar' has already been declared
integer, pointer :: pVar => kVar
end module m8
+
+module m9
+ integer :: p, q
+ procedure() p ! ok
+ !ERROR: The type of 'q' has already been declared
+ procedure(real) q
+end module m9
diff --git a/flang/test/Semantics/separate-mp02.f90 b/flang/test/Semantics/separate-mp02.f90
index 5d13b6b693c8..c63ab6f41a13 100644
--- a/flang/test/Semantics/separate-mp02.f90
+++ b/flang/test/Semantics/separate-mp02.f90
@@ -148,6 +148,8 @@ module m2b
end
module subroutine s6() bind(c)
end
+ module subroutine s7() bind(c, name="s7")
+ end
end interface
end
@@ -172,6 +174,8 @@ contains
!ERROR: Module subprogram 's6' has binding label 'not_s6' but the corresponding interface body has 's6'
module subroutine s6() bind(c, name="not_s6")
end
+ module procedure s7
+ end
end
diff --git a/flang/test/Semantics/test_modfile.py b/flang/test/Semantics/test_modfile.py
index 87bd7dd0b55b..0e7806f27aa9 100755
--- a/flang/test/Semantics/test_modfile.py
+++ b/flang/test/Semantics/test_modfile.py
@@ -65,7 +65,7 @@ with tempfile.TemporaryDirectory() as tmpdir:
sys.exit(1)
with open(mod, "r", encoding="utf-8", errors="strict") as f:
for line in f:
- if "!mod$" in line:
+ if "!mod$" in line or "!need$" in line:
continue
actual += line
diff --git a/flang/test/Semantics/typed-subr.f90 b/flang/test/Semantics/typed-subr.f90
new file mode 100644
index 000000000000..c6637c95bdfd
--- /dev/null
+++ b/flang/test/Semantics/typed-subr.f90
@@ -0,0 +1,4 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1
+! ERROR: SUBROUTINE prefix cannot specify a type
+integer subroutine foo
+end
diff --git a/flang/unittests/Runtime/NumericalFormatTest.cpp b/flang/unittests/Runtime/NumericalFormatTest.cpp
index 03a2be3ca56d..37eecd7708a1 100644
--- a/flang/unittests/Runtime/NumericalFormatTest.cpp
+++ b/flang/unittests/Runtime/NumericalFormatTest.cpp
@@ -916,7 +916,7 @@ TEST(IOApiTests, EditDoubleInputValues) {
{"(RU,F7.0)", "-1.e999", 0xffefffffffffffff, 0}, // -HUGE()
{"(E9.1)", " 1.0E-325", 0x0, 0},
{"(RU,E9.1)", " 1.0E-325", 0x1, 0},
- {"(E9.1)", "-1.0E-325", 0x0, 0},
+ {"(E9.1)", "-1.0E-325", 0x8000000000000000, 0},
{"(RD,E9.1)", "-1.0E-325", 0x8000000000000001, 0},
};
for (auto const &[format, data, want, iostat] : testCases) {
diff --git a/libc/.clang-tidy b/libc/.clang-tidy
index 5adada9a3f59..dbde88928ee6 100644
--- a/libc/.clang-tidy
+++ b/libc/.clang-tidy
@@ -26,5 +26,7 @@ CheckOptions:
value: UPPER_CASE
- key: readability-identifier-naming.ConstexprVariableCase
value: UPPER_CASE
+ - key: readability-identifier-naming.ConstexprFunctionCase
+ value: lower_case
- key: readability-identifier-naming.GetConfigPerFile
value: true
diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt
index 75fcc91757b8..0b72b1c54816 100644
--- a/libc/CMakeLists.txt
+++ b/libc/CMakeLists.txt
@@ -240,7 +240,7 @@ endif()
if(LIBC_TARGET_TRIPLE)
set(LIBC_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX}/${LIBC_TARGET_TRIPLE})
-elseif(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT LIBC_GPU_BUILD)
+elseif(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR)
set(LIBC_INSTALL_LIBRARY_DIR
lib${LLVM_LIBDIR_SUFFIX}/${LLVM_DEFAULT_TARGET_TRIPLE})
else()
diff --git a/libc/benchmarks/automemcpy/lib/CMakeLists.txt b/libc/benchmarks/automemcpy/lib/CMakeLists.txt
index 0c7d399d4023..e66b9045b607 100644
--- a/libc/benchmarks/automemcpy/lib/CMakeLists.txt
+++ b/libc/benchmarks/automemcpy/lib/CMakeLists.txt
@@ -18,7 +18,8 @@ add_custom_command(
add_library(automemcpy_implementations "${Implementations}")
target_link_libraries(automemcpy_implementations PUBLIC LLVMSupport libc-memory-benchmark)
-target_include_directories(automemcpy_implementations PRIVATE ${LIBC_SOURCE_DIR} ${LIBC_AUTOMEMCPY_INCLUDE_DIR})
+target_include_directories(automemcpy_implementations PRIVATE
+ ${LIBC_SOURCE_DIR} ${LIBC_AUTOMEMCPY_INCLUDE_DIR})
target_compile_options(automemcpy_implementations PRIVATE ${LIBC_COMPILE_OPTIONS_NATIVE} "SHELL:-mllvm -combiner-global-alias-analysis" -fno-builtin)
llvm_update_compile_flags(automemcpy_implementations)
diff --git a/libc/cmake/modules/CheckCompilerFeatures.cmake b/libc/cmake/modules/CheckCompilerFeatures.cmake
index c3f50df1dda5..3a9e1e3b1cf8 100644
--- a/libc/cmake/modules/CheckCompilerFeatures.cmake
+++ b/libc/cmake/modules/CheckCompilerFeatures.cmake
@@ -55,7 +55,7 @@ foreach(feature IN LISTS ALL_COMPILER_FEATURES)
if(has_feature)
list(APPEND AVAILABLE_COMPILER_FEATURES ${feature})
if(${feature} STREQUAL "float128")
- set(LIBC_COMPILER_HAS_FLOAT128 TRUE)
+ set(LIBC_TYPES_HAS_FLOAT128 TRUE)
elseif(${feature} STREQUAL "fixed_point")
set(LIBC_COMPILER_HAS_FIXED_POINT TRUE)
endif()
diff --git a/libc/cmake/modules/LLVMLibCArchitectures.cmake b/libc/cmake/modules/LLVMLibCArchitectures.cmake
index 0dbc59ad643a..dacb4db75d33 100644
--- a/libc/cmake/modules/LLVMLibCArchitectures.cmake
+++ b/libc/cmake/modules/LLVMLibCArchitectures.cmake
@@ -167,13 +167,14 @@ if(LIBC_TARGET_OS STREQUAL "baremetal")
set(LIBC_TARGET_OS_IS_BAREMETAL TRUE)
elseif(LIBC_TARGET_OS STREQUAL "linux")
set(LIBC_TARGET_OS_IS_LINUX TRUE)
-elseif(LIBC_TARGET_OS STREQUAL "poky" OR LIBC_TARGET_OS STREQUAL "suse")
- # poky are ustom Linux-base systems created by yocto. Since these are Linux
+elseif(LIBC_TARGET_OS STREQUAL "poky" OR LIBC_TARGET_OS STREQUAL "suse" OR
+ LIBC_TARGET_OS STREQUAL "redhat")
+ # poky are custom Linux-base systems created by yocto. Since these are Linux
# images, we change the LIBC_TARGET_OS to linux. This define is used to
# include the right directories during compilation.
#
- # openSUSE uses different triple format which causes LIBC_TARGET_OS to be
- # computed as "suse" instead of "linux".
+ # openSUSE and redhat use different triple format which causes LIBC_TARGET_OS
+ # to be computed as "suse" or "redhat" instead of "linux".
set(LIBC_TARGET_OS_IS_LINUX TRUE)
set(LIBC_TARGET_OS "linux")
elseif(LIBC_TARGET_OS STREQUAL "darwin")
diff --git a/libc/cmake/modules/compiler_features/check_float128.cpp b/libc/cmake/modules/compiler_features/check_float128.cpp
index 8b1e3fe04ed4..64fa9f97dff3 100644
--- a/libc/cmake/modules/compiler_features/check_float128.cpp
+++ b/libc/cmake/modules/compiler_features/check_float128.cpp
@@ -1,5 +1,5 @@
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
-#ifndef LIBC_COMPILER_HAS_FLOAT128
+#ifndef LIBC_TYPES_HAS_FLOAT128
#error unsupported
#endif
diff --git a/libc/config/baremetal/api.td b/libc/config/baremetal/api.td
index d6897fbecaac..3da83d9eb30c 100644
--- a/libc/config/baremetal/api.td
+++ b/libc/config/baremetal/api.td
@@ -2,6 +2,42 @@ include "config/public_api.td"
include "spec/stdc.td"
+def AssertMacro : MacroDef<"assert"> {
+ let Defn = [{
+ #undef assert
+
+ #ifdef NDEBUG
+ #define assert(e) (void)0
+ #else
+
+ #ifdef __cplusplus
+ extern "C"
+ #endif
+ _Noreturn void __assert_fail(const char *, const char *, unsigned, const char *) __NOEXCEPT;
+
+ #define assert(e) \
+ ((e) ? (void)0 : __assert_fail(#e, __FILE__, __LINE__, __PRETTY_FUNCTION__))
+
+ #endif
+ }];
+}
+
+def StaticAssertMacro : MacroDef<"static_assert"> {
+ let Defn = [{
+ #ifndef __cplusplus
+ #undef static_assert
+ #define static_assert _Static_assert
+ #endif
+ }];
+}
+
+def AssertAPI : PublicAPI<"assert.h"> {
+ let Macros = [
+ AssertMacro,
+ StaticAssertMacro,
+ ];
+}
+
def CTypeAPI : PublicAPI<"ctype.h"> {
}
diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt
index 608ac4603430..a61d9feac293 100644
--- a/libc/config/baremetal/arm/entrypoints.txt
+++ b/libc/config/baremetal/arm/entrypoints.txt
@@ -1,4 +1,7 @@
set(TARGET_LIBC_ENTRYPOINTS
+ # assert.h entrypoints
+ libc.src.assert.__assert_fail
+
# ctype.h entrypoints
libc.src.ctype.isalnum
libc.src.ctype.isalpha
diff --git a/libc/config/baremetal/arm/headers.txt b/libc/config/baremetal/arm/headers.txt
index 38899fabd980..4c02ac84018d 100644
--- a/libc/config/baremetal/arm/headers.txt
+++ b/libc/config/baremetal/arm/headers.txt
@@ -1,4 +1,5 @@
set(TARGET_PUBLIC_HEADERS
+ libc.include.assert
libc.include.ctype
libc.include.fenv
libc.include.errno
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index 2f299e992be0..533f9f9f3685 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -1,4 +1,7 @@
set(TARGET_LIBC_ENTRYPOINTS
+ # assert.h entrypoints
+ libc.src.assert.__assert_fail
+
# ctype.h entrypoints
libc.src.ctype.isalnum
libc.src.ctype.isalpha
diff --git a/libc/config/baremetal/riscv/headers.txt b/libc/config/baremetal/riscv/headers.txt
index 38899fabd980..4c02ac84018d 100644
--- a/libc/config/baremetal/riscv/headers.txt
+++ b/libc/config/baremetal/riscv/headers.txt
@@ -1,4 +1,5 @@
set(TARGET_PUBLIC_HEADERS
+ libc.include.assert
libc.include.ctype
libc.include.fenv
libc.include.errno
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index a6dc74101dbc..06832a41221d 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -414,7 +414,7 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.truncl
)
-if(LIBC_COMPILER_HAS_FLOAT128)
+if(LIBC_TYPES_HAS_FLOAT128)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# math.h C23 _Float128 entrypoints
libc.src.math.ceilf128
diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt
index fc4d8828f4c6..bf518083b51f 100644
--- a/libc/config/linux/riscv/entrypoints.txt
+++ b/libc/config/linux/riscv/entrypoints.txt
@@ -423,7 +423,7 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.truncl
)
-if(LIBC_COMPILER_HAS_FLOAT128)
+if(LIBC_TYPES_HAS_FLOAT128)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# math.h C23 _Float128 entrypoints
libc.src.math.ceilf128
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index c2300a2aa681..bc10512d942f 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -142,6 +142,11 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdbit.stdc_count_ones_ui
libc.src.stdbit.stdc_count_ones_ul
libc.src.stdbit.stdc_count_ones_ull
+ libc.src.stdbit.stdc_has_single_bit_uc
+ libc.src.stdbit.stdc_has_single_bit_us
+ libc.src.stdbit.stdc_has_single_bit_ui
+ libc.src.stdbit.stdc_has_single_bit_ul
+ libc.src.stdbit.stdc_has_single_bit_ull
# stdlib.h entrypoints
libc.src.stdlib.abs
@@ -437,7 +442,7 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.truncl
)
-if(LIBC_COMPILER_HAS_FLOAT128)
+if(LIBC_TYPES_HAS_FLOAT128)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# math.h C23 _Float128 entrypoints
libc.src.math.ceilf128
diff --git a/libc/docs/dev/code_style.rst b/libc/docs/dev/code_style.rst
index eeeced0359ad..e6fc6df5a0f6 100644
--- a/libc/docs/dev/code_style.rst
+++ b/libc/docs/dev/code_style.rst
@@ -47,8 +47,8 @@ We define two kinds of macros:
e.g., ``LIBC_COMPILER_IS_CLANG``.
* ``cpu_features.h`` - Target cpu feature availability.
e.g., ``LIBC_TARGET_CPU_HAS_AVX2``.
- * ``float.h`` - Floating point type properties and availability.
- e.g., ``LIBC_COMPILER_HAS_FLOAT128``.
+ * ``types.h`` - Type properties and availability.
+ e.g., ``LIBC_TYPES_HAS_FLOAT128``.
* ``os.h`` - Target os properties.
e.g., ``LIBC_TARGET_OS_IS_LINUX``.
diff --git a/libc/docs/dev/undefined_behavior.rst b/libc/docs/dev/undefined_behavior.rst
index 0cb25c7f2a23..6e73a305e8e0 100644
--- a/libc/docs/dev/undefined_behavior.rst
+++ b/libc/docs/dev/undefined_behavior.rst
@@ -20,6 +20,7 @@ guidelines and the resulting code should behave predictably even in unexpected
situations.
#. Follow the standards.
+ #. If there is no standard, first ask yourself if this implementation is necessary (are there users who need this functionality?). If it truly is, then match existing implementations. Creating competing designs just causes confusion (see the history of qsort_r).
#. Avoid giving an incorrect answer.
#. In general, correct answer > correct answer (wrong format) > no answer > crash the program >>>>>>> incorrect answer.
#. The C library is called frequently in performance critical situations, and so can't afford to do thorough error checking and correction.
@@ -61,7 +62,7 @@ Often the standard will imply an intended behavior through what it states is und
Ignoring Bug-For-Bug Compatibility
----------------------------------
-Any long running implementations will have bugs and deviations from the standard. Hyrum's Law states that “all observable behaviors of your system will be depended on by somebody” which includes these bugs. An example of a long-standing bug is glibc's scanf float parsing behavior. The behavior is specifically defined in the standard, but it isn't adhered to by all libc implementations. There is a longstanding bug in glibc where it incorrectly parses the string 100er and this caused the C standard to add that specific example to the definition for scanf. The intended behavior is for scanf, when parsing a float, to parse the longest possibly valid prefix and then accept it if and only if that complete parsed value is a float. In the case of 100er the longest possibly valid prefix is 100e but the float parsed from that string is only 100. Since there is no number after the e it shouldn't be included in the float, so scanf should return a parsing error. For LLVM's libc it was decided to follow the standard, even though glibc's version is slightly simpler to implement and this edge case is rare. Following the standard must be the first priority, since that's the goal of the library.
+Any long running implementations will have bugs and deviations from the standard. Hyrum's Law states that “all observable behaviors of your system will be depended on by somebody” which includes these bugs. An example of a long-standing bug is glibc's scanf float parsing behavior. The behavior is specifically defined in the standard, but it isn't adhered to by all libc implementations. There is a longstanding bug in glibc where it incorrectly parses the string 100er and this caused the C standard to add that specific example to the definition for scanf. The intended behavior is for scanf, when parsing a float, to parse the longest possibly valid prefix and then accept it if and only if that complete parsed value is a float. In the case of 100er the longest possibly valid prefix is 100e but the float parsed from that string is only 100. Since there is no number after the e it shouldn't be included in the float, so scanf should return a parsing error. For LLVM's libc it was decided to follow the standard, even though glibc's version is slightly simpler to implement and this edge case is rare. Following the standard must be the first priority, since that's the goal of the library. If there is no standard, then matching another implementation (even bug-for-bug) may be necessary, but before you implement an unstandardized function first consider if anyone will actually use it at all.
Design Decisions
================
diff --git a/libc/docs/stdbit.rst b/libc/docs/stdbit.rst
index 0308caeb9293..b579e9dbbc2f 100644
--- a/libc/docs/stdbit.rst
+++ b/libc/docs/stdbit.rst
@@ -81,11 +81,11 @@ stdc_count_ones_us |check|
stdc_count_ones_ui |check|
stdc_count_ones_ul |check|
stdc_count_ones_ull |check|
-stdc_has_single_bit_uc
-stdc_has_single_bit_us
-stdc_has_single_bit_ui
-stdc_has_single_bit_ul
-stdc_has_single_bit_ull
+stdc_has_single_bit_uc |check|
+stdc_has_single_bit_us |check|
+stdc_has_single_bit_ui |check|
+stdc_has_single_bit_ul |check|
+stdc_has_single_bit_ull |check|
stdc_bit_width_uc
stdc_bit_width_us
stdc_bit_width_ui
@@ -124,7 +124,7 @@ stdc_first_trailing_zero |check|
stdc_first_trailing_one |check|
stdc_count_zeros |check|
stdc_count_ones |check|
-stdc_has_single_bit
+stdc_has_single_bit |check|
stdc_bit_width
stdc_bit_floor
stdc_bit_ceil
diff --git a/libc/include/__llvm-libc-common.h b/libc/include/__llvm-libc-common.h
index 6b883ee21a8c..3af0b08e9e86 100644
--- a/libc/include/__llvm-libc-common.h
+++ b/libc/include/__llvm-libc-common.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC___COMMON_H
-#define LLVM_LIBC___COMMON_H
+#ifndef LLVM_LIBC_COMMON_H
+#define LLVM_LIBC_COMMON_H
#ifdef __cplusplus
@@ -51,4 +51,4 @@
#endif // __cplusplus
-#endif // LLVM_LIBC___COMMON_H
+#endif // LLVM_LIBC_COMMON_H
diff --git a/libc/include/llvm-libc-macros/containerof-macro.h b/libc/include/llvm-libc-macros/containerof-macro.h
index ea91fa7097a4..62724abd3b0f 100644
--- a/libc/include/llvm-libc-macros/containerof-macro.h
+++ b/libc/include/llvm-libc-macros/containerof-macro.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
-#define __LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
+#ifndef LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
+#define LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
#include <llvm-libc-macros/offsetof-macro.h>
@@ -17,4 +17,4 @@
(type *)(void *)((const char *)__ptr - offsetof(type, member)); \
})
-#endif // __LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
+#endif // LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
diff --git a/libc/include/llvm-libc-macros/fcntl-macros.h b/libc/include/llvm-libc-macros/fcntl-macros.h
index 448dcc0a8135..4bd03a7e3e2b 100644
--- a/libc/include/llvm-libc-macros/fcntl-macros.h
+++ b/libc/include/llvm-libc-macros/fcntl-macros.h
@@ -1,8 +1,8 @@
-#ifndef __LLVM_LIBC_MACROS_FCNTL_MACROS_H
-#define __LLVM_LIBC_MACROS_FCNTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FCNTL_MACROS_H
+#define LLVM_LIBC_MACROS_FCNTL_MACROS_H
#ifdef __linux__
#include "linux/fcntl-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_FCNTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_FCNTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/features-macros.h b/libc/include/llvm-libc-macros/features-macros.h
index 2938b3ccb95b..5bc87a68fc0b 100644
--- a/libc/include/llvm-libc-macros/features-macros.h
+++ b/libc/include/llvm-libc-macros/features-macros.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FEATURES_MACROS_H
-#define __LLVM_LIBC_MACROS_FEATURES_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FEATURES_MACROS_H
+#define LLVM_LIBC_MACROS_FEATURES_MACROS_H
#define __LLVM_LIBC__ 1
-#endif // __LLVM_LIBC_MACROS_FEATURES_MACROS_H
+#endif // LLVM_LIBC_MACROS_FEATURES_MACROS_H
diff --git a/libc/include/llvm-libc-macros/fenv-macros.h b/libc/include/llvm-libc-macros/fenv-macros.h
index cc0ea344b170..72ac660cd98c 100644
--- a/libc/include/llvm-libc-macros/fenv-macros.h
+++ b/libc/include/llvm-libc-macros/fenv-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FENV_MACROS_H
-#define __LLVM_LIBC_MACROS_FENV_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FENV_MACROS_H
+#define LLVM_LIBC_MACROS_FENV_MACROS_H
#define FE_DIVBYZERO 1
#define FE_INEXACT 2
@@ -24,4 +24,4 @@
#define FE_DFL_ENV ((fenv_t *)-1)
-#endif // __LLVM_LIBC_MACROS_FENV_MACROS_H
+#endif // LLVM_LIBC_MACROS_FENV_MACROS_H
diff --git a/libc/include/llvm-libc-macros/file-seek-macros.h b/libc/include/llvm-libc-macros/file-seek-macros.h
index 04f397982f46..676cb7511407 100644
--- a/libc/include/llvm-libc-macros/file-seek-macros.h
+++ b/libc/include/llvm-libc-macros/file-seek-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
-#define __LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
+#define LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
#define SEEK_SET 0
#define SEEK_CUR 1
#define SEEK_END 2
-#endif // __LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
+#endif // LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
diff --git a/libc/include/llvm-libc-macros/float-macros.h b/libc/include/llvm-libc-macros/float-macros.h
index 86ec49393930..4fe8590c5f70 100644
--- a/libc/include/llvm-libc-macros/float-macros.h
+++ b/libc/include/llvm-libc-macros/float-macros.h
@@ -6,13 +6,14 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FLOAT_MACROS_H
-#define __LLVM_LIBC_MACROS_FLOAT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FLOAT_MACROS_H
+#define LLVM_LIBC_MACROS_FLOAT_MACROS_H
// Suppress `#include_next is a language extension` warnings.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wgnu-include-next"
+#pragma clang diagnostic ignored "-Winclude-next-absolute-path"
#else // gcc
#pragma GCC system_header
#endif //__clang__
@@ -169,4 +170,4 @@
// TODO: Add FLT16 and FLT128 constants.
-#endif // __LLVM_LIBC_MACROS_FLOAT_MACROS_H
+#endif // LLVM_LIBC_MACROS_FLOAT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/generic-error-number-macros.h b/libc/include/llvm-libc-macros/generic-error-number-macros.h
index 3805c95cbb2a..7ee0352669b8 100644
--- a/libc/include/llvm-libc-macros/generic-error-number-macros.h
+++ b/libc/include/llvm-libc-macros/generic-error-number-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
-#define __LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
+#ifndef LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
+#define LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
#define EPERM 1
#define ENOENT 2
@@ -45,4 +45,4 @@
#define ERANGE 34
#define EILSEQ 35
-#endif // __LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
+#endif // LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
diff --git a/libc/include/llvm-libc-macros/gpu/time-macros.h b/libc/include/llvm-libc-macros/gpu/time-macros.h
index baf2ea5f4132..c3dc812f90a3 100644
--- a/libc/include/llvm-libc-macros/gpu/time-macros.h
+++ b/libc/include/llvm-libc-macros/gpu/time-macros.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
#define CLOCKS_PER_SEC 1000000
-#endif // __LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/inttypes-macros.h b/libc/include/llvm-libc-macros/inttypes-macros.h
index fc3e2517f194..8e7d4f558a63 100644
--- a/libc/include/llvm-libc-macros/inttypes-macros.h
+++ b/libc/include/llvm-libc-macros/inttypes-macros.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_INTTYPES_MACROS_H
-#define __LLVM_LIBC_MACROS_INTTYPES_MACROS_H
+#ifndef LLVM_LIBC_MACROS_INTTYPES_MACROS_H
+#define LLVM_LIBC_MACROS_INTTYPES_MACROS_H
// fprintf/scanf format macros.
// POSIX.1-2008, Technical Corrigendum 1, XBD/TC1-2008/0050 [211] is applied.
@@ -286,4 +286,4 @@
#define SCNxMAX __UINTMAX_FMTx__
#define SCNxPTR __UINTPTR_FMTx__
-#endif // __LLVM_LIBC_MACROS_INTTYPES_MACROS_H
+#endif // LLVM_LIBC_MACROS_INTTYPES_MACROS_H
diff --git a/libc/include/llvm-libc-macros/limits-macros.h b/libc/include/llvm-libc-macros/limits-macros.h
index 3b4df58ae4a1..95f0f5f0baa5 100644
--- a/libc/include/llvm-libc-macros/limits-macros.h
+++ b/libc/include/llvm-libc-macros/limits-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LIMITS_MACROS_H
-#define __LLVM_LIBC_MACROS_LIMITS_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LIMITS_MACROS_H
+#define LLVM_LIBC_MACROS_LIMITS_MACROS_H
// Define all C23 macro constants of limits.h
@@ -225,4 +225,4 @@
#define ULLONG_MIN 0ULL
#endif // ULLONG_MIN
-#endif // __LLVM_LIBC_MACROS_LIMITS_MACROS_H
+#endif // LLVM_LIBC_MACROS_LIMITS_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/fcntl-macros.h b/libc/include/llvm-libc-macros/linux/fcntl-macros.h
index 495c5ec780ed..1d4e5bbbdc77 100644
--- a/libc/include/llvm-libc-macros/linux/fcntl-macros.h
+++ b/libc/include/llvm-libc-macros/linux/fcntl-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
// File creation flags
#define O_CLOEXEC 02000000
@@ -68,4 +68,4 @@
#define F_GETFL 3
#define F_SETFL 4
-#endif // __LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sched-macros.h b/libc/include/llvm-libc-macros/linux/sched-macros.h
index 0c574440ccbc..ace620049ca0 100644
--- a/libc/include/llvm-libc-macros/linux/sched-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sched-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
// Definitions of SCHED_* macros must match was linux as at:
// https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/sched.h
@@ -26,4 +26,4 @@
#define CPU_COUNT_S(setsize, set) __sched_getcpucount(setsize, set)
#define CPU_COUNT(set) CPU_COUNT_S(sizeof(cpu_set_t), set)
-#endif // __LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/signal-macros.h b/libc/include/llvm-libc-macros/linux/signal-macros.h
index deb190ec3759..e379fc41efd0 100644
--- a/libc/include/llvm-libc-macros/linux/signal-macros.h
+++ b/libc/include/llvm-libc-macros/linux/signal-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SIGNUM_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SIGNUM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SIGNAL_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SIGNAL_MACROS_H
#define SIGHUP 1
#define SIGINT 2
@@ -101,4 +101,4 @@
#define CLD_STOPPED 5 // child has stopped
#define CLD_CONTINUED 6 // stopped child has continued
-#endif // __LLVM_LIBC_MACROS_LINUX_SIGNUM_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SIGNAL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h b/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h
index 8f13a0ef4ad3..5eb779aeeca5 100644
--- a/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
// TODO (michaelrj): Finish defining these macros.
// Just defining this macro for the moment since it's all that we need right
@@ -16,4 +16,4 @@
// think is worth digging into right now.
#define TIOCGETD 0x5424
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-random-macros.h b/libc/include/llvm-libc-macros/linux/sys-random-macros.h
index 1337f8b606fc..9261e87bdbf6 100644
--- a/libc/include/llvm-libc-macros/linux/sys-random-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-random-macros.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
// Getrandom flags
#define GRND_RANDOM 0x0001
#define GRND_NONBLOCK 0x0002
#define GRND_INSECURE 0x0004
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-resource-macros.h b/libc/include/llvm-libc-macros/linux/sys-resource-macros.h
index dd265530ada2..c9d93c30c35a 100644
--- a/libc/include/llvm-libc-macros/linux/sys-resource-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-resource-macros.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_RESOURCE_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_RESOURCE_MACROS_H
+
#define RLIMIT_CPU 0
#define RLIMIT_FSIZE 1
#define RLIMIT_DATA 2
@@ -24,3 +27,5 @@
#define RLIMIT_RTTIME 15
#define RLIM_INFINITY (~0UL)
+
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_RESOURCE_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-socket-macros.h b/libc/include/llvm-libc-macros/linux/sys-socket-macros.h
index 7de410225b71..f335200a103b 100644
--- a/libc/include/llvm-libc-macros/linux/sys-socket-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-socket-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
// IEEE Std 1003.1-2017 - basedefs/sys_socket.h.html
// Macro values come from the Linux syscall interface.
@@ -25,4 +25,4 @@
#define SOCK_SEQPACKET 5
#define SOCK_PACKET 10
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-stat-macros.h b/libc/include/llvm-libc-macros/linux/sys-stat-macros.h
index 48606cfa08ce..3013121d0f3c 100644
--- a/libc/include/llvm-libc-macros/linux/sys-stat-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-stat-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
// Definitions from linux/stat.h
#define S_IFMT 0170000
@@ -45,4 +45,4 @@
#define S_IWOTH 00002
#define S_IXOTH 00001
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-time-macros.h b/libc/include/llvm-libc-macros/linux/sys-time-macros.h
index 06ae43f0e005..e97819594adc 100644
--- a/libc/include/llvm-libc-macros/linux/sys-time-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-time-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
// Add two timevals and put the result in timeval_ptr_result. If the resulting
// usec value is greater than 999,999 then the microseconds are turned into full
@@ -50,4 +50,4 @@
? ((timeval_ptr_a)->tv_usec CMP(timeval_ptr_b)->tv_usec) \
: ((timeval_ptr_a)->tv_sec CMP(timeval_ptr_b)->tv_sec))
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-wait-macros.h b/libc/include/llvm-libc-macros/linux/sys-wait-macros.h
index 3e6c6f53cc71..c101638fdae3 100644
--- a/libc/include/llvm-libc-macros/linux/sys-wait-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-wait-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
// Wait flags
#define WNOHANG 1 // Do not block
@@ -41,4 +41,4 @@
#define P_PGID 2
#define P_PIDFD 3
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/termios-macros.h b/libc/include/llvm-libc-macros/linux/termios-macros.h
index 17e380ebecff..668cfe27abaa 100644
--- a/libc/include/llvm-libc-macros/linux/termios-macros.h
+++ b/libc/include/llvm-libc-macros/linux/termios-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
// Below are generic definitions of symbolic bit-masks, modes etc. They serve
// most architectures including x86_64, aarch64 but have to be adjusted for few
@@ -164,4 +164,4 @@
#define TCIOFF 2 // Suspend output
#define TCION 3 // Restart output
-#endif // __LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/time-macros.h b/libc/include/llvm-libc-macros/linux/time-macros.h
index ace27cb2e9eb..407a1eb30eea 100644
--- a/libc/include/llvm-libc-macros/linux/time-macros.h
+++ b/libc/include/llvm-libc-macros/linux/time-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
// clock type macros
#define CLOCK_REALTIME 0
@@ -23,4 +23,4 @@
#define CLOCKS_PER_SEC 1000000
-#endif //__LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/unistd-macros.h b/libc/include/llvm-libc-macros/linux/unistd-macros.h
index cfdfb9a93ee9..c5109df435e6 100644
--- a/libc/include/llvm-libc-macros/linux/unistd-macros.h
+++ b/libc/include/llvm-libc-macros/linux/unistd-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
// Values for mode argument to the access(...) function.
#define F_OK 0
@@ -27,4 +27,4 @@
(long)(arg4), (long)(arg5), (long)(arg6))
#define syscall(...) __syscall_helper(__VA_ARGS__, 0, 1, 2, 3, 4, 5, 6)
-#endif // __LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
diff --git a/libc/include/llvm-libc-macros/math-macros.h b/libc/include/llvm-libc-macros/math-macros.h
index 9f8edd954b7e..e67fe4d11b44 100644
--- a/libc/include/llvm-libc-macros/math-macros.h
+++ b/libc/include/llvm-libc-macros/math-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_MATH_MACROS_H
-#define __LLVM_LIBC_MACROS_MATH_MACROS_H
+#ifndef LLVM_LIBC_MACROS_MATH_MACROS_H
+#define LLVM_LIBC_MACROS_MATH_MACROS_H
#include "limits-macros.h"
@@ -38,4 +38,4 @@
#define math_errhandling (MATH_ERRNO | MATH_ERREXCEPT)
#endif
-#endif // __LLVM_LIBC_MACROS_MATH_MACROS_H
+#endif // LLVM_LIBC_MACROS_MATH_MACROS_H
diff --git a/libc/include/llvm-libc-macros/null-macro.h b/libc/include/llvm-libc-macros/null-macro.h
index b83fc05c614d..416d4e865fc5 100644
--- a/libc/include/llvm-libc-macros/null-macro.h
+++ b/libc/include/llvm-libc-macros/null-macro.h
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_NULL_MACRO_H
-#define __LLVM_LIBC_MACROS_NULL_MACRO_H
+#ifndef LLVM_LIBC_MACROS_NULL_MACRO_H
+#define LLVM_LIBC_MACROS_NULL_MACRO_H
#define __need_NULL
#include <stddef.h>
-#endif // __LLVM_LIBC_MACROS_NULL_MACRO_H
+#endif // LLVM_LIBC_MACROS_NULL_MACRO_H
diff --git a/libc/include/llvm-libc-macros/offsetof-macro.h b/libc/include/llvm-libc-macros/offsetof-macro.h
index eeceb3db110b..208c06b29cb6 100644
--- a/libc/include/llvm-libc-macros/offsetof-macro.h
+++ b/libc/include/llvm-libc-macros/offsetof-macro.h
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
-#define __LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
+#ifndef LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
+#define LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
#define __need_offsetof
#include <stddef.h>
-#endif // __LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
+#endif // LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
diff --git a/libc/include/llvm-libc-macros/sched-macros.h b/libc/include/llvm-libc-macros/sched-macros.h
index 760edd9feb72..0f643029816c 100644
--- a/libc/include/llvm-libc-macros/sched-macros.h
+++ b/libc/include/llvm-libc-macros/sched-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SCHED_MACROS_H
-#define __LLVM_LIBC_MACROS_SCHED_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SCHED_MACROS_H
+#define LLVM_LIBC_MACROS_SCHED_MACROS_H
#ifdef __linux__
#include "linux/sched-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SCHED_MACROS_H
+#endif // LLVM_LIBC_MACROS_SCHED_MACROS_H
diff --git a/libc/include/llvm-libc-macros/signal-macros.h b/libc/include/llvm-libc-macros/signal-macros.h
index 525032b3c5b1..7ab605baa54c 100644
--- a/libc/include/llvm-libc-macros/signal-macros.h
+++ b/libc/include/llvm-libc-macros/signal-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SIGNUM_MACROS_H
-#define __LLVM_LIBC_MACROS_SIGNUM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SIGNAL_MACROS_H
+#define LLVM_LIBC_MACROS_SIGNAL_MACROS_H
#ifdef __linux__
#include "linux/signal-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SIGNUM_MACROS_H
+#endif // LLVM_LIBC_MACROS_SIGNAL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdbit-macros.h b/libc/include/llvm-libc-macros/stdbit-macros.h
index 5ee152e105f7..e3a36d10ed92 100644
--- a/libc/include/llvm-libc-macros/stdbit-macros.h
+++ b/libc/include/llvm-libc-macros/stdbit-macros.h
@@ -157,6 +157,21 @@ inline unsigned stdc_count_ones(unsigned long x) {
inline unsigned stdc_count_ones(unsigned long long x) {
return stdc_count_ones_ull(x);
}
+inline bool stdc_has_single_bit(unsigned char x) {
+ return stdc_has_single_bit_uc(x);
+}
+inline bool stdc_has_single_bit(unsigned short x) {
+ return stdc_has_single_bit_us(x);
+}
+inline bool stdc_has_single_bit(unsigned x) {
+ return stdc_has_single_bit_ui(x);
+}
+inline bool stdc_has_single_bit(unsigned long x) {
+ return stdc_has_single_bit_ul(x);
+}
+inline bool stdc_has_single_bit(unsigned long long x) {
+ return stdc_has_single_bit_ull(x);
+}
#else
#define stdc_leading_zeros(x) \
_Generic((x), \
@@ -228,6 +243,13 @@ inline unsigned stdc_count_ones(unsigned long long x) {
unsigned: stdc_count_ones_ui, \
unsigned long: stdc_count_ones_ul, \
unsigned long long: stdc_count_ones_ull)(x)
+#define stdc_has_single_bit(x) \
+ _Generic((x), \
+ unsigned char: stdc_has_single_bit_uc, \
+ unsigned short: stdc_has_single_bit_us, \
+ unsigned: stdc_has_single_bit_ui, \
+ unsigned long: stdc_has_single_bit_ul, \
+ unsigned long long: stdc_has_single_bit_ull)(x)
#endif // __cplusplus
#endif // __LLVM_LIBC_MACROS_STDBIT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdckdint-macros.h b/libc/include/llvm-libc-macros/stdckdint-macros.h
index 03b73aeeb671..694412290bbc 100644
--- a/libc/include/llvm-libc-macros/stdckdint-macros.h
+++ b/libc/include/llvm-libc-macros/stdckdint-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
-#define __LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
+#define LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
// We need to use __builtin_*_overflow from GCC/Clang to implement the overflow
// macros. Check __GNUC__ for availability of such builtins.
@@ -22,4 +22,4 @@
#define ckd_mul(R, A, B) __builtin_mul_overflow((A), (B), (R))
#endif // __STDC_VERSION_STDCKDINT_H__
#endif // __GNUC__
-#endif // __LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdfix-macros.h b/libc/include/llvm-libc-macros/stdfix-macros.h
index 11c18f83b8c5..554ebe544a42 100644
--- a/libc/include/llvm-libc-macros/stdfix-macros.h
+++ b/libc/include/llvm-libc-macros/stdfix-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDFIX_MACROS_H
-#define __LLVM_LIBC_MACROS_STDFIX_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDFIX_MACROS_H
+#define LLVM_LIBC_MACROS_STDFIX_MACROS_H
#ifdef __FRACT_FBIT__
// _Fract and _Accum types are available
@@ -325,4 +325,4 @@
#endif // LIBC_COMPILER_HAS_FIXED_POINT
-#endif // __LLVM_LIBC_MACROS_STDFIX_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDFIX_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdio-macros.h b/libc/include/llvm-libc-macros/stdio-macros.h
index b2c62ec7cff2..db747c5d5d67 100644
--- a/libc/include/llvm-libc-macros/stdio-macros.h
+++ b/libc/include/llvm-libc-macros/stdio-macros.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDIO_MACROS_H
-#define __LLVM_LIBC_MACROS_STDIO_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDIO_MACROS_H
+#define LLVM_LIBC_MACROS_STDIO_MACROS_H
#define BUFSIZ 1024
-#endif // __LLVM_LIBC_MACROS_STDIO_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDIO_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdlib-macros.h b/libc/include/llvm-libc-macros/stdlib-macros.h
index a7625aa187c9..5fcbfef97b32 100644
--- a/libc/include/llvm-libc-macros/stdlib-macros.h
+++ b/libc/include/llvm-libc-macros/stdlib-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDLIB_MACROS_H
-#define __LLVM_LIBC_MACROS_STDLIB_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDLIB_MACROS_H
+#define LLVM_LIBC_MACROS_STDLIB_MACROS_H
#ifndef NULL
#define __need_NULL
@@ -19,4 +19,4 @@
#define RAND_MAX 2147483647
-#endif // __LLVM_LIBC_MACROS_STDLIB_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDLIB_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-auxv-macros.h b/libc/include/llvm-libc-macros/sys-auxv-macros.h
index a57c6018ea0a..2dcaa2f1a8ee 100644
--- a/libc/include/llvm-libc-macros/sys-auxv-macros.h
+++ b/libc/include/llvm-libc-macros/sys-auxv-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_AUXV_MACROS_H
-#define __LLVM_LIBC_MACROS_AUXV_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_AUXV_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_AUXV_MACROS_H
// Macros defining the aux vector indexes.
#define AT_NULL 0
@@ -40,4 +40,4 @@
#define AT_MINSIGSTKSZ 51
#endif
-#endif // __LLVM_LIBC_MACROS_AUXV_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_AUXV_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-ioctl-macros.h b/libc/include/llvm-libc-macros/sys-ioctl-macros.h
index c273fab36e3f..4a5f9651231f 100644
--- a/libc/include/llvm-libc-macros/sys-ioctl-macros.h
+++ b/libc/include/llvm-libc-macros/sys-ioctl-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
#ifdef __linux__
#include "linux/sys-ioctl-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-mman-macros.h b/libc/include/llvm-libc-macros/sys-mman-macros.h
index 4ffc112f4e4d..a6dc6d96b5b7 100644
--- a/libc/include/llvm-libc-macros/sys-mman-macros.h
+++ b/libc/include/llvm-libc-macros/sys-mman-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
// Use definitions from <linux/mman.h> to dispatch arch-specific flag values.
// For example, MCL_CURRENT/MCL_FUTURE/MCL_ONFAULT are different on different
@@ -45,4 +45,4 @@
#define POSIX_MADV_DONTNEED MADV_DONTNEED
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-queue-macros.h b/libc/include/llvm-libc-macros/sys-queue-macros.h
index 7da643cb7253..fcac265333fc 100644
--- a/libc/include/llvm-libc-macros/sys-queue-macros.h
+++ b/libc/include/llvm-libc-macros/sys-queue-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
#include <llvm-libc-macros/containerof-macro.h>
#include <llvm-libc-macros/null-macro.h>
@@ -259,4 +259,4 @@
(head2)->stqh_last = &STAILQ_FIRST(head2); \
} while (0)
-#endif // __LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-random-macros.h b/libc/include/llvm-libc-macros/sys-random-macros.h
index e87128d0d0fc..9b1a8edb4f79 100644
--- a/libc/include/llvm-libc-macros/sys-random-macros.h
+++ b/libc/include/llvm-libc-macros/sys-random-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
#ifdef __linux__
#include "linux/sys-random-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-resource-macros.h b/libc/include/llvm-libc-macros/sys-resource-macros.h
index 272723a955a7..1ce01cdd1e83 100644
--- a/libc/include/llvm-libc-macros/sys-resource-macros.h
+++ b/libc/include/llvm-libc-macros/sys-resource-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
#ifdef __linux__
#include "linux/sys-resource-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-select-macros.h b/libc/include/llvm-libc-macros/sys-select-macros.h
index 5d6592c1c281..d54e5300d12e 100644
--- a/libc/include/llvm-libc-macros/sys-select-macros.h
+++ b/libc/include/llvm-libc-macros/sys-select-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
#define FD_SETSIZE 1024
#define __FD_SET_WORD_TYPE unsigned long
@@ -32,4 +32,4 @@
#define FD_ISSET(fd, set) \
(int)(((set)->__set[__FD_WORD(fd)] & __FD_MASK(fd)) != 0)
-#endif // __LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-socket-macros.h b/libc/include/llvm-libc-macros/sys-socket-macros.h
index 203236099063..6b1d28070b43 100644
--- a/libc/include/llvm-libc-macros/sys-socket-macros.h
+++ b/libc/include/llvm-libc-macros/sys-socket-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
#ifdef __linux__
#include "linux/sys-socket-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-stat-macros.h b/libc/include/llvm-libc-macros/sys-stat-macros.h
index 64f63c33b3e5..c47c9612705e 100644
--- a/libc/include/llvm-libc-macros/sys-stat-macros.h
+++ b/libc/include/llvm-libc-macros/sys-stat-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
#ifdef __linux__
#include "linux/sys-stat-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-time-macros.h b/libc/include/llvm-libc-macros/sys-time-macros.h
index 8e4631703308..36d7d5adafc9 100644
--- a/libc/include/llvm-libc-macros/sys-time-macros.h
+++ b/libc/include/llvm-libc-macros/sys-time-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
#ifdef __linux__
#include "linux/sys-time-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-wait-macros.h b/libc/include/llvm-libc-macros/sys-wait-macros.h
index ea58fccecaff..c418a7930b69 100644
--- a/libc/include/llvm-libc-macros/sys-wait-macros.h
+++ b/libc/include/llvm-libc-macros/sys-wait-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
#ifdef __linux__
#include "linux/sys-wait-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/termios-macros.h b/libc/include/llvm-libc-macros/termios-macros.h
index c99982837a57..1067e8a57474 100644
--- a/libc/include/llvm-libc-macros/termios-macros.h
+++ b/libc/include/llvm-libc-macros/termios-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_TERMIOS_MACROS_H
-#define __LLVM_LIBC_MACROS_TERMIOS_MACROS_H
+#ifndef LLVM_LIBC_MACROS_TERMIOS_MACROS_H
+#define LLVM_LIBC_MACROS_TERMIOS_MACROS_H
#ifdef __linux__
#include "linux/termios-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_TERMIOS_MACROS_H
+#endif // LLVM_LIBC_MACROS_TERMIOS_MACROS_H
diff --git a/libc/include/llvm-libc-macros/time-macros.h b/libc/include/llvm-libc-macros/time-macros.h
index c3bd7aa24f56..6d49ed484d5d 100644
--- a/libc/include/llvm-libc-macros/time-macros.h
+++ b/libc/include/llvm-libc-macros/time-macros.h
@@ -1,5 +1,5 @@
-#ifndef __LLVM_LIBC_MACROS_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_TIME_MACROS_H
#if defined(__AMDGPU__) || defined(__NVPTX__)
#include "gpu/time-macros.h"
@@ -7,4 +7,4 @@
#include "linux/time-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/unistd-macros.h b/libc/include/llvm-libc-macros/unistd-macros.h
index dbcac0f5e72d..4f27f075fcc6 100644
--- a/libc/include/llvm-libc-macros/unistd-macros.h
+++ b/libc/include/llvm-libc-macros/unistd-macros.h
@@ -1,8 +1,8 @@
-#ifndef __LLVM_LIBC_MACROS_UNISTD_MACROS_H
-#define __LLVM_LIBC_MACROS_UNISTD_MACROS_H
+#ifndef LLVM_LIBC_MACROS_UNISTD_MACROS_H
+#define LLVM_LIBC_MACROS_UNISTD_MACROS_H
#ifdef __linux__
#include "linux/unistd-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_UNISTD_MACROS_H
+#endif // LLVM_LIBC_MACROS_UNISTD_MACROS_H
diff --git a/libc/include/llvm-libc-macros/wchar-macros.h b/libc/include/llvm-libc-macros/wchar-macros.h
index adca41eb0122..5b211f5276b6 100644
--- a/libc/include/llvm-libc-macros/wchar-macros.h
+++ b/libc/include/llvm-libc-macros/wchar-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_WCHAR_MACROS_H
-#define __LLVM_LIBC_MACROS_WCHAR_MACROS_H
+#ifndef LLVM_LIBC_MACROS_WCHAR_MACROS_H
+#define LLVM_LIBC_MACROS_WCHAR_MACROS_H
#ifndef WEOF
#define WEOF 0xffffffffu
#endif
-#endif // __LLVM_LIBC_MACROS_WCHAR_MACROS_H
+#endif // LLVM_LIBC_MACROS_WCHAR_MACROS_H
diff --git a/libc/include/llvm-libc-types/ACTION.h b/libc/include/llvm-libc-types/ACTION.h
index 7181a59b177d..1ddce208df1c 100644
--- a/libc/include/llvm-libc-types/ACTION.h
+++ b/libc/include/llvm-libc-types/ACTION.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ACTION_H__
-#define __LLVM_LIBC_TYPES_ACTION_H__
+#ifndef LLVM_LIBC_TYPES_ACTION_H
+#define LLVM_LIBC_TYPES_ACTION_H
typedef enum { FIND, ENTER } ACTION;
-#endif // __LLVM_LIBC_TYPES_ACTION_H__
+#endif // LLVM_LIBC_TYPES_ACTION_H
diff --git a/libc/include/llvm-libc-types/DIR.h b/libc/include/llvm-libc-types/DIR.h
index 0a2cf27d2485..855446db6f53 100644
--- a/libc/include/llvm-libc-types/DIR.h
+++ b/libc/include/llvm-libc-types/DIR.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DIR_H__
-#define __LLVM_LIBC_TYPES_DIR_H__
+#ifndef LLVM_LIBC_TYPES_DIR_H
+#define LLVM_LIBC_TYPES_DIR_H
typedef struct DIR DIR;
-#endif // __LLVM_LIBC_TYPES_DIR_H__
+#endif // LLVM_LIBC_TYPES_DIR_H
diff --git a/libc/include/llvm-libc-types/ENTRY.h b/libc/include/llvm-libc-types/ENTRY.h
index 0ccb5938207a..ccbd777e2475 100644
--- a/libc/include/llvm-libc-types/ENTRY.h
+++ b/libc/include/llvm-libc-types/ENTRY.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ENTRY_H__
-#define __LLVM_LIBC_TYPES_ENTRY_H__
+#ifndef LLVM_LIBC_TYPES_ENTRY_H
+#define LLVM_LIBC_TYPES_ENTRY_H
typedef struct {
char *key;
void *data;
} ENTRY;
-#endif // __LLVM_LIBC_TYPES_ENTRY_H__
+#endif // LLVM_LIBC_TYPES_ENTRY_H
diff --git a/libc/include/llvm-libc-types/FILE.h b/libc/include/llvm-libc-types/FILE.h
index 1c1ff97ec86a..f1d2e4f726c7 100644
--- a/libc/include/llvm-libc-types/FILE.h
+++ b/libc/include/llvm-libc-types/FILE.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FILE_H__
-#define __LLVM_LIBC_TYPES_FILE_H__
+#ifndef LLVM_LIBC_TYPES_FILE_H
+#define LLVM_LIBC_TYPES_FILE_H
typedef struct FILE FILE;
-#endif // __LLVM_LIBC_TYPES_FILE_H__
+#endif // LLVM_LIBC_TYPES_FILE_H
diff --git a/libc/include/llvm-libc-types/__atexithandler_t.h b/libc/include/llvm-libc-types/__atexithandler_t.h
index a9887b6abf70..01aed676c2a7 100644
--- a/libc/include/llvm-libc-types/__atexithandler_t.h
+++ b/libc/include/llvm-libc-types/__atexithandler_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ATEXITHANDLER_T_H__
-#define __LLVM_LIBC_TYPES_ATEXITHANDLER_T_H__
+#ifndef LLVM_LIBC_TYPES___ATEXITHANDLER_T_H
+#define LLVM_LIBC_TYPES___ATEXITHANDLER_T_H
typedef void (*__atexithandler_t)(void);
-#endif // __LLVM_LIBC_TYPES_ATEXITHANDLER_T_H__
+#endif // LLVM_LIBC_TYPES___ATEXITHANDLER_T_H
diff --git a/libc/include/llvm-libc-types/__atfork_callback_t.h b/libc/include/llvm-libc-types/__atfork_callback_t.h
index 3da66c23feb0..ae2d0ca39d53 100644
--- a/libc/include/llvm-libc-types/__atfork_callback_t.h
+++ b/libc/include/llvm-libc-types/__atfork_callback_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ATFORK_CALLBACK_T_H__
-#define __LLVM_LIBC_TYPES_ATFORK_CALLBACK_T_H__
+#ifndef LLVM_LIBC_TYPES___ATFORK_CALLBACK_T_H
+#define LLVM_LIBC_TYPES___ATFORK_CALLBACK_T_H
typedef void (*__atfork_callback_t)(void);
-#endif // __LLVM_LIBC_TYPES_ATFORK_CALLBACK_T_H__
+#endif // LLVM_LIBC_TYPES___ATFORK_CALLBACK_T_H
diff --git a/libc/include/llvm-libc-types/__bsearchcompare_t.h b/libc/include/llvm-libc-types/__bsearchcompare_t.h
index 40ebc7f35668..0b1987be1fdd 100644
--- a/libc/include/llvm-libc-types/__bsearchcompare_t.h
+++ b/libc/include/llvm-libc-types/__bsearchcompare_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_BSEARCHCOMPARE_T_H__
-#define __LLVM_LIBC_TYPES_BSEARCHCOMPARE_T_H__
+#ifndef LLVM_LIBC_TYPES___BSEARCHCOMPARE_T_H
+#define LLVM_LIBC_TYPES___BSEARCHCOMPARE_T_H
typedef int (*__bsearchcompare_t)(const void *, const void *);
-#endif // __LLVM_LIBC_TYPES_BSEARCHCOMPARE_T_H__
+#endif // LLVM_LIBC_TYPES___BSEARCHCOMPARE_T_H
diff --git a/libc/include/llvm-libc-types/__call_once_func_t.h b/libc/include/llvm-libc-types/__call_once_func_t.h
index bc8ed8331bd8..6d278da4f1d3 100644
--- a/libc/include/llvm-libc-types/__call_once_func_t.h
+++ b/libc/include/llvm-libc-types/__call_once_func_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CALL_ONCE_FUNC_T_H__
-#define __LLVM_LIBC_TYPES_CALL_ONCE_FUNC_T_H__
+#ifndef LLVM_LIBC_TYPES___CALL_ONCE_FUNC_T_H
+#define LLVM_LIBC_TYPES___CALL_ONCE_FUNC_T_H
typedef void (*__call_once_func_t)(void);
-#endif // __LLVM_LIBC_TYPES_CALL_ONCE_FUNC_T_H__
+#endif // LLVM_LIBC_TYPES___CALL_ONCE_FUNC_T_H
diff --git a/libc/include/llvm-libc-types/__exec_argv_t.h b/libc/include/llvm-libc-types/__exec_argv_t.h
index 35b687d9685d..4eff583768af 100644
--- a/libc/include/llvm-libc-types/__exec_argv_t.h
+++ b/libc/include/llvm-libc-types/__exec_argv_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EXEC_ARGV_T_H__
-#define __LLVM_LIBC_TYPES_EXEC_ARGV_T_H__
+#ifndef LLVM_LIBC_TYPES___EXEC_ARGV_T_H
+#define LLVM_LIBC_TYPES___EXEC_ARGV_T_H
typedef char *const __exec_argv_t[];
-#endif // __LLVM_LIBC_TYPES_EXEC_ARGV_T_H__
+#endif // LLVM_LIBC_TYPES___EXEC_ARGV_T_H
diff --git a/libc/include/llvm-libc-types/__exec_envp_t.h b/libc/include/llvm-libc-types/__exec_envp_t.h
index 06eb2ddcb1fb..89e02754c4e4 100644
--- a/libc/include/llvm-libc-types/__exec_envp_t.h
+++ b/libc/include/llvm-libc-types/__exec_envp_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EXEC_ENVP_T_H__
-#define __LLVM_LIBC_TYPES_EXEC_ENVP_T_H__
+#ifndef LLVM_LIBC_TYPES___EXEC_ENVP_T_H
+#define LLVM_LIBC_TYPES___EXEC_ENVP_T_H
typedef char *const __exec_envp_t[];
-#endif // __LLVM_LIBC_TYPES_EXEC_ENVP_T_H__
+#endif // LLVM_LIBC_TYPES___EXEC_ENVP_T_H
diff --git a/libc/include/llvm-libc-types/__futex_word.h b/libc/include/llvm-libc-types/__futex_word.h
index 85130ab4976b..04023c7e2d5f 100644
--- a/libc/include/llvm-libc-types/__futex_word.h
+++ b/libc/include/llvm-libc-types/__futex_word.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FUTEX_WORD_H__
-#define __LLVM_LIBC_TYPES_FUTEX_WORD_H__
+#ifndef LLVM_LIBC_TYPES___FUTEX_WORD_H
+#define LLVM_LIBC_TYPES___FUTEX_WORD_H
typedef struct {
// Futex word should be aligned appropriately to allow target atomic
@@ -17,4 +17,4 @@ typedef struct {
: _Alignof(__UINT32_TYPE__)) __UINT32_TYPE__ __word;
} __futex_word;
-#endif // __LLVM_LIBC_TYPES_FUTEX_WORD_H__
+#endif // LLVM_LIBC_TYPES___FUTEX_WORD_H
diff --git a/libc/include/llvm-libc-types/__getoptargv_t.h b/libc/include/llvm-libc-types/__getoptargv_t.h
index 81c67286c3a7..c26b9e9fa619 100644
--- a/libc/include/llvm-libc-types/__getoptargv_t.h
+++ b/libc/include/llvm-libc-types/__getoptargv_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_GETOPTARGV_T_H__
-#define __LLVM_LIBC_TYPES_GETOPTARGV_T_H__
+#ifndef LLVM_LIBC_TYPES___GETOPTARGV_T_H
+#define LLVM_LIBC_TYPES___GETOPTARGV_T_H
typedef char *const __getoptargv_t[];
-#endif // __LLVM_LIBC_TYPES_GETOPTARGV_T_H__
+#endif // LLVM_LIBC_TYPES___GETOPTARGV_T_H
diff --git a/libc/include/llvm-libc-types/__mutex_type.h b/libc/include/llvm-libc-types/__mutex_type.h
index a7ed8f843c3a..d27bf5db8377 100644
--- a/libc/include/llvm-libc-types/__mutex_type.h
+++ b/libc/include/llvm-libc-types/__mutex_type.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES___MUTEX_T_H
-#define __LLVM_LIBC_TYPES___MUTEX_T_H
+#ifndef LLVM_LIBC_TYPES___MUTEX_TYPE_H
+#define LLVM_LIBC_TYPES___MUTEX_TYPE_H
#include <llvm-libc-types/__futex_word.h>
@@ -26,4 +26,4 @@ typedef struct {
#endif
} __mutex_type;
-#endif // __LLVM_LIBC_TYPES___MUTEX_T_H
+#endif // LLVM_LIBC_TYPES___MUTEX_TYPE_H
diff --git a/libc/include/llvm-libc-types/__pthread_once_func_t.h b/libc/include/llvm-libc-types/__pthread_once_func_t.h
index 5ace5cb7f151..7575029f08c2 100644
--- a/libc/include/llvm-libc-types/__pthread_once_func_t.h
+++ b/libc/include/llvm-libc-types/__pthread_once_func_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_ONCE_FUNC_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_ONCE_FUNC_T_H__
+#ifndef LLVM_LIBC_TYPES___PTHREAD_ONCE_FUNC_T_H
+#define LLVM_LIBC_TYPES___PTHREAD_ONCE_FUNC_T_H
typedef void (*__pthread_once_func_t)(void);
-#endif // __LLVM_LIBC_TYPES_PTHREAD_ONCE_FUNC_T_H__
+#endif // LLVM_LIBC_TYPES___PTHREAD_ONCE_FUNC_T_H
diff --git a/libc/include/llvm-libc-types/__pthread_start_t.h b/libc/include/llvm-libc-types/__pthread_start_t.h
index 1e05f9b49729..6b7ae40b1b77 100644
--- a/libc/include/llvm-libc-types/__pthread_start_t.h
+++ b/libc/include/llvm-libc-types/__pthread_start_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_START_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_START_T_H__
+#ifndef LLVM_LIBC_TYPES___PTHREAD_START_T_H
+#define LLVM_LIBC_TYPES___PTHREAD_START_T_H
typedef void *(*__pthread_start_t)(void *);
-#endif // __LLVM_LIBC_TYPES_PTHREAD_START_T_H__
+#endif // LLVM_LIBC_TYPES___PTHREAD_START_T_H
diff --git a/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h b/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h
index 1b54d31a7977..c67b6045936d 100644
--- a/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h
+++ b/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_TSS_DTOR_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_TSS_DTOR_T_H__
+#ifndef LLVM_LIBC_TYPES___PTHREAD_TSS_DTOR_T_H
+#define LLVM_LIBC_TYPES___PTHREAD_TSS_DTOR_T_H
typedef void (*__pthread_tss_dtor_t)(void *);
-#endif // __LLVM_LIBC_TYPES_PTHREAD_TSS_DTOR_T_H__
+#endif // LLVM_LIBC_TYPES___PTHREAD_TSS_DTOR_T_H
diff --git a/libc/include/llvm-libc-types/__qsortcompare_t.h b/libc/include/llvm-libc-types/__qsortcompare_t.h
index 82bd4cc1fcd0..48fc9ccb4409 100644
--- a/libc/include/llvm-libc-types/__qsortcompare_t.h
+++ b/libc/include/llvm-libc-types/__qsortcompare_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_QSORTCOMPARE_T_H__
-#define __LLVM_LIBC_TYPES_QSORTCOMPARE_T_H__
+#ifndef LLVM_LIBC_TYPES___QSORTCOMPARE_T_H
+#define LLVM_LIBC_TYPES___QSORTCOMPARE_T_H
typedef int (*__qsortcompare_t)(const void *, const void *);
-#endif // __LLVM_LIBC_TYPES_QSORTCOMPARE_T_H__
+#endif // LLVM_LIBC_TYPES___QSORTCOMPARE_T_H
diff --git a/libc/include/llvm-libc-types/__qsortrcompare_t.h b/libc/include/llvm-libc-types/__qsortrcompare_t.h
index febf79d9f90b..f6b058864359 100644
--- a/libc/include/llvm-libc-types/__qsortrcompare_t.h
+++ b/libc/include/llvm-libc-types/__qsortrcompare_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_QSORTRCOMPARE_T_H__
-#define __LLVM_LIBC_TYPES_QSORTRCOMPARE_T_H__
+#ifndef LLVM_LIBC_TYPES___QSORTRCOMPARE_T_H
+#define LLVM_LIBC_TYPES___QSORTRCOMPARE_T_H
typedef int (*__qsortrcompare_t)(const void *, const void *, void *);
-#endif // __LLVM_LIBC_TYPES_QSORTRCOMPARE_T_H__
+#endif // LLVM_LIBC_TYPES___QSORTRCOMPARE_T_H
diff --git a/libc/include/llvm-libc-types/__sighandler_t.h b/libc/include/llvm-libc-types/__sighandler_t.h
index bd0ad98d8529..9c1ac997fc4e 100644
--- a/libc/include/llvm-libc-types/__sighandler_t.h
+++ b/libc/include/llvm-libc-types/__sighandler_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGHANDLER_T_H__
-#define __LLVM_LIBC_TYPES_SIGHANDLER_T_H__
+#ifndef LLVM_LIBC_TYPES___SIGHANDLER_T_H
+#define LLVM_LIBC_TYPES___SIGHANDLER_T_H
typedef void (*__sighandler_t)(int);
-#endif // __LLVM_LIBC_TYPES_SIGHANDLER_T_H__
+#endif // LLVM_LIBC_TYPES___SIGHANDLER_T_H
diff --git a/libc/include/llvm-libc-types/__thread_type.h b/libc/include/llvm-libc-types/__thread_type.h
index da5b898e5750..645573f544a9 100644
--- a/libc/include/llvm-libc-types/__thread_type.h
+++ b/libc/include/llvm-libc-types/__thread_type.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_THREAD_TYPE_H__
-#define __LLVM_LIBC_TYPES_THREAD_TYPE_H__
+#ifndef LLVM_LIBC_TYPES___THREAD_TYPE_H
+#define LLVM_LIBC_TYPES___THREAD_TYPE_H
typedef struct {
void *__attrib;
} __thread_type;
-#endif // __LLVM_LIBC_TYPES_THREAD_TYPE_H__
+#endif // LLVM_LIBC_TYPES___THREAD_TYPE_H
diff --git a/libc/include/llvm-libc-types/blkcnt_t.h b/libc/include/llvm-libc-types/blkcnt_t.h
index acd8d3467ec5..9dea8f033d6d 100644
--- a/libc/include/llvm-libc-types/blkcnt_t.h
+++ b/libc/include/llvm-libc-types/blkcnt_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_BLKCNT_T_H__
-#define __LLVM_LIBC_TYPES_BLKCNT_T_H__
+#ifndef LLVM_LIBC_TYPES_BLKCNT_T_H
+#define LLVM_LIBC_TYPES_BLKCNT_T_H
typedef __INTPTR_TYPE__ blkcnt_t;
-#endif // __LLVM_LIBC_TYPES_BLKCNT_T_H__
+#endif // LLVM_LIBC_TYPES_BLKCNT_T_H
diff --git a/libc/include/llvm-libc-types/blksize_t.h b/libc/include/llvm-libc-types/blksize_t.h
index 99ddac56194a..7caa9705cca3 100644
--- a/libc/include/llvm-libc-types/blksize_t.h
+++ b/libc/include/llvm-libc-types/blksize_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_BLKSIZE_T_H__
-#define __LLVM_LIBC_TYPES_BLKSIZE_T_H__
+#ifndef LLVM_LIBC_TYPES_BLKSIZE_T_H
+#define LLVM_LIBC_TYPES_BLKSIZE_T_H
typedef __INTPTR_TYPE__ blksize_t;
-#endif // __LLVM_LIBC_TYPES_BLKSIZE_T_H__
+#endif // LLVM_LIBC_TYPES_BLKSIZE_T_H
diff --git a/libc/include/llvm-libc-types/cc_t.h b/libc/include/llvm-libc-types/cc_t.h
index e08523cc3ec9..40d99ad22da2 100644
--- a/libc/include/llvm-libc-types/cc_t.h
+++ b/libc/include/llvm-libc-types/cc_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CC_T_H__
-#define __LLVM_LIBC_TYPES_CC_T_H__
+#ifndef LLVM_LIBC_TYPES_CC_T_H
+#define LLVM_LIBC_TYPES_CC_T_H
typedef unsigned char cc_t;
-#endif // __LLVM_LIBC_TYPES_CC_T_H__
+#endif // LLVM_LIBC_TYPES_CC_T_H
diff --git a/libc/include/llvm-libc-types/clock_t.h b/libc/include/llvm-libc-types/clock_t.h
index b7969d602c6b..8759ee999fb5 100644
--- a/libc/include/llvm-libc-types/clock_t.h
+++ b/libc/include/llvm-libc-types/clock_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CLOCK_T_H__
-#define __LLVM_LIBC_TYPES_CLOCK_T_H__
+#ifndef LLVM_LIBC_TYPES_CLOCK_T_H
+#define LLVM_LIBC_TYPES_CLOCK_T_H
typedef long clock_t;
-#endif // __LLVM_LIBC_TYPES_CLOCK_T_H__
+#endif // LLVM_LIBC_TYPES_CLOCK_T_H
diff --git a/libc/include/llvm-libc-types/clockid_t.h b/libc/include/llvm-libc-types/clockid_t.h
index ddaceb664ec1..4b059599502c 100644
--- a/libc/include/llvm-libc-types/clockid_t.h
+++ b/libc/include/llvm-libc-types/clockid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CLOCKID_T_H__
-#define __LLVM_LIBC_TYPES_CLOCKID_T_H__
+#ifndef LLVM_LIBC_TYPES_CLOCKID_T_H
+#define LLVM_LIBC_TYPES_CLOCKID_T_H
typedef int clockid_t;
-#endif // __LLVM_LIBC_TYPES_CLOCKID_T_H__
+#endif // LLVM_LIBC_TYPES_CLOCKID_T_H
diff --git a/libc/include/llvm-libc-types/cnd_t.h b/libc/include/llvm-libc-types/cnd_t.h
index 09a29ac43dee..1159ac435792 100644
--- a/libc/include/llvm-libc-types/cnd_t.h
+++ b/libc/include/llvm-libc-types/cnd_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CND_T_H__
-#define __LLVM_LIBC_TYPES_CND_T_H__
+#ifndef LLVM_LIBC_TYPES_CND_T_H
+#define LLVM_LIBC_TYPES_CND_T_H
#include "mtx_t.h"
@@ -17,4 +17,4 @@ typedef struct {
mtx_t __qmtx;
} cnd_t;
-#endif // __LLVM_LIBC_TYPES_CND_T_H__
+#endif // LLVM_LIBC_TYPES_CND_T_H
diff --git a/libc/include/llvm-libc-types/cookie_io_functions_t.h b/libc/include/llvm-libc-types/cookie_io_functions_t.h
index df904162a897..f9fa1a2d50ed 100644
--- a/libc/include/llvm-libc-types/cookie_io_functions_t.h
+++ b/libc/include/llvm-libc-types/cookie_io_functions_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
-#define __LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
+#ifndef LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
+#define LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
#include <llvm-libc-types/off64_t.h>
#include <llvm-libc-types/size_t.h>
@@ -25,4 +25,4 @@ typedef struct {
cookie_close_function_t *close;
} cookie_io_functions_t;
-#endif // __LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
+#endif // LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
diff --git a/libc/include/llvm-libc-types/cpu_set_t.h b/libc/include/llvm-libc-types/cpu_set_t.h
index 79f694aeda60..e7f52597e147 100644
--- a/libc/include/llvm-libc-types/cpu_set_t.h
+++ b/libc/include/llvm-libc-types/cpu_set_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CPU_SET_T_H
-#define __LLVM_LIBC_TYPES_CPU_SET_T_H
+#ifndef LLVM_LIBC_TYPES_CPU_SET_T_H
+#define LLVM_LIBC_TYPES_CPU_SET_T_H
typedef struct {
// If a processor with more than 1024 CPUs is to be supported in future,
@@ -15,4 +15,4 @@ typedef struct {
unsigned long __mask[128 / sizeof(unsigned long)];
} cpu_set_t;
-#endif // __LLVM_LIBC_TYPES_CPU_SET_T_H
+#endif // LLVM_LIBC_TYPES_CPU_SET_T_H
diff --git a/libc/include/llvm-libc-types/dev_t.h b/libc/include/llvm-libc-types/dev_t.h
index 9fbc41a49b89..3181e3415f2e 100644
--- a/libc/include/llvm-libc-types/dev_t.h
+++ b/libc/include/llvm-libc-types/dev_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DEV_T_H__
-#define __LLVM_LIBC_TYPES_DEV_T_H__
+#ifndef LLVM_LIBC_TYPES_DEV_T_H
+#define LLVM_LIBC_TYPES_DEV_T_H
typedef __UINT64_TYPE__ dev_t;
-#endif // __LLVM_LIBC_TYPES_DEV_T_H__
+#endif // LLVM_LIBC_TYPES_DEV_T_H
diff --git a/libc/include/llvm-libc-types/div_t.h b/libc/include/llvm-libc-types/div_t.h
index e495a1c3f9dc..450603d69f35 100644
--- a/libc/include/llvm-libc-types/div_t.h
+++ b/libc/include/llvm-libc-types/div_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DIV_T_H__
-#define __LLVM_LIBC_TYPES_DIV_T_H__
+#ifndef LLVM_LIBC_TYPES_DIV_T_H
+#define LLVM_LIBC_TYPES_DIV_T_H
typedef struct {
int quot;
int rem;
} div_t;
-#endif // __LLVM_LIBC_TYPES_DIV_T_H__
+#endif // LLVM_LIBC_TYPES_DIV_T_H
diff --git a/libc/include/llvm-libc-types/double_t.h b/libc/include/llvm-libc-types/double_t.h
index 2aa471de4840..c4ad08afddfa 100644
--- a/libc/include/llvm-libc-types/double_t.h
+++ b/libc/include/llvm-libc-types/double_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DOUBLE_T_H__
-#define __LLVM_LIBC_TYPES_DOUBLE_T_H__
+#ifndef LLVM_LIBC_TYPES_DOUBLE_T_H
+#define LLVM_LIBC_TYPES_DOUBLE_T_H
#if !defined(__FLT_EVAL_METHOD__) || __FLT_EVAL_METHOD__ == 0
#define __LLVM_LIBC_DOUBLE_T double
@@ -21,4 +21,4 @@
typedef __LLVM_LIBC_DOUBLE_T double_t;
-#endif // __LLVM_LIBC_TYPES_DOUBLE_T_H__
+#endif // LLVM_LIBC_TYPES_DOUBLE_T_H
diff --git a/libc/include/llvm-libc-types/fd_set.h b/libc/include/llvm-libc-types/fd_set.h
index 54e3fc654c06..58fc438bbdd2 100644
--- a/libc/include/llvm-libc-types/fd_set.h
+++ b/libc/include/llvm-libc-types/fd_set.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FD_SET_H__
-#define __LLVM_LIBC_TYPES_FD_SET_H__
+#ifndef LLVM_LIBC_TYPES_FD_SET_H
+#define LLVM_LIBC_TYPES_FD_SET_H
#include <llvm-libc-macros/sys-select-macros.h> // FD_SETSIZE
@@ -15,4 +15,4 @@ typedef struct {
__FD_SET_WORD_TYPE __set[__FD_SET_ARRAYSIZE];
} fd_set;
-#endif // __LLVM_LIBC_TYPES_FD_SET_H__
+#endif // LLVM_LIBC_TYPES_FD_SET_H
diff --git a/libc/include/llvm-libc-types/fenv_t.h b/libc/include/llvm-libc-types/fenv_t.h
index 86fcf2e49a7f..c83f23894c0c 100644
--- a/libc/include/llvm-libc-types/fenv_t.h
+++ b/libc/include/llvm-libc-types/fenv_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FENV_T_H__
-#define __LLVM_LIBC_TYPES_FENV_T_H__
+#ifndef LLVM_LIBC_TYPES_FENV_T_H
+#define LLVM_LIBC_TYPES_FENV_T_H
#ifdef __aarch64__
typedef struct {
@@ -33,4 +33,4 @@ typedef struct {
#error "fenv_t not defined for your platform"
#endif
-#endif // __LLVM_LIBC_TYPES_FENV_T_H__
+#endif // LLVM_LIBC_TYPES_FENV_T_H
diff --git a/libc/include/llvm-libc-types/fexcept_t.h b/libc/include/llvm-libc-types/fexcept_t.h
index 6e7969c1be0a..60687bd1318a 100644
--- a/libc/include/llvm-libc-types/fexcept_t.h
+++ b/libc/include/llvm-libc-types/fexcept_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FEXCEPT_T_H__
-#define __LLVM_LIBC_TYPES_FEXCEPT_T_H__
+#ifndef LLVM_LIBC_TYPES_FEXCEPT_T_H
+#define LLVM_LIBC_TYPES_FEXCEPT_T_H
typedef int fexcept_t;
-#endif // __LLVM_LIBC_TYPES_FEXCEPT_T_H__
+#endif // LLVM_LIBC_TYPES_FEXCEPT_T_H
diff --git a/libc/include/llvm-libc-types/float128.h b/libc/include/llvm-libc-types/float128.h
index 61a094fdb96b..0b290c676ecc 100644
--- a/libc/include/llvm-libc-types/float128.h
+++ b/libc/include/llvm-libc-types/float128.h
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FLOAT128_H__
-#define __LLVM_LIBC_TYPES_FLOAT128_H__
+#ifndef LLVM_LIBC_TYPES_FLOAT128_H
+#define LLVM_LIBC_TYPES_FLOAT128_H
-#include <include/llvm-libc-macros/float-macros.h> // LDBL_MANT_DIG
+#include "../llvm-libc-macros/float-macros.h" // LDBL_MANT_DIG
// Currently, C23 `_Float128` type is only defined as a built-in type in GCC 7
// or later, and only for C. For C++, or for clang, `__float128` is defined
@@ -34,4 +34,4 @@ typedef __float128 float128;
typedef long double float128;
#endif
-#endif // __LLVM_LIBC_TYPES_FLOAT128_H__
+#endif // LLVM_LIBC_TYPES_FLOAT128_H
diff --git a/libc/include/llvm-libc-types/float_t.h b/libc/include/llvm-libc-types/float_t.h
index 8df3bf05f6a1..5027249c30d3 100644
--- a/libc/include/llvm-libc-types/float_t.h
+++ b/libc/include/llvm-libc-types/float_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FLOAT_T_H__
-#define __LLVM_LIBC_TYPES_FLOAT_T_H__
+#ifndef LLVM_LIBC_TYPES_FLOAT_T_H
+#define LLVM_LIBC_TYPES_FLOAT_T_H
#if !defined(__FLT_EVAL_METHOD__) || __FLT_EVAL_METHOD__ == 0
#define __LLVM_LIBC_FLOAT_T float
@@ -21,4 +21,4 @@
typedef __LLVM_LIBC_FLOAT_T float_t;
-#endif // __LLVM_LIBC_TYPES_FLOAT_T_H__
+#endif // LLVM_LIBC_TYPES_FLOAT_T_H
diff --git a/libc/include/llvm-libc-types/gid_t.h b/libc/include/llvm-libc-types/gid_t.h
index 664aee020a4e..cfe36ce9906b 100644
--- a/libc/include/llvm-libc-types/gid_t.h
+++ b/libc/include/llvm-libc-types/gid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_GID_T_H__
-#define __LLVM_LIBC_TYPES_GID_T_H__
+#ifndef LLVM_LIBC_TYPES_GID_T_H
+#define LLVM_LIBC_TYPES_GID_T_H
typedef __UINT32_TYPE__ gid_t;
-#endif // __LLVM_LIBC_TYPES_GID_T_H__
+#endif // LLVM_LIBC_TYPES_GID_T_H
diff --git a/libc/include/llvm-libc-types/ino_t.h b/libc/include/llvm-libc-types/ino_t.h
index 0f5abd96c2b7..148bd67f98fe 100644
--- a/libc/include/llvm-libc-types/ino_t.h
+++ b/libc/include/llvm-libc-types/ino_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_INO_T_H__
-#define __LLVM_LIBC_TYPES_INO_T_H__
+#ifndef LLVM_LIBC_TYPES_INO_T_H
+#define LLVM_LIBC_TYPES_INO_T_H
typedef __UINTPTR_TYPE__ ino_t;
-#endif // __LLVM_LIBC_TYPES_INO_T_H__
+#endif // LLVM_LIBC_TYPES_INO_T_H
diff --git a/libc/include/llvm-libc-types/jmp_buf.h b/libc/include/llvm-libc-types/jmp_buf.h
index 6af4e8ebad92..29a1df9ad682 100644
--- a/libc/include/llvm-libc-types/jmp_buf.h
+++ b/libc/include/llvm-libc-types/jmp_buf.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_JMP_BUF_H__
-#define __LLVM_LIBC_TYPES_JMP_BUF_H__
+#ifndef LLVM_LIBC_TYPES_JMP_BUF_H
+#define LLVM_LIBC_TYPES_JMP_BUF_H
typedef struct {
#ifdef __x86_64__
@@ -39,4 +39,4 @@ typedef struct {
typedef __jmp_buf jmp_buf[1];
-#endif // __LLVM_LIBC_TYPES_JMP_BUF_H__
+#endif // LLVM_LIBC_TYPES_JMP_BUF_H
diff --git a/libc/include/llvm-libc-types/ldiv_t.h b/libc/include/llvm-libc-types/ldiv_t.h
index 9bd8d253330a..5c64ec10d918 100644
--- a/libc/include/llvm-libc-types/ldiv_t.h
+++ b/libc/include/llvm-libc-types/ldiv_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_LDIV_T_H__
-#define __LLVM_LIBC_TYPES_LDIV_T_H__
+#ifndef LLVM_LIBC_TYPES_LDIV_T_H
+#define LLVM_LIBC_TYPES_LDIV_T_H
typedef struct {
long quot;
long rem;
} ldiv_t;
-#endif // __LLVM_LIBC_TYPES_LDIV_T_H__
+#endif // LLVM_LIBC_TYPES_LDIV_T_H
diff --git a/libc/include/llvm-libc-types/lldiv_t.h b/libc/include/llvm-libc-types/lldiv_t.h
index 109304d12078..5b8dcbef9470 100644
--- a/libc/include/llvm-libc-types/lldiv_t.h
+++ b/libc/include/llvm-libc-types/lldiv_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_LLDIV_T_H__
-#define __LLVM_LIBC_TYPES_LLDIV_T_H__
+#ifndef LLVM_LIBC_TYPES_LLDIV_T_H
+#define LLVM_LIBC_TYPES_LLDIV_T_H
typedef struct {
long long quot;
long long rem;
} lldiv_t;
-#endif // __LLVM_LIBC_TYPES_LLDIV_T_H__
+#endif // LLVM_LIBC_TYPES_LLDIV_T_H
diff --git a/libc/include/llvm-libc-types/mode_t.h b/libc/include/llvm-libc-types/mode_t.h
index 20037bb9ac8f..fe09060d9a6e 100644
--- a/libc/include/llvm-libc-types/mode_t.h
+++ b/libc/include/llvm-libc-types/mode_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_MODE_T_H
-#define __LLVM_LIBC_TYPES_MODE_T_H
+#ifndef LLVM_LIBC_TYPES_MODE_T_H
+#define LLVM_LIBC_TYPES_MODE_T_H
typedef unsigned mode_t;
-#endif // __LLVM_LIBC_TYPES_MODE_T_H
+#endif // LLVM_LIBC_TYPES_MODE_T_H
diff --git a/libc/include/llvm-libc-types/mtx_t.h b/libc/include/llvm-libc-types/mtx_t.h
index ac6453eeabf0..0f3882c26b6b 100644
--- a/libc/include/llvm-libc-types/mtx_t.h
+++ b/libc/include/llvm-libc-types/mtx_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_MTX_T_H__
-#define __LLVM_LIBC_TYPES_MTX_T_H__
+#ifndef LLVM_LIBC_TYPES_MTX_T_H
+#define LLVM_LIBC_TYPES_MTX_T_H
#include <llvm-libc-types/__mutex_type.h>
typedef __mutex_type mtx_t;
-#endif // __LLVM_LIBC_TYPES_MTX_T_H__
+#endif // LLVM_LIBC_TYPES_MTX_T_H
diff --git a/libc/include/llvm-libc-types/nlink_t.h b/libc/include/llvm-libc-types/nlink_t.h
index 1826144b3c88..7e0016a9af95 100644
--- a/libc/include/llvm-libc-types/nlink_t.h
+++ b/libc/include/llvm-libc-types/nlink_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_NLINK_T_H__
-#define __LLVM_LIBC_TYPES_NLINK_T_H__
+#ifndef LLVM_LIBC_TYPES_NLINK_T_H
+#define LLVM_LIBC_TYPES_NLINK_T_H
typedef __UINTPTR_TYPE__ nlink_t;
-#endif // __LLVM_LIBC_TYPES_NLINK_T_H__
+#endif // LLVM_LIBC_TYPES_NLINK_T_H
diff --git a/libc/include/llvm-libc-types/off64_t.h b/libc/include/llvm-libc-types/off64_t.h
index 0f95caa19cca..669698a8c05f 100644
--- a/libc/include/llvm-libc-types/off64_t.h
+++ b/libc/include/llvm-libc-types/off64_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_OFF64_T_H__
-#define __LLVM_LIBC_TYPES_OFF64_T_H__
+#ifndef LLVM_LIBC_TYPES_OFF64_T_H
+#define LLVM_LIBC_TYPES_OFF64_T_H
typedef __INT64_TYPE__ off64_t;
-#endif // __LLVM_LIBC_TYPES_OFF64_T_H__
+#endif // LLVM_LIBC_TYPES_OFF64_T_H
diff --git a/libc/include/llvm-libc-types/off_t.h b/libc/include/llvm-libc-types/off_t.h
index 111b29aa68d8..63224b6831d5 100644
--- a/libc/include/llvm-libc-types/off_t.h
+++ b/libc/include/llvm-libc-types/off_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_OFF_T_H__
-#define __LLVM_LIBC_TYPES_OFF_T_H__
+#ifndef LLVM_LIBC_TYPES_OFF_T_H
+#define LLVM_LIBC_TYPES_OFF_T_H
typedef __INT64_TYPE__ off_t;
-#endif // __LLVM_LIBC_TYPES_OFF_T_H__
+#endif // LLVM_LIBC_TYPES_OFF_T_H
diff --git a/libc/include/llvm-libc-types/once_flag.h b/libc/include/llvm-libc-types/once_flag.h
index 77bab28338a0..cb8011284610 100644
--- a/libc/include/llvm-libc-types/once_flag.h
+++ b/libc/include/llvm-libc-types/once_flag.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ONCE_FLAG_H__
-#define __LLVM_LIBC_TYPES_ONCE_FLAG_H__
+#ifndef LLVM_LIBC_TYPES_ONCE_FLAG_H
+#define LLVM_LIBC_TYPES_ONCE_FLAG_H
#include <llvm-libc-types/__futex_word.h>
@@ -17,4 +17,4 @@ typedef __futex_word once_flag;
#error "Once flag type not defined for the target platform."
#endif
-#endif // __LLVM_LIBC_TYPES_ONCE_FLAG_H__
+#endif // LLVM_LIBC_TYPES_ONCE_FLAG_H
diff --git a/libc/include/llvm-libc-types/pid_t.h b/libc/include/llvm-libc-types/pid_t.h
index d78fde74f34a..0397bd249032 100644
--- a/libc/include/llvm-libc-types/pid_t.h
+++ b/libc/include/llvm-libc-types/pid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PID_t_H__
-#define __LLVM_LIBC_TYPES_PID_t_H__
+#ifndef LLVM_LIBC_TYPES_PID_T_H
+#define LLVM_LIBC_TYPES_PID_T_H
typedef __INT32_TYPE__ pid_t;
-#endif // __LLVM_LIBC_TYPES_PID_t_H__
+#endif // LLVM_LIBC_TYPES_PID_T_H
diff --git a/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h b/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h
index 55adbd198de8..3062da3a54b5 100644
--- a/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h
+++ b/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_T_H
-#define __LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_T_H
+#ifndef LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_H
+#define LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_H
typedef struct {
void *__front;
void *__back;
} posix_spawn_file_actions_t;
-#endif // __LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_T_H
+#endif // LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_H
diff --git a/libc/include/llvm-libc-types/posix_spawnattr_t.h b/libc/include/llvm-libc-types/posix_spawnattr_t.h
index f1bcb3e1434f..47cadc7cdda1 100644
--- a/libc/include/llvm-libc-types/posix_spawnattr_t.h
+++ b/libc/include/llvm-libc-types/posix_spawnattr_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
-#define __LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
+#ifndef LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
+#define LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
typedef struct {
// This data structure will be populated as required.
} posix_spawnattr_t;
-#endif // __LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
+#endif // LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
diff --git a/libc/include/llvm-libc-types/pthread_attr_t.h b/libc/include/llvm-libc-types/pthread_attr_t.h
index 7bf8a5402f28..66c04de04a99 100644
--- a/libc/include/llvm-libc-types/pthread_attr_t.h
+++ b/libc/include/llvm-libc-types/pthread_attr_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
-#define __LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
+#ifndef LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
#include <llvm-libc-types/size_t.h>
@@ -18,4 +18,4 @@ typedef struct {
size_t __guardsize;
} pthread_attr_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
+#endif // LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
diff --git a/libc/include/llvm-libc-types/pthread_key_t.h b/libc/include/llvm-libc-types/pthread_key_t.h
index 351e37614a01..e73c7e26c17c 100644
--- a/libc/include/llvm-libc-types/pthread_key_t.h
+++ b/libc/include/llvm-libc-types/pthread_key_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_KEY_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_KEY_T_H__
+#ifndef LLVM_LIBC_TYPES_PTHREAD_KEY_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_KEY_T_H
typedef unsigned int pthread_key_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_KEY_T_H__
+#endif // LLVM_LIBC_TYPES_PTHREAD_KEY_T_H
diff --git a/libc/include/llvm-libc-types/pthread_mutex_t.h b/libc/include/llvm-libc-types/pthread_mutex_t.h
index 65e43538cd27..b1eb21f24fac 100644
--- a/libc/include/llvm-libc-types/pthread_mutex_t.h
+++ b/libc/include/llvm-libc-types/pthread_mutex_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
-#define __LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#ifndef LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
#include <llvm-libc-types/__mutex_type.h>
typedef __mutex_type pthread_mutex_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#endif // LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
diff --git a/libc/include/llvm-libc-types/pthread_mutexattr_t.h b/libc/include/llvm-libc-types/pthread_mutexattr_t.h
index be1ff5611ed4..8f159a61420c 100644
--- a/libc/include/llvm-libc-types/pthread_mutexattr_t.h
+++ b/libc/include/llvm-libc-types/pthread_mutexattr_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
-#define __LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
+#ifndef LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
// pthread_mutexattr_t is a collection bit mapped flags. The mapping is internal
// detail of the libc implementation.
typedef unsigned int pthread_mutexattr_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
+#endif // LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
diff --git a/libc/include/llvm-libc-types/pthread_once_t.h b/libc/include/llvm-libc-types/pthread_once_t.h
index 6d65f8f74052..3fe78b7ddff6 100644
--- a/libc/include/llvm-libc-types/pthread_once_t.h
+++ b/libc/include/llvm-libc-types/pthread_once_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H__
+#ifndef LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
#include <llvm-libc-types/__futex_word.h>
@@ -17,4 +17,4 @@ typedef __futex_word pthread_once_t;
#error "Once flag type not defined for the target platform."
#endif
-#endif // __LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H__
+#endif // LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
diff --git a/libc/include/llvm-libc-types/pthread_t.h b/libc/include/llvm-libc-types/pthread_t.h
index 8130491274ef..72c14e1c2eea 100644
--- a/libc/include/llvm-libc-types/pthread_t.h
+++ b/libc/include/llvm-libc-types/pthread_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_T_H__
+#ifndef LLVM_LIBC_TYPES_PTHREAD_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_T_H
#include <llvm-libc-types/__thread_type.h>
typedef __thread_type pthread_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_T_H__
+#endif // LLVM_LIBC_TYPES_PTHREAD_T_H
diff --git a/libc/include/llvm-libc-types/rlim_t.h b/libc/include/llvm-libc-types/rlim_t.h
index 4e5acfb24c1b..016ec7bdc5b1 100644
--- a/libc/include/llvm-libc-types/rlim_t.h
+++ b/libc/include/llvm-libc-types/rlim_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_RLIM_T_H__
-#define __LLVM_LIBC_TYPES_RLIM_T_H__
+#ifndef LLVM_LIBC_TYPES_RLIM_T_H
+#define LLVM_LIBC_TYPES_RLIM_T_H
typedef __UINT64_TYPE__ rlim_t;
-#endif // __LLVM_LIBC_TYPES_RLIM_T_H__
+#endif // LLVM_LIBC_TYPES_RLIM_T_H
diff --git a/libc/include/llvm-libc-types/rpc_opcodes_t.h b/libc/include/llvm-libc-types/rpc_opcodes_t.h
index 7b85428dd344..919ea039c18e 100644
--- a/libc/include/llvm-libc-types/rpc_opcodes_t.h
+++ b/libc/include/llvm-libc-types/rpc_opcodes_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_RPC_OPCODE_H__
-#define __LLVM_LIBC_TYPES_RPC_OPCODE_H__
+#ifndef LLVM_LIBC_TYPES_RPC_OPCODES_T_H
+#define LLVM_LIBC_TYPES_RPC_OPCODES_T_H
typedef enum {
RPC_NOOP = 0,
@@ -34,4 +34,4 @@ typedef enum {
RPC_LAST = 0xFFFF,
} rpc_opcode_t;
-#endif // __LLVM_LIBC_TYPES_RPC_OPCODE_H__
+#endif // LLVM_LIBC_TYPES_RPC_OPCODES_T_H
diff --git a/libc/include/llvm-libc-types/sa_family_t.h b/libc/include/llvm-libc-types/sa_family_t.h
index 52b69957b0d3..0a010b678ddb 100644
--- a/libc/include/llvm-libc-types/sa_family_t.h
+++ b/libc/include/llvm-libc-types/sa_family_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SA_FAMILY_T_H__
-#define __LLVM_LIBC_TYPES_SA_FAMILY_T_H__
+#ifndef LLVM_LIBC_TYPES_SA_FAMILY_T_H
+#define LLVM_LIBC_TYPES_SA_FAMILY_T_H
// The posix standard only says of sa_family_t that it must be unsigned. The
// linux man page for "address_families" lists approximately 32 different
@@ -16,4 +16,4 @@
typedef unsigned short sa_family_t;
-#endif // __LLVM_LIBC_TYPES_SA_FAMILY_T_H__
+#endif // LLVM_LIBC_TYPES_SA_FAMILY_T_H
diff --git a/libc/include/llvm-libc-types/sig_atomic_t.h b/libc/include/llvm-libc-types/sig_atomic_t.h
index 324629c1b55c..2ef375806791 100644
--- a/libc/include/llvm-libc-types/sig_atomic_t.h
+++ b/libc/include/llvm-libc-types/sig_atomic_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIG_ATOMIC_T_H__
-#define __LLVM_LIBC_TYPES_SIG_ATOMIC_T_H__
+#ifndef LLVM_LIBC_TYPES_SIG_ATOMIC_T_H
+#define LLVM_LIBC_TYPES_SIG_ATOMIC_T_H
typedef int sig_atomic_t;
-#endif // __LLVM_LIBC_TYPES_SIG_ATOMIC_T_H__
+#endif // LLVM_LIBC_TYPES_SIG_ATOMIC_T_H
diff --git a/libc/include/llvm-libc-types/siginfo_t.h b/libc/include/llvm-libc-types/siginfo_t.h
index ef8af78a88be..935ef4bbcb72 100644
--- a/libc/include/llvm-libc-types/siginfo_t.h
+++ b/libc/include/llvm-libc-types/siginfo_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGINFO_T_H__
-#define __LLVM_LIBC_TYPES_SIGINFO_T_H__
+#ifndef LLVM_LIBC_TYPES_SIGINFO_T_H
+#define LLVM_LIBC_TYPES_SIGINFO_T_H
#include <llvm-libc-types/clock_t.h>
#include <llvm-libc-types/pid_t.h>
@@ -106,4 +106,4 @@ typedef struct {
#define si_syscall _sifields._sigsys._syscall
#define si_arch _sifields._sigsys._arch
-#endif // __LLVM_LIBC_TYPES_SIGINFO_T_H__
+#endif // LLVM_LIBC_TYPES_SIGINFO_T_H
diff --git a/libc/include/llvm-libc-types/sigset_t.h b/libc/include/llvm-libc-types/sigset_t.h
index bcfbc29996ae..f159c6c6c643 100644
--- a/libc/include/llvm-libc-types/sigset_t.h
+++ b/libc/include/llvm-libc-types/sigset_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGSET_T_H__
-#define __LLVM_LIBC_TYPES_SIGSET_T_H__
+#ifndef LLVM_LIBC_TYPES_SIGSET_T_H
+#define LLVM_LIBC_TYPES_SIGSET_T_H
#include <llvm-libc-macros/signal-macros.h>
@@ -17,4 +17,4 @@ typedef struct {
unsigned long __signals[__NSIGSET_WORDS];
} sigset_t;
-#endif // __LLVM_LIBC_TYPES_SIGSET_T_H__
+#endif // LLVM_LIBC_TYPES_SIGSET_T_H
diff --git a/libc/include/llvm-libc-types/size_t.h b/libc/include/llvm-libc-types/size_t.h
index 8eaf194e0572..3b31b0820f23 100644
--- a/libc/include/llvm-libc-types/size_t.h
+++ b/libc/include/llvm-libc-types/size_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIZE_T_H__
-#define __LLVM_LIBC_TYPES_SIZE_T_H__
+#ifndef LLVM_LIBC_TYPES_SIZE_T_H
+#define LLVM_LIBC_TYPES_SIZE_T_H
// Since __need_size_t is defined, we get the definition of size_t from the
// standalone C header stddef.h. Also, because __need_size_t is defined,
@@ -16,4 +16,4 @@
#include <stddef.h>
#undef __need_size_t
-#endif // __LLVM_LIBC_TYPES_SIZE_T_H__
+#endif // LLVM_LIBC_TYPES_SIZE_T_H
diff --git a/libc/include/llvm-libc-types/socklen_t.h b/libc/include/llvm-libc-types/socklen_t.h
index 3134a53390e7..5357747f5b83 100644
--- a/libc/include/llvm-libc-types/socklen_t.h
+++ b/libc/include/llvm-libc-types/socklen_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SOCKLEN_T_H__
-#define __LLVM_LIBC_TYPES_SOCKLEN_T_H__
+#ifndef LLVM_LIBC_TYPES_SOCKLEN_T_H
+#define LLVM_LIBC_TYPES_SOCKLEN_T_H
// The posix standard only says of socklen_t that it must be an integer type of
// width of at least 32 bits. The long type is defined as being at least 32
@@ -15,4 +15,4 @@
typedef unsigned long socklen_t;
-#endif // __LLVM_LIBC_TYPES_SOCKLEN_T_H__
+#endif // LLVM_LIBC_TYPES_SOCKLEN_T_H
diff --git a/libc/include/llvm-libc-types/speed_t.h b/libc/include/llvm-libc-types/speed_t.h
index b4ec13df27b5..9875d3b82a69 100644
--- a/libc/include/llvm-libc-types/speed_t.h
+++ b/libc/include/llvm-libc-types/speed_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SPEED_T_H__
-#define __LLVM_LIBC_TYPES_SPEED_T_H__
+#ifndef LLVM_LIBC_TYPES_SPEED_T_H
+#define LLVM_LIBC_TYPES_SPEED_T_H
typedef unsigned int speed_t;
-#endif // __LLVM_LIBC_TYPES_SPEED_T_H__
+#endif // LLVM_LIBC_TYPES_SPEED_T_H
diff --git a/libc/include/llvm-libc-types/ssize_t.h b/libc/include/llvm-libc-types/ssize_t.h
index b8874538b1bf..41e4b6d2c500 100644
--- a/libc/include/llvm-libc-types/ssize_t.h
+++ b/libc/include/llvm-libc-types/ssize_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SSIZE_T_H__
-#define __LLVM_LIBC_TYPES_SSIZE_T_H__
+#ifndef LLVM_LIBC_TYPES_SSIZE_T_H
+#define LLVM_LIBC_TYPES_SSIZE_T_H
typedef __INT64_TYPE__ ssize_t;
-#endif // __LLVM_LIBC_TYPES_SSIZE_T_H__
+#endif // LLVM_LIBC_TYPES_SSIZE_T_H
diff --git a/libc/include/llvm-libc-types/stack_t.h b/libc/include/llvm-libc-types/stack_t.h
index f564d9134010..5fa4d3a6d3dc 100644
--- a/libc/include/llvm-libc-types/stack_t.h
+++ b/libc/include/llvm-libc-types/stack_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STACK_T_H__
-#define __LLVM_LIBC_TYPES_STACK_T_H__
+#ifndef LLVM_LIBC_TYPES_STACK_T_H
+#define LLVM_LIBC_TYPES_STACK_T_H
#include <llvm-libc-types/size_t.h>
@@ -19,4 +19,4 @@ typedef struct {
size_t ss_size;
} stack_t;
-#endif // __LLVM_LIBC_TYPES_STACK_T_H__
+#endif // LLVM_LIBC_TYPES_STACK_T_H
diff --git a/libc/include/llvm-libc-types/struct_dirent.h b/libc/include/llvm-libc-types/struct_dirent.h
index de54a2262446..3c5b361c3cbc 100644
--- a/libc/include/llvm-libc-types/struct_dirent.h
+++ b/libc/include/llvm-libc-types/struct_dirent.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_DIRENT_H__
-#define __LLVM_LIBC_TYPES_STRUCT_DIRENT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_DIRENT_H
+#define LLVM_LIBC_TYPES_STRUCT_DIRENT_H
#include <llvm-libc-types/ino_t.h>
#include <llvm-libc-types/off_t.h>
@@ -26,4 +26,4 @@ struct dirent {
char d_name[1];
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_DIRENT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_DIRENT_H
diff --git a/libc/include/llvm-libc-types/struct_epoll_data.h b/libc/include/llvm-libc-types/struct_epoll_data.h
index c363171089f1..7200276a141e 100644
--- a/libc/include/llvm-libc-types/struct_epoll_data.h
+++ b/libc/include/llvm-libc-types/struct_epoll_data.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EPOLL_DATA_H__
-#define __LLVM_LIBC_TYPES_EPOLL_DATA_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_EPOLL_DATA_H
+#define LLVM_LIBC_TYPES_STRUCT_EPOLL_DATA_H
union epoll_data {
void *ptr;
@@ -18,4 +18,4 @@ union epoll_data {
typedef union epoll_data epoll_data_t;
-#endif // __LLVM_LIBC_TYPES_EPOLL_DATA_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_EPOLL_DATA_H
diff --git a/libc/include/llvm-libc-types/struct_epoll_event.h b/libc/include/llvm-libc-types/struct_epoll_event.h
index edfa026fa982..6fc5b410348a 100644
--- a/libc/include/llvm-libc-types/struct_epoll_event.h
+++ b/libc/include/llvm-libc-types/struct_epoll_event.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EPOLL_EVENT_H__
-#define __LLVM_LIBC_TYPES_EPOLL_EVENT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
+#define LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
#include <llvm-libc-types/struct_epoll_data.h>
@@ -16,4 +16,4 @@ typedef struct epoll_event {
epoll_data_t data;
} epoll_event;
-#endif // __LLVM_LIBC_TYPES_EPOLL_EVENT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
diff --git a/libc/include/llvm-libc-types/struct_hsearch_data.h b/libc/include/llvm-libc-types/struct_hsearch_data.h
index 7e2a7232fce5..cdb1d0c5da14 100644
--- a/libc/include/llvm-libc-types/struct_hsearch_data.h
+++ b/libc/include/llvm-libc-types/struct_hsearch_data.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H__
-#define __LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H
+#define LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H
struct hsearch_data {
void *__opaque;
unsigned int __unused[2];
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H
diff --git a/libc/include/llvm-libc-types/struct_rlimit.h b/libc/include/llvm-libc-types/struct_rlimit.h
index 4fe0aa6cdf0b..e093d9f306c9 100644
--- a/libc/include/llvm-libc-types/struct_rlimit.h
+++ b/libc/include/llvm-libc-types/struct_rlimit.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_RLIMIT_H__
-#define __LLVM_LIBC_TYPES_STRUCT_RLIMIT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
+#define LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
#include <llvm-libc-types/rlim_t.h>
@@ -16,4 +16,4 @@ struct rlimit {
rlim_t rlim_max;
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_RLIMIT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
diff --git a/libc/include/llvm-libc-types/struct_rusage.h b/libc/include/llvm-libc-types/struct_rusage.h
index 43f345792205..21ea8b1061c2 100644
--- a/libc/include/llvm-libc-types/struct_rusage.h
+++ b/libc/include/llvm-libc-types/struct_rusage.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_RUSAGE_H__
-#define __LLVM_LIBC_TYPES_STRUCT_RUSAGE_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
+#define LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
#include <llvm-libc-types/struct_timeval.h>
@@ -34,4 +34,4 @@ struct rusage {
#endif
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_RUSAGE_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
diff --git a/libc/include/llvm-libc-types/struct_sched_param.h b/libc/include/llvm-libc-types/struct_sched_param.h
index 4f31881ceeb6..0521a4df652f 100644
--- a/libc/include/llvm-libc-types/struct_sched_param.h
+++ b/libc/include/llvm-libc-types/struct_sched_param.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H__
-#define __LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
+#define LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
#include <llvm-libc-types/pid_t.h>
#include <llvm-libc-types/struct_timespec.h>
@@ -18,4 +18,4 @@ struct sched_param {
int sched_priority;
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
diff --git a/libc/include/llvm-libc-types/struct_sigaction.h b/libc/include/llvm-libc-types/struct_sigaction.h
index 3940f14ffa84..54d2995f4ecd 100644
--- a/libc/include/llvm-libc-types/struct_sigaction.h
+++ b/libc/include/llvm-libc-types/struct_sigaction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGACTION_H__
-#define __LLVM_LIBC_TYPES_SIGACTION_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
+#define LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
#include <llvm-libc-types/siginfo_t.h>
#include <llvm-libc-types/sigset_t.h>
@@ -27,4 +27,4 @@ struct sigaction {
typedef void (*__sighandler_t)(int);
-#endif // __LLVM_LIBC_TYPES_SIGACTION_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
diff --git a/libc/include/llvm-libc-types/struct_sockaddr.h b/libc/include/llvm-libc-types/struct_sockaddr.h
index 9a6214c7d3e6..074b1ae50ef0 100644
--- a/libc/include/llvm-libc-types/struct_sockaddr.h
+++ b/libc/include/llvm-libc-types/struct_sockaddr.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H__
-#define __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
+#define LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
#include <llvm-libc-types/sa_family_t.h>
@@ -18,4 +18,4 @@ struct sockaddr {
char sa_data[];
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
diff --git a/libc/include/llvm-libc-types/struct_sockaddr_un.h b/libc/include/llvm-libc-types/struct_sockaddr_un.h
index 9c3efea27925..4332419a5b71 100644
--- a/libc/include/llvm-libc-types/struct_sockaddr_un.h
+++ b/libc/include/llvm-libc-types/struct_sockaddr_un.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H__
-#define __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
+#define LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
#include <llvm-libc-types/sa_family_t.h>
@@ -19,4 +19,4 @@ struct sockaddr_un {
char sun_path[108]; /* Pathname */
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
diff --git a/libc/include/llvm-libc-types/struct_stat.h b/libc/include/llvm-libc-types/struct_stat.h
index baaef15d9964..3539fb5b920e 100644
--- a/libc/include/llvm-libc-types/struct_stat.h
+++ b/libc/include/llvm-libc-types/struct_stat.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_STAT_H__
-#define __LLVM_LIBC_TYPES_STRUCT_STAT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_STAT_H
+#define LLVM_LIBC_TYPES_STRUCT_STAT_H
#include <llvm-libc-types/blkcnt_t.h>
#include <llvm-libc-types/blksize_t.h>
@@ -36,4 +36,4 @@ struct stat {
blkcnt_t st_blocks;
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_STAT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_STAT_H
diff --git a/libc/include/llvm-libc-types/struct_timespec.h b/libc/include/llvm-libc-types/struct_timespec.h
index 1fa6272d3df9..5d56d9c9468b 100644
--- a/libc/include/llvm-libc-types/struct_timespec.h
+++ b/libc/include/llvm-libc-types/struct_timespec.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TIMESPEC_H__
-#define __LLVM_LIBC_TYPES_TIMESPEC_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
+#define LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
#include <llvm-libc-types/time_t.h>
@@ -17,4 +17,4 @@ struct timespec {
long tv_nsec; /* Nanoseconds. */
};
-#endif // __LLVM_LIBC_TYPES_TIMESPEC_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
diff --git a/libc/include/llvm-libc-types/struct_timeval.h b/libc/include/llvm-libc-types/struct_timeval.h
index 756fecabb6ac..6a0b7bbaf825 100644
--- a/libc/include/llvm-libc-types/struct_timeval.h
+++ b/libc/include/llvm-libc-types/struct_timeval.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TIMEVAL_H__
-#define __LLVM_LIBC_TYPES_TIMEVAL_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
+#define LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
#include <llvm-libc-types/suseconds_t.h>
#include <llvm-libc-types/time_t.h>
@@ -17,4 +17,4 @@ struct timeval {
suseconds_t tv_usec; // Micro seconds
};
-#endif // __LLVM_LIBC_TYPES_TIMEVAL_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
diff --git a/libc/include/llvm-libc-types/struct_tm.h b/libc/include/llvm-libc-types/struct_tm.h
index 953e12e819c3..9fef7c5718ea 100644
--- a/libc/include/llvm-libc-types/struct_tm.h
+++ b/libc/include/llvm-libc-types/struct_tm.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TM_H__
-#define __LLVM_LIBC_TYPES_TM_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_TM_H
+#define LLVM_LIBC_TYPES_STRUCT_TM_H
struct tm {
int tm_sec; // seconds after the minute
@@ -21,4 +21,4 @@ struct tm {
int tm_isdst; // Daylight Saving Time flag
};
-#endif // __LLVM_LIBC_TYPES_TM_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_TM_H
diff --git a/libc/include/llvm-libc-types/struct_utsname.h b/libc/include/llvm-libc-types/struct_utsname.h
index bfd1ad9ceddb..e474171c7285 100644
--- a/libc/include/llvm-libc-types/struct_utsname.h
+++ b/libc/include/llvm-libc-types/struct_utsname.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_UTSNAME_H__
-#define __LLVM_LIBC_TYPES_STRUCT_UTSNAME_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_UTSNAME_H
+#define LLVM_LIBC_TYPES_STRUCT_UTSNAME_H
#if defined(__linux__)
#define __UTS_NAME_LENGTH 65
@@ -31,4 +31,4 @@ struct utsname {
#undef __UTS_NAME_LENGTH
-#endif // __LLVM_LIBC_TYPES_STRUCT_UTSNAME_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_UTSNAME_H
diff --git a/libc/include/llvm-libc-types/suseconds_t.h b/libc/include/llvm-libc-types/suseconds_t.h
index d7298ed74a4c..32ecc9f537d0 100644
--- a/libc/include/llvm-libc-types/suseconds_t.h
+++ b/libc/include/llvm-libc-types/suseconds_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SUSECONDS_T_H__
-#define __LLVM_LIBC_TYPES_SUSECONDS_T_H__
+#ifndef LLVM_LIBC_TYPES_SUSECONDS_T_H
+#define LLVM_LIBC_TYPES_SUSECONDS_T_H
typedef __INT32_TYPE__ suseconds_t;
-#endif // __LLVM_LIBC_TYPES_SUSECONDS_T_H__
+#endif // LLVM_LIBC_TYPES_SUSECONDS_T_H
diff --git a/libc/include/llvm-libc-types/tcflag_t.h b/libc/include/llvm-libc-types/tcflag_t.h
index 7c2ce2154208..2978487df434 100644
--- a/libc/include/llvm-libc-types/tcflag_t.h
+++ b/libc/include/llvm-libc-types/tcflag_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TCFLAG_T_H__
-#define __LLVM_LIBC_TYPES_TCFLAG_T_H__
+#ifndef LLVM_LIBC_TYPES_TCFLAG_T_H
+#define LLVM_LIBC_TYPES_TCFLAG_T_H
typedef unsigned int tcflag_t;
-#endif // __LLVM_LIBC_TYPES_TCFLAG_T_H__
+#endif // LLVM_LIBC_TYPES_TCFLAG_T_H
diff --git a/libc/include/llvm-libc-types/test_rpc_opcodes_t.h b/libc/include/llvm-libc-types/test_rpc_opcodes_t.h
index ec4eb2608799..7129768dc8b9 100644
--- a/libc/include/llvm-libc-types/test_rpc_opcodes_t.h
+++ b/libc/include/llvm-libc-types/test_rpc_opcodes_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TEST_RPC_OPCODE_H__
-#define __LLVM_LIBC_TYPES_TEST_RPC_OPCODE_H__
+#ifndef LLVM_LIBC_TYPES_TEST_RPC_OPCODES_T_H
+#define LLVM_LIBC_TYPES_TEST_RPC_OPCODES_T_H
// We consider the first 32768 opcodes as reserved for libc purposes. We allow
// extensions to use any other number without conflicting with anything else.
@@ -18,4 +18,4 @@ typedef enum : unsigned short {
RPC_TEST_STREAM,
} rpc_test_opcode_t;
-#endif // __LLVM_LIBC_TYPES_TEST_RPC_OPCODE_H__
+#endif // LLVM_LIBC_TYPES_TEST_RPC_OPCODES_T_H
diff --git a/libc/include/llvm-libc-types/thrd_start_t.h b/libc/include/llvm-libc-types/thrd_start_t.h
index 83fc32cbd1f8..1fb21bccc036 100644
--- a/libc/include/llvm-libc-types/thrd_start_t.h
+++ b/libc/include/llvm-libc-types/thrd_start_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_THRD_START_T_H__
-#define __LLVM_LIBC_TYPES_THRD_START_T_H__
+#ifndef LLVM_LIBC_TYPES_THRD_START_T_H
+#define LLVM_LIBC_TYPES_THRD_START_T_H
typedef int (*thrd_start_t)(void *);
-#endif // __LLVM_LIBC_TYPES_THRD_START_T_H__
+#endif // LLVM_LIBC_TYPES_THRD_START_T_H
diff --git a/libc/include/llvm-libc-types/thrd_t.h b/libc/include/llvm-libc-types/thrd_t.h
index 0743106c48c6..2e0f9a0d75ad 100644
--- a/libc/include/llvm-libc-types/thrd_t.h
+++ b/libc/include/llvm-libc-types/thrd_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_THRD_T_H__
-#define __LLVM_LIBC_TYPES_THRD_T_H__
+#ifndef LLVM_LIBC_TYPES_THRD_T_H
+#define LLVM_LIBC_TYPES_THRD_T_H
#include <llvm-libc-types/__thread_type.h>
typedef __thread_type thrd_t;
-#endif // __LLVM_LIBC_TYPES_THRD_T_H__
+#endif // LLVM_LIBC_TYPES_THRD_T_H
diff --git a/libc/include/llvm-libc-types/time_t.h b/libc/include/llvm-libc-types/time_t.h
index 2b3ccd4d8024..59953b343ba9 100644
--- a/libc/include/llvm-libc-types/time_t.h
+++ b/libc/include/llvm-libc-types/time_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TIME_T_H__
-#define __LLVM_LIBC_TYPES_TIME_T_H__
+#ifndef LLVM_LIBC_TYPES_TIME_T_H
+#define LLVM_LIBC_TYPES_TIME_T_H
#if (defined(__arm__) || defined(_M_ARM))
typedef __INTPTR_TYPE__ time_t;
@@ -15,4 +15,4 @@ typedef __INTPTR_TYPE__ time_t;
typedef __INT64_TYPE__ time_t;
#endif
-#endif // __LLVM_LIBC_TYPES_TIME_T_H__
+#endif // LLVM_LIBC_TYPES_TIME_T_H
diff --git a/libc/include/llvm-libc-types/tss_dtor_t.h b/libc/include/llvm-libc-types/tss_dtor_t.h
index f80661b588ba..c54b34e7d8b7 100644
--- a/libc/include/llvm-libc-types/tss_dtor_t.h
+++ b/libc/include/llvm-libc-types/tss_dtor_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TSS_DTOR_T_H__
-#define __LLVM_LIBC_TYPES_TSS_DTOR_T_H__
+#ifndef LLVM_LIBC_TYPES_TSS_DTOR_T_H
+#define LLVM_LIBC_TYPES_TSS_DTOR_T_H
typedef void (*tss_dtor_t)(void *);
-#endif // __LLVM_LIBC_TYPES_TSS_DTOR_T_H__
+#endif // LLVM_LIBC_TYPES_TSS_DTOR_T_H
diff --git a/libc/include/llvm-libc-types/tss_t.h b/libc/include/llvm-libc-types/tss_t.h
index 868ec1ac1128..92bc7ef451ca 100644
--- a/libc/include/llvm-libc-types/tss_t.h
+++ b/libc/include/llvm-libc-types/tss_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TSS_T_H__
-#define __LLVM_LIBC_TYPES_TSS_T_H__
+#ifndef LLVM_LIBC_TYPES_TSS_T_H
+#define LLVM_LIBC_TYPES_TSS_T_H
typedef unsigned int tss_t;
-#endif // __LLVM_LIBC_TYPES_TSS_T_H__
+#endif // LLVM_LIBC_TYPES_TSS_T_H
diff --git a/libc/include/llvm-libc-types/uid_t.h b/libc/include/llvm-libc-types/uid_t.h
index ae9fac2a4288..4f6c6479186b 100644
--- a/libc/include/llvm-libc-types/uid_t.h
+++ b/libc/include/llvm-libc-types/uid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_UID_T_H__
-#define __LLVM_LIBC_TYPES_UID_T_H__
+#ifndef LLVM_LIBC_TYPES_UID_T_H
+#define LLVM_LIBC_TYPES_UID_T_H
typedef __UINT32_TYPE__ uid_t;
-#endif // __LLVM_LIBC_TYPES_UID_T_H__
+#endif // LLVM_LIBC_TYPES_UID_T_H
diff --git a/libc/include/llvm-libc-types/union_sigval.h b/libc/include/llvm-libc-types/union_sigval.h
index ccc9f2e5d0fb..5f83cd220308 100644
--- a/libc/include/llvm-libc-types/union_sigval.h
+++ b/libc/include/llvm-libc-types/union_sigval.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_UNION_SIGVAL_H__
-#define __LLVM_LIBC_TYPES_UNION_SIGVAL_H__
+#ifndef LLVM_LIBC_TYPES_UNION_SIGVAL_H
+#define LLVM_LIBC_TYPES_UNION_SIGVAL_H
union sigval {
int sival_int;
void *sival_ptr;
};
-#endif // __LLVM_LIBC_TYPES_UNION_SIGVAL_H__
+#endif // LLVM_LIBC_TYPES_UNION_SIGVAL_H
diff --git a/libc/include/llvm-libc-types/wchar_t.h b/libc/include/llvm-libc-types/wchar_t.h
index 9efb5cd8e665..3e9a70b8afe6 100644
--- a/libc/include/llvm-libc-types/wchar_t.h
+++ b/libc/include/llvm-libc-types/wchar_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_WCHAR_T_H__
-#define __LLVM_LIBC_TYPES_WCHAR_T_H__
+#ifndef LLVM_LIBC_TYPES_WCHAR_T_H
+#define LLVM_LIBC_TYPES_WCHAR_T_H
// Since __need_wchar_t is defined, we get the definition of wchar_t from the
// standalone C header stddef.h. Also, because __need_wchar_t is defined,
@@ -16,4 +16,4 @@
#include <stddef.h>
#undef __need_wchar_t
-#endif // __LLVM_LIBC_TYPES_WCHAR_T_H__
+#endif // LLVM_LIBC_TYPES_WCHAR_T_H
diff --git a/libc/include/llvm-libc-types/wint_t.h b/libc/include/llvm-libc-types/wint_t.h
index cf6ccd7e1ae7..2758685a0845 100644
--- a/libc/include/llvm-libc-types/wint_t.h
+++ b/libc/include/llvm-libc-types/wint_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_WINT_T_H__
-#define __LLVM_LIBC_TYPES_WINT_T_H__
+#ifndef LLVM_LIBC_TYPES_WINT_T_H
+#define LLVM_LIBC_TYPES_WINT_T_H
// Since __need_wint_t is defined, we get the definition of wint_t from the
// standalone C header stddef.h. Also, because __need_wint_t is defined,
@@ -16,4 +16,4 @@
#include <stddef.h>
#undef __need_wint_t
-#endif // __LLVM_LIBC_TYPES_WINT_T_H__
+#endif // LLVM_LIBC_TYPES_WINT_T_H
diff --git a/libc/include/sys/queue.h b/libc/include/sys/queue.h
index 2a4dc37712d6..1cde35e77a04 100644
--- a/libc/include/sys/queue.h
+++ b/libc/include/sys/queue.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SYS_QUEUE_H
-#define LLVM_LIBC_SYS_QUEUE_H
+#ifndef SYS_QUEUE_H
+#define SYS_QUEUE_H
#include <llvm-libc-macros/sys-queue-macros.h>
-#endif // LLVM_LIBC_SYS_QUEUE_H
+#endif // SYS_QUEUE_H
diff --git a/libc/lib/CMakeLists.txt b/libc/lib/CMakeLists.txt
index e5ebd1e10084..37acf3950b46 100644
--- a/libc/lib/CMakeLists.txt
+++ b/libc/lib/CMakeLists.txt
@@ -77,9 +77,13 @@ install(
)
if(LIBC_TARGET_OS_IS_GPU)
+ set(gpu_install_dir lib${LLVM_LIBDIR_SUFFIX})
+ if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR)
+ set(gpu_install_dir lib${LLVM_LIBDIR_SUFFIX}/${LLVM_HOST_TRIPLE})
+ endif()
install(
TARGETS ${added_gpu_archive_targets}
- ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX}
+ ARCHIVE DESTINATION ${gpu_install_dir}
COMPONENT libc
)
foreach(file ${added_gpu_bitcode_targets})
diff --git a/libc/spec/spec.td b/libc/spec/spec.td
index 90c076580be1..998f37fb26de 100644
--- a/libc/spec/spec.td
+++ b/libc/spec/spec.td
@@ -51,6 +51,7 @@ def LongDoubleType : NamedType<"long double">;
def CharType : NamedType<"char">;
def UnsignedCharType : NamedType<"unsigned char">;
def UnsignedShortType : NamedType<"unsigned short">;
+def BoolType : NamedType<"bool">;
def Float128Type : NamedType<"float128">;
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index 8a1a235e4eec..94ac62966f3b 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -364,37 +364,37 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"copysign", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"copysignf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
FunctionSpec<"copysignl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"copysignf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"copysignf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"ceil", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"ceilf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
FunctionSpec<"ceill", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"ceilf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"ceilf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"fabs", RetValSpec<DoubleType>, [ArgSpec<DoubleType>], [ConstAttr]>,
FunctionSpec<"fabsf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
FunctionSpec<"fabsl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"fabsf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"fabsf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"fdim", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"fdimf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
FunctionSpec<"fdiml", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"fdimf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"fdimf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"floor", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"floorf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
FunctionSpec<"floorl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"floorf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"floorf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"fmin", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"fminf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
FunctionSpec<"fminl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"fminf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"fminf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"fmax", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"fmaxf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
FunctionSpec<"fmaxl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"fmaxf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"fmaxf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"fma", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"fmaf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>, ArgSpec<FloatType>]>,
@@ -406,7 +406,7 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"frexp", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntPtr>]>,
FunctionSpec<"frexpf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntPtr>]>,
FunctionSpec<"frexpl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntPtr>]>,
- GuardedFunctionSpec<"frexpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntPtr>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"frexpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntPtr>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"hypot", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"hypotf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
@@ -414,17 +414,17 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"ilogb", RetValSpec<IntType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"ilogbf", RetValSpec<IntType>, [ArgSpec<FloatType>]>,
FunctionSpec<"ilogbl", RetValSpec<IntType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"ilogbf128", RetValSpec<IntType>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"ilogbf128", RetValSpec<IntType>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"llogb", RetValSpec<LongType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"llogbf", RetValSpec<LongType>, [ArgSpec<FloatType>]>,
FunctionSpec<"llogbl", RetValSpec<LongType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"llogbf128", RetValSpec<LongType>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"llogbf128", RetValSpec<LongType>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"ldexp", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>]>,
FunctionSpec<"ldexpf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>]>,
FunctionSpec<"ldexpl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntType>]>,
- GuardedFunctionSpec<"ldexpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"ldexpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"log10", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"log10f", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
@@ -441,7 +441,7 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"logb", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"logbf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
FunctionSpec<"logbl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"logbf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"logbf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"modf", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoublePtr>]>,
FunctionSpec<"modff", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatPtr>]>,
@@ -476,7 +476,7 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"round", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"roundf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
FunctionSpec<"roundl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"roundf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"roundf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"lround", RetValSpec<LongType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"lroundf", RetValSpec<LongType>, [ArgSpec<FloatType>]>,
@@ -501,12 +501,12 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"sqrt", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"sqrtf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
FunctionSpec<"sqrtl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"sqrtf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"sqrtf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"trunc", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"truncf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
FunctionSpec<"truncl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>]>,
- GuardedFunctionSpec<"truncf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_COMPILER_HAS_FLOAT128">,
+ GuardedFunctionSpec<"truncf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"nearbyint", RetValSpec<DoubleType>, [ArgSpec<DoubleType>]>,
FunctionSpec<"nearbyintf", RetValSpec<FloatType>, [ArgSpec<FloatType>]>,
@@ -799,7 +799,8 @@ def StdC : StandardSpec<"stdc"> {
Macro<"stdc_first_trailing_zero">,
Macro<"stdc_first_trailing_one">,
Macro<"stdc_count_zeros">,
- Macro<"stdc_count_ones">
+ Macro<"stdc_count_ones">,
+ Macro<"stdc_has_single_bit">
], // Macros
[], // Types
[], // Enumerations
@@ -848,7 +849,12 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"stdc_count_ones_us", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedShortType>]>,
FunctionSpec<"stdc_count_ones_ui", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedIntType>]>,
FunctionSpec<"stdc_count_ones_ul", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongType>]>,
- FunctionSpec<"stdc_count_ones_ull", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongLongType>]>
+ FunctionSpec<"stdc_count_ones_ull", RetValSpec<UnsignedIntType>, [ArgSpec<UnsignedLongLongType>]>,
+ FunctionSpec<"stdc_has_single_bit_uc", RetValSpec<BoolType>, [ArgSpec<UnsignedCharType>]>,
+ FunctionSpec<"stdc_has_single_bit_us", RetValSpec<BoolType>, [ArgSpec<UnsignedShortType>]>,
+ FunctionSpec<"stdc_has_single_bit_ui", RetValSpec<BoolType>, [ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"stdc_has_single_bit_ul", RetValSpec<BoolType>, [ArgSpec<UnsignedLongType>]>,
+ FunctionSpec<"stdc_has_single_bit_ull", RetValSpec<BoolType>, [ArgSpec<UnsignedLongLongType>]>
] // Functions
>;
diff --git a/libc/src/__support/CPP/CMakeLists.txt b/libc/src/__support/CPP/CMakeLists.txt
index d747412791bd..6c35bc709081 100644
--- a/libc/src/__support/CPP/CMakeLists.txt
+++ b/libc/src/__support/CPP/CMakeLists.txt
@@ -153,10 +153,10 @@ add_header_library(
type_traits/type_identity.h
type_traits/void_t.h
DEPENDS
+ libc.include.llvm-libc-macros.stdfix_macros
libc.src.__support.macros.attributes
libc.src.__support.macros.config
- libc.src.__support.macros.properties.float
- libc.include.llvm-libc-macros.stdfix_macros
+ libc.src.__support.macros.properties.types
)
add_header_library(
diff --git a/libc/src/__support/CPP/type_traits/is_fixed_point.h b/libc/src/__support/CPP/type_traits/is_fixed_point.h
index 317ba39748b7..025268bc2979 100644
--- a/libc/src/__support/CPP/type_traits/is_fixed_point.h
+++ b/libc/src/__support/CPP/type_traits/is_fixed_point.h
@@ -43,4 +43,4 @@ LIBC_INLINE_VAR constexpr bool is_fixed_point_v = is_fixed_point<T>::value;
} // namespace LIBC_NAMESPACE::cpp
-#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_INTEGRAL_H
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_FIXED_POINT_H
diff --git a/libc/src/__support/CPP/type_traits/is_floating_point.h b/libc/src/__support/CPP/type_traits/is_floating_point.h
index 3a5260bcab11..4c8f50f4e91f 100644
--- a/libc/src/__support/CPP/type_traits/is_floating_point.h
+++ b/libc/src/__support/CPP/type_traits/is_floating_point.h
@@ -11,7 +11,7 @@
#include "src/__support/CPP/type_traits/is_same.h"
#include "src/__support/CPP/type_traits/remove_cv.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h" // LIBC_TYPES_HAS_FLOAT128
namespace LIBC_NAMESPACE::cpp {
@@ -24,13 +24,13 @@ private:
}
public:
-#if defined(LIBC_COMPILER_HAS_FLOAT128)
+#if defined(LIBC_TYPES_HAS_FLOAT128)
LIBC_INLINE_VAR static constexpr bool value =
__is_unqualified_any_of<T, float, double, long double, float128>();
#else
LIBC_INLINE_VAR static constexpr bool value =
__is_unqualified_any_of<T, float, double, long double>();
-#endif // LIBC_COMPILER_HAS_FLOAT128
+#endif // LIBC_TYPES_HAS_FLOAT128
};
template <typename T>
LIBC_INLINE_VAR constexpr bool is_floating_point_v =
diff --git a/libc/src/__support/FPUtil/CMakeLists.txt b/libc/src/__support/FPUtil/CMakeLists.txt
index 0c932e8ffcd5..f1c6fba22856 100644
--- a/libc/src/__support/FPUtil/CMakeLists.txt
+++ b/libc/src/__support/FPUtil/CMakeLists.txt
@@ -33,7 +33,7 @@ add_header_library(
libc.src.__support.CPP.type_traits
libc.src.__support.libc_assert
libc.src.__support.macros.attributes
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.math_extras
libc.src.__support.uint128
)
diff --git a/libc/src/__support/FPUtil/FPBits.h b/libc/src/__support/FPUtil/FPBits.h
index b3179a24c747..7b3882dde1b7 100644
--- a/libc/src/__support/FPUtil/FPBits.h
+++ b/libc/src/__support/FPUtil/FPBits.h
@@ -15,7 +15,7 @@
#include "src/__support/common.h"
#include "src/__support/libc_assert.h" // LIBC_ASSERT
#include "src/__support/macros/attributes.h" // LIBC_INLINE, LIBC_INLINE_VAR
-#include "src/__support/macros/properties/float.h" // LIBC_COMPILER_HAS_FLOAT128
+#include "src/__support/macros/properties/types.h" // LIBC_TYPES_HAS_FLOAT128
#include "src/__support/math_extras.h" // mask_trailing_ones
#include <stdint.h>
@@ -239,23 +239,23 @@ protected:
// An opaque type to store a floating point exponent.
// We define special values but it is valid to create arbitrary values as long
- // as they are in the range [MIN, MAX].
+ // as they are in the range [min, max].
struct Exponent : public TypedInt<int32_t> {
using UP = TypedInt<int32_t>;
using UP::UP;
- LIBC_INLINE static constexpr auto SUBNORMAL() {
+ LIBC_INLINE static constexpr auto subnormal() {
return Exponent(-EXP_BIAS);
}
- LIBC_INLINE static constexpr auto MIN() { return Exponent(1 - EXP_BIAS); }
- LIBC_INLINE static constexpr auto ZERO() { return Exponent(0); }
- LIBC_INLINE static constexpr auto MAX() { return Exponent(EXP_BIAS); }
- LIBC_INLINE static constexpr auto INF() { return Exponent(EXP_BIAS + 1); }
+ LIBC_INLINE static constexpr auto min() { return Exponent(1 - EXP_BIAS); }
+ LIBC_INLINE static constexpr auto zero() { return Exponent(0); }
+ LIBC_INLINE static constexpr auto max() { return Exponent(EXP_BIAS); }
+ LIBC_INLINE static constexpr auto inf() { return Exponent(EXP_BIAS + 1); }
};
// An opaque type to store a floating point biased exponent.
// We define special values but it is valid to create arbitrary values as long
- // as they are in the range [BITS_ALL_ZEROES, BITS_ALL_ONES].
- // Values greater than BITS_ALL_ONES are truncated.
+ // as they are in the range [zero, bits_all_ones].
+ // Values greater than bits_all_ones are truncated.
struct BiasedExponent : public TypedInt<uint32_t> {
using UP = TypedInt<uint32_t>;
using UP::UP;
@@ -269,13 +269,13 @@ protected:
}
LIBC_INLINE constexpr BiasedExponent &operator++() {
- LIBC_ASSERT(*this != BiasedExponent(Exponent::INF()));
+ LIBC_ASSERT(*this != BiasedExponent(Exponent::inf()));
++UP::value;
return *this;
}
LIBC_INLINE constexpr BiasedExponent &operator--() {
- LIBC_ASSERT(*this != BiasedExponent(Exponent::SUBNORMAL()));
+ LIBC_ASSERT(*this != BiasedExponent(Exponent::subnormal()));
--UP::value;
return *this;
}
@@ -283,9 +283,9 @@ protected:
// An opaque type to store a floating point significand.
// We define special values but it is valid to create arbitrary values as long
- // as they are in the range [ZERO, BITS_ALL_ONES].
+ // as they are in the range [zero, bits_all_ones].
// Note that the semantics of the Significand are implementation dependent.
- // Values greater than BITS_ALL_ONES are truncated.
+ // Values greater than bits_all_ones are truncated.
struct Significand : public TypedInt<StorageType> {
using UP = TypedInt<StorageType>;
using UP::UP;
@@ -305,16 +305,16 @@ protected:
return Significand(StorageType(a.to_storage_type() >> shift));
}
- LIBC_INLINE static constexpr auto ZERO() {
+ LIBC_INLINE static constexpr auto zero() {
return Significand(StorageType(0));
}
- LIBC_INLINE static constexpr auto LSB() {
+ LIBC_INLINE static constexpr auto lsb() {
return Significand(StorageType(1));
}
- LIBC_INLINE static constexpr auto MSB() {
+ LIBC_INLINE static constexpr auto msb() {
return Significand(StorageType(1) << (SIG_LEN - 1));
}
- LIBC_INLINE static constexpr auto BITS_ALL_ONES() {
+ LIBC_INLINE static constexpr auto bits_all_ones() {
return Significand(SIG_MASK);
}
};
@@ -393,58 +393,58 @@ protected:
public:
// Builders
LIBC_INLINE static constexpr RetT zero(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT one(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::ZERO(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::zero(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT min_subnormal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::LSB()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::lsb()));
}
LIBC_INLINE static constexpr RetT max_subnormal(Sign sign = Sign::POS) {
return RetT(
- encode(sign, Exponent::SUBNORMAL(), Significand::BITS_ALL_ONES()));
+ encode(sign, Exponent::subnormal(), Significand::bits_all_ones()));
}
LIBC_INLINE static constexpr RetT min_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MIN(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::min(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT max_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MAX(), Significand::BITS_ALL_ONES()));
+ return RetT(encode(sign, Exponent::max(), Significand::bits_all_ones()));
}
LIBC_INLINE static constexpr RetT inf(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::INF(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::inf(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT signaling_nan(Sign sign = Sign::POS,
StorageType v = 0) {
- return RetT(encode(sign, Exponent::INF(),
- (v ? Significand(v) : (Significand::MSB() >> 1))));
+ return RetT(encode(sign, Exponent::inf(),
+ (v ? Significand(v) : (Significand::msb() >> 1))));
}
LIBC_INLINE static constexpr RetT quiet_nan(Sign sign = Sign::POS,
StorageType v = 0) {
return RetT(
- encode(sign, Exponent::INF(), Significand::MSB() | Significand(v)));
+ encode(sign, Exponent::inf(), Significand::msb() | Significand(v)));
}
// Observers
LIBC_INLINE constexpr bool is_zero() const { return exp_sig_bits() == 0; }
LIBC_INLINE constexpr bool is_nan() const {
- return exp_sig_bits() > encode(Exponent::INF(), Significand::ZERO());
+ return exp_sig_bits() > encode(Exponent::inf(), Significand::zero());
}
LIBC_INLINE constexpr bool is_quiet_nan() const {
- return exp_sig_bits() >= encode(Exponent::INF(), Significand::MSB());
+ return exp_sig_bits() >= encode(Exponent::inf(), Significand::msb());
}
LIBC_INLINE constexpr bool is_signaling_nan() const {
return is_nan() && !is_quiet_nan();
}
LIBC_INLINE constexpr bool is_inf() const {
- return exp_sig_bits() == encode(Exponent::INF(), Significand::ZERO());
+ return exp_sig_bits() == encode(Exponent::inf(), Significand::zero());
}
LIBC_INLINE constexpr bool is_finite() const {
- return exp_bits() != encode(Exponent::INF());
+ return exp_bits() != encode(Exponent::inf());
}
LIBC_INLINE
constexpr bool is_subnormal() const {
- return exp_bits() == encode(Exponent::SUBNORMAL());
+ return exp_bits() == encode(Exponent::subnormal());
}
LIBC_INLINE constexpr bool is_normal() const {
return is_finite() && !is_subnormal();
@@ -493,37 +493,37 @@ protected:
public:
// Builders
LIBC_INLINE static constexpr RetT zero(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT one(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::ZERO(), Significand::MSB()));
+ return RetT(encode(sign, Exponent::zero(), Significand::msb()));
}
LIBC_INLINE static constexpr RetT min_subnormal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::LSB()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::lsb()));
}
LIBC_INLINE static constexpr RetT max_subnormal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(),
- Significand::BITS_ALL_ONES() ^ Significand::MSB()));
+ return RetT(encode(sign, Exponent::subnormal(),
+ Significand::bits_all_ones() ^ Significand::msb()));
}
LIBC_INLINE static constexpr RetT min_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MIN(), Significand::MSB()));
+ return RetT(encode(sign, Exponent::min(), Significand::msb()));
}
LIBC_INLINE static constexpr RetT max_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MAX(), Significand::BITS_ALL_ONES()));
+ return RetT(encode(sign, Exponent::max(), Significand::bits_all_ones()));
}
LIBC_INLINE static constexpr RetT inf(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::INF(), Significand::MSB()));
+ return RetT(encode(sign, Exponent::inf(), Significand::msb()));
}
LIBC_INLINE static constexpr RetT signaling_nan(Sign sign = Sign::POS,
StorageType v = 0) {
- return RetT(encode(sign, Exponent::INF(),
- Significand::MSB() |
- (v ? Significand(v) : (Significand::MSB() >> 2))));
+ return RetT(encode(sign, Exponent::inf(),
+ Significand::msb() |
+ (v ? Significand(v) : (Significand::msb() >> 2))));
}
LIBC_INLINE static constexpr RetT quiet_nan(Sign sign = Sign::POS,
StorageType v = 0) {
- return RetT(encode(sign, Exponent::INF(),
- Significand::MSB() | (Significand::MSB() >> 1) |
+ return RetT(encode(sign, Exponent::inf(),
+ Significand::msb() | (Significand::msb() >> 1) |
Significand(v)));
}
@@ -541,33 +541,33 @@ public:
// - Quiet Not a Number
// - Unnormal
// This can be reduced to the following logic:
- if (exp_bits() == encode(Exponent::INF()))
+ if (exp_bits() == encode(Exponent::inf()))
return !is_inf();
- if (exp_bits() != encode(Exponent::SUBNORMAL()))
- return (sig_bits() & encode(Significand::MSB())) == 0;
+ if (exp_bits() != encode(Exponent::subnormal()))
+ return (sig_bits() & encode(Significand::msb())) == 0;
return false;
}
LIBC_INLINE constexpr bool is_quiet_nan() const {
return exp_sig_bits() >=
- encode(Exponent::INF(),
- Significand::MSB() | (Significand::MSB() >> 1));
+ encode(Exponent::inf(),
+ Significand::msb() | (Significand::msb() >> 1));
}
LIBC_INLINE constexpr bool is_signaling_nan() const {
return is_nan() && !is_quiet_nan();
}
LIBC_INLINE constexpr bool is_inf() const {
- return exp_sig_bits() == encode(Exponent::INF(), Significand::MSB());
+ return exp_sig_bits() == encode(Exponent::inf(), Significand::msb());
}
LIBC_INLINE constexpr bool is_finite() const {
return !is_inf() && !is_nan();
}
LIBC_INLINE
constexpr bool is_subnormal() const {
- return exp_bits() == encode(Exponent::SUBNORMAL());
+ return exp_bits() == encode(Exponent::subnormal());
}
LIBC_INLINE constexpr bool is_normal() const {
const auto exp = exp_bits();
- if (exp == encode(Exponent::SUBNORMAL()) || exp == encode(Exponent::INF()))
+ if (exp == encode(Exponent::subnormal()) || exp == encode(Exponent::inf()))
return false;
return get_implicit_bit();
}
@@ -578,7 +578,7 @@ public:
} else if (exp_sig_bits() == max_subnormal().uintval()) {
return min_normal(sign());
} else if (sig_bits() == SIG_MASK) {
- return RetT(encode(sign(), ++biased_exponent(), Significand::ZERO()));
+ return RetT(encode(sign(), ++biased_exponent(), Significand::zero()));
} else {
return RetT(bits + StorageType(1));
}
@@ -715,9 +715,9 @@ public:
LIBC_INLINE constexpr int get_explicit_exponent() const {
Exponent exponent(UP::biased_exponent());
if (is_zero())
- exponent = Exponent::ZERO();
- if (exponent == Exponent::SUBNORMAL())
- exponent = Exponent::MIN();
+ exponent = Exponent::zero();
+ if (exponent == Exponent::subnormal())
+ exponent = Exponent::min();
return static_cast<int32_t>(exponent);
}
@@ -804,16 +804,12 @@ template <typename T> LIBC_INLINE static constexpr FPType get_fp_type() {
else if constexpr (__LDBL_MANT_DIG__ == 113)
return FPType::IEEE754_Binary128;
}
-#if defined(LIBC_COMPILER_HAS_C23_FLOAT16)
- else if constexpr (cpp::is_same_v<UnqualT, _Float16>)
+#if defined(LIBC_TYPES_HAS_FLOAT16)
+ else if constexpr (cpp::is_same_v<UnqualT, float16>)
return FPType::IEEE754_Binary16;
#endif
-#if defined(LIBC_COMPILER_HAS_C23_FLOAT128)
- else if constexpr (cpp::is_same_v<UnqualT, _Float128>)
- return FPType::IEEE754_Binary128;
-#endif
-#if defined(LIBC_COMPILER_HAS_FLOAT128_EXTENSION)
- else if constexpr (cpp::is_same_v<UnqualT, __float128>)
+#if defined(LIBC_TYPES_HAS_FLOAT128)
+ else if constexpr (cpp::is_same_v<UnqualT, float128>)
return FPType::IEEE754_Binary128;
#endif
else
diff --git a/libc/src/__support/FPUtil/fpbits_str.h b/libc/src/__support/FPUtil/fpbits_str.h
index a1654cddad74..212265bb9ad4 100644
--- a/libc/src/__support/FPUtil/fpbits_str.h
+++ b/libc/src/__support/FPUtil/fpbits_str.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_FPUTIL_FP_BITS_STR_H
-#define LLVM_LIBC_SRC___SUPPORT_FPUTIL_FP_BITS_STR_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_FPUTIL_FPBITS_STR_H
+#define LLVM_LIBC_SRC___SUPPORT_FPUTIL_FPBITS_STR_H
#include "src/__support/CPP/string.h"
#include "src/__support/CPP/type_traits.h"
@@ -73,4 +73,4 @@ template <typename T> LIBC_INLINE cpp::string str(fputil::FPBits<T> x) {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_FPUTIL_FP_BITS_STR_H
+#endif // LLVM_LIBC_SRC___SUPPORT_FPUTIL_FPBITS_STR_H
diff --git a/libc/src/__support/GPU/generic/utils.h b/libc/src/__support/GPU/generic/utils.h
index 58db88dce1ca..c6c3c01cf7d5 100644
--- a/libc/src/__support/GPU/generic/utils.h
+++ b/libc/src/__support/GPU/generic/utils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_IO_H
-#define LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_IO_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_UTILS_H
+#define LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_UTILS_H
#include "src/__support/common.h"
@@ -78,4 +78,4 @@ LIBC_INLINE uint32_t get_cluster_id() { return 0; }
} // namespace gpu
} // namespace LIBC_NAMESPACE
-#endif
+#endif // LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_UTILS_H
diff --git a/libc/src/__support/GPU/utils.h b/libc/src/__support/GPU/utils.h
index 6505b18dbd33..0f9167cdee06 100644
--- a/libc/src/__support/GPU/utils.h
+++ b/libc/src/__support/GPU/utils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_UTIL_H
-#define LLVM_LIBC_SRC___SUPPORT_GPU_UTIL_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_UTILS_H
+#define LLVM_LIBC_SRC___SUPPORT_GPU_UTILS_H
#include "src/__support/macros/properties/architectures.h"
@@ -34,4 +34,4 @@ LIBC_INLINE bool is_first_lane(uint64_t lane_mask) {
} // namespace gpu
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_IO_H
+#endif // LLVM_LIBC_SRC___SUPPORT_GPU_UTILS_H
diff --git a/libc/src/__support/HashTable/table.h b/libc/src/__support/HashTable/table.h
index 5b4697e5245b..8f6c5887c189 100644
--- a/libc/src/__support/HashTable/table.h
+++ b/libc/src/__support/HashTable/table.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_HASHTABLE_table_H
-#define LLVM_LIBC_SRC___SUPPORT_HASHTABLE_table_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H
+#define LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H
#include "include/llvm-libc-types/ENTRY.h"
#include "src/__support/CPP/bit.h" // bit_ceil
@@ -351,4 +351,4 @@ public:
} // namespace internal
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_HASHTABLE_table_H
+#endif // LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H
diff --git a/libc/src/__support/OSUtil/gpu/io.h b/libc/src/__support/OSUtil/gpu/io.h
index d6c89cf45e3a..e5562eb74a67 100644
--- a/libc/src/__support/OSUtil/gpu/io.h
+++ b/libc/src/__support/OSUtil/gpu/io.h
@@ -18,4 +18,4 @@ void write_to_stderr(cpp::string_view msg);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_LINUX_IO_H
+#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_IO_H
diff --git a/libc/src/__support/RPC/rpc_client.h b/libc/src/__support/RPC/rpc_client.h
index 571d7cce2a80..6e1827dbfeea 100644
--- a/libc/src/__support/RPC/rpc_client.h
+++ b/libc/src/__support/RPC/rpc_client.h
@@ -11,7 +11,7 @@
#include "rpc.h"
-#include "llvm-libc-types/rpc_opcodes_t.h"
+#include "include/llvm-libc-types/rpc_opcodes_t.h"
namespace LIBC_NAMESPACE {
namespace rpc {
diff --git a/libc/src/__support/RPC/rpc_util.h b/libc/src/__support/RPC/rpc_util.h
index 11d2f751355d..7a9901af83e7 100644
--- a/libc/src/__support/RPC/rpc_util.h
+++ b/libc/src/__support/RPC/rpc_util.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTILS_H
-#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTILS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
+#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
#include "src/__support/CPP/type_traits.h"
#include "src/__support/GPU/utils.h"
@@ -69,4 +69,4 @@ LIBC_INLINE void rpc_memcpy(void *dst, const void *src, size_t count) {
} // namespace rpc
} // namespace LIBC_NAMESPACE
-#endif
+#endif // LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
diff --git a/libc/src/__support/StringUtil/message_mapper.h b/libc/src/__support/StringUtil/message_mapper.h
index c93a57c62567..dd91839fb920 100644
--- a/libc/src/__support/StringUtil/message_mapper.h
+++ b/libc/src/__support/StringUtil/message_mapper.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_MESSAGE_MAPPER_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_MESSAGE_MAPPER_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_MESSAGE_MAPPER_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_MESSAGE_MAPPER_H
#include "src/__support/CPP/array.h"
#include "src/__support/CPP/optional.h"
@@ -100,4 +100,4 @@ constexpr MsgTable<N1 + N2> operator+(const MsgTable<N1> &t1,
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_MESSAGE_MAPPER_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_MESSAGE_MAPPER_H
diff --git a/libc/src/__support/StringUtil/platform_errors.h b/libc/src/__support/StringUtil/platform_errors.h
index dfa841ce5d82..32e8414b3e3d 100644
--- a/libc/src/__support/StringUtil/platform_errors.h
+++ b/libc/src/__support/StringUtil/platform_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_ERROR_TABLE_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_ERROR_TABLE_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_ERRORS_H
#if defined(__linux__) || defined(__Fuchsia__)
#include "tables/linux_platform_errors.h"
@@ -15,4 +15,4 @@
#include "tables/minimal_platform_errors.h"
#endif
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_ERROR_TABLE_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_ERRORS_H
diff --git a/libc/src/__support/StringUtil/platform_signals.h b/libc/src/__support/StringUtil/platform_signals.h
index 0a1c3f6bef25..52da082649bf 100644
--- a/libc/src/__support/StringUtil/platform_signals.h
+++ b/libc/src/__support/StringUtil/platform_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_SIGNAL_TABLE_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_SIGNAL_TABLE_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_SIGNALS_H
#if defined(__linux__) || defined(__Fuchsia__)
#include "tables/linux_platform_signals.h"
@@ -15,4 +15,4 @@
#include "tables/minimal_platform_signals.h"
#endif
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_SIGNAL_TABLE_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_extension_errors.h b/libc/src/__support/StringUtil/tables/linux_extension_errors.h
index 4964fa47efd5..f48968892e96 100644
--- a/libc/src/__support/StringUtil/tables/linux_extension_errors.h
+++ b/libc/src/__support/StringUtil/tables/linux_extension_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_ERRORS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -72,4 +72,4 @@ constexpr MsgTable<52> LINUX_ERRORS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_extension_signals.h b/libc/src/__support/StringUtil/tables/linux_extension_signals.h
index 633d0e2ed538..3f9f0c66ff24 100644
--- a/libc/src/__support/StringUtil/tables/linux_extension_signals.h
+++ b/libc/src/__support/StringUtil/tables/linux_extension_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -30,4 +30,4 @@ LIBC_INLINE_VAR constexpr const MsgTable<3> LINUX_SIGNALS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_platform_errors.h b/libc/src/__support/StringUtil/tables/linux_platform_errors.h
index a9ae2e8100a1..a7bb545d3bf9 100644
--- a/libc/src/__support/StringUtil/tables/linux_platform_errors.h
+++ b/libc/src/__support/StringUtil/tables/linux_platform_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_ERRORS_H
#include "linux_extension_errors.h"
#include "posix_errors.h"
@@ -20,4 +20,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_ERRORS =
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_platform_signals.h b/libc/src/__support/StringUtil/tables/linux_platform_signals.h
index 1daaa9cc6285..f12d31f222b0 100644
--- a/libc/src/__support/StringUtil/tables/linux_platform_signals.h
+++ b/libc/src/__support/StringUtil/tables/linux_platform_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
#include "linux_extension_signals.h"
#include "posix_signals.h"
@@ -20,4 +20,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_SIGNALS =
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/minimal_platform_errors.h b/libc/src/__support/StringUtil/tables/minimal_platform_errors.h
index 1cfd9e2e944d..c5672c4d875f 100644
--- a/libc/src/__support/StringUtil/tables/minimal_platform_errors.h
+++ b/libc/src/__support/StringUtil/tables/minimal_platform_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
#include "stdc_errors.h"
@@ -17,4 +17,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_ERRORS = STDC_ERRORS;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/minimal_platform_signals.h b/libc/src/__support/StringUtil/tables/minimal_platform_signals.h
index 7fcf91bfee85..7fe0dccfc465 100644
--- a/libc/src/__support/StringUtil/tables/minimal_platform_signals.h
+++ b/libc/src/__support/StringUtil/tables/minimal_platform_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
#include "stdc_signals.h"
@@ -17,4 +17,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_SIGNALS = STDC_SIGNALS;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/posix_errors.h b/libc/src/__support/StringUtil/tables/posix_errors.h
index 3ade7aaab4f0..3cb6de394ea3 100644
--- a/libc/src/__support/StringUtil/tables/posix_errors.h
+++ b/libc/src/__support/StringUtil/tables/posix_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_ERRORS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -96,4 +96,4 @@ LIBC_INLINE_VAR constexpr MsgTable<76> POSIX_ERRORS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/posix_signals.h b/libc/src/__support/StringUtil/tables/posix_signals.h
index 2fba2d963f4b..b9535cbeb6f6 100644
--- a/libc/src/__support/StringUtil/tables/posix_signals.h
+++ b/libc/src/__support/StringUtil/tables/posix_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_SIGNALS_H
#include "src/__support/CPP/array.h"
#include "src/__support/StringUtil/message_mapper.h"
@@ -43,4 +43,4 @@ LIBC_INLINE_VAR constexpr MsgTable<22> POSIX_SIGNALS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/signal_table.h b/libc/src/__support/StringUtil/tables/signal_table.h
index 5035c54770c5..d7ffbc63722e 100644
--- a/libc/src/__support/StringUtil/tables/signal_table.h
+++ b/libc/src/__support/StringUtil/tables/signal_table.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_SIGNAL_TABLE_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_SIGNAL_TABLE_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_SIGNAL_TABLE_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_SIGNAL_TABLE_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -36,4 +36,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_SIGNALS = []() {
} // namespace LIBC_NAMESPACE::internal
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_SIGNAL_TABLE_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_SIGNAL_TABLE_H
diff --git a/libc/src/__support/StringUtil/tables/stdc_errors.h b/libc/src/__support/StringUtil/tables/stdc_errors.h
index f0fc78710b18..a9c152783455 100644
--- a/libc/src/__support/StringUtil/tables/stdc_errors.h
+++ b/libc/src/__support/StringUtil/tables/stdc_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_ERRORS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -24,4 +24,4 @@ LIBC_INLINE_VAR constexpr const MsgTable<4> STDC_ERRORS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/stdc_signals.h b/libc/src/__support/StringUtil/tables/stdc_signals.h
index 773f182140ef..7c93b45a441c 100644
--- a/libc/src/__support/StringUtil/tables/stdc_signals.h
+++ b/libc/src/__support/StringUtil/tables/stdc_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_SIGNALS_H
#include <signal.h> // For signal numbers
@@ -26,4 +26,4 @@ LIBC_INLINE_VAR constexpr const MsgTable<6> STDC_SIGNALS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_SIGNALS_H
diff --git a/libc/src/__support/fixed_point/fx_bits.h b/libc/src/__support/fixed_point/fx_bits.h
index fcd47cd72cbb..53e693d4ddfd 100644
--- a/libc/src/__support/fixed_point/fx_bits.h
+++ b/libc/src/__support/fixed_point/fx_bits.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXBITS_H
-#define LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXBITS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_BITS_H
+#define LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_BITS_H
#include "include/llvm-libc-macros/stdfix-macros.h"
#include "src/__support/CPP/bit.h"
@@ -126,7 +126,7 @@ bit_not(T x) {
using BitType = typename FXRep<T>::StorageType;
BitType x_bit = cpp::bit_cast<BitType>(x);
// For some reason, bit_cast cannot deduce BitType from the input.
- return cpp::bit_cast<T, BitType>(~x_bit);
+ return cpp::bit_cast<T, BitType>(static_cast<BitType>(~x_bit));
}
template <typename T> LIBC_INLINE constexpr T abs(T x) {
@@ -165,4 +165,4 @@ template <typename T> LIBC_INLINE constexpr T round(T x, int n) {
#endif // LIBC_COMPILER_HAS_FIXED_POINT
-#endif // LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXBITS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_BITS_H
diff --git a/libc/src/__support/fixed_point/fx_rep.h b/libc/src/__support/fixed_point/fx_rep.h
index fcd7554c4d85..f8593a93684c 100644
--- a/libc/src/__support/fixed_point/fx_rep.h
+++ b/libc/src/__support/fixed_point/fx_rep.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
-#define LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_REP_H
+#define LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_REP_H
#include "include/llvm-libc-macros/stdfix-macros.h"
#include "src/__support/CPP/type_traits.h"
@@ -273,4 +273,4 @@ struct FXRep<unsigned long sat accum> : FXRep<unsigned long accum> {};
#endif // LIBC_COMPILER_HAS_FIXED_POINT
-#endif // LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
+#endif // LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_REP_H
diff --git a/libc/src/__support/macros/properties/CMakeLists.txt b/libc/src/__support/macros/properties/CMakeLists.txt
index 3c492ab55a90..bbc45650f3fc 100644
--- a/libc/src/__support/macros/properties/CMakeLists.txt
+++ b/libc/src/__support/macros/properties/CMakeLists.txt
@@ -25,9 +25,9 @@ add_header_library(
)
add_header_library(
- float
+ types
HDRS
- float.h
+ types.h
DEPENDS
.architectures
.compiler
diff --git a/libc/src/__support/macros/properties/float.h b/libc/src/__support/macros/properties/types.h
index 08a1ab726cbd..595871e73b8f 100644
--- a/libc/src/__support/macros/properties/float.h
+++ b/libc/src/__support/macros/properties/types.h
@@ -1,15 +1,14 @@
-//===-- Float type support --------------------------------------*- C++ -*-===//
+//===-- Types support -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-// Floating point properties are a combination of compiler support, target OS
-// and target architecture.
+// Types detection and support.
-#ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_FLOAT_H
-#define LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_FLOAT_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_TYPES_H
+#define LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_TYPES_H
#include "include/llvm-libc-macros/float-macros.h" // LDBL_MANT_DIG
#include "include/llvm-libc-types/float128.h" // float128
@@ -28,36 +27,34 @@
#endif
// float16 support.
+// TODO: move this logic to "llvm-libc-types/float16.h"
#if defined(LIBC_TARGET_ARCH_IS_X86_64) && defined(LIBC_TARGET_CPU_HAS_SSE2)
#if (defined(LIBC_COMPILER_CLANG_VER) && (LIBC_COMPILER_CLANG_VER >= 1500)) || \
(defined(LIBC_COMPILER_GCC_VER) && (LIBC_COMPILER_GCC_VER >= 1201))
-#define LIBC_COMPILER_HAS_C23_FLOAT16
+#define LIBC_TYPES_HAS_FLOAT16
+using float16 = _Float16;
#endif
#endif
#if defined(LIBC_TARGET_ARCH_IS_AARCH64)
#if (defined(LIBC_COMPILER_CLANG_VER) && (LIBC_COMPILER_CLANG_VER >= 900)) || \
(defined(LIBC_COMPILER_GCC_VER) && (LIBC_COMPILER_GCC_VER >= 1301))
-#define LIBC_COMPILER_HAS_C23_FLOAT16
+#define LIBC_TYPES_HAS_FLOAT16
+using float16 = _Float16;
#endif
#endif
#if defined(LIBC_TARGET_ARCH_IS_ANY_RISCV)
#if (defined(LIBC_COMPILER_CLANG_VER) && (LIBC_COMPILER_CLANG_VER >= 1300)) || \
(defined(LIBC_COMPILER_GCC_VER) && (LIBC_COMPILER_GCC_VER >= 1301))
-#define LIBC_COMPILER_HAS_C23_FLOAT16
-#endif
-#endif
-
-#if defined(LIBC_COMPILER_HAS_C23_FLOAT16)
+#define LIBC_TYPES_HAS_FLOAT16
using float16 = _Float16;
-#define LIBC_HAS_FLOAT16
+#endif
#endif
// float128 support.
#if defined(LIBC_COMPILER_HAS_C23_FLOAT128) || \
defined(LIBC_COMPILER_HAS_FLOAT128_EXTENSION) || \
defined(LIBC_LONG_DOUBLE_IS_FLOAT128)
-// TODO: Replace with LIBC_HAS_FLOAT128
-#define LIBC_COMPILER_HAS_FLOAT128
+#define LIBC_TYPES_HAS_FLOAT128
#endif
-#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_FLOAT_H
+#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_TYPES_H
diff --git a/libc/src/__support/memory_size.h b/libc/src/__support/memory_size.h
index 94aee2520afa..7bd16a1695be 100644
--- a/libc/src/__support/memory_size.h
+++ b/libc/src/__support/memory_size.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_MEMORY_SIZE_H
+#define LLVM_LIBC_SRC___SUPPORT_MEMORY_SIZE_H
+
#include "src/__support/CPP/bit.h" // has_single_bit
#include "src/__support/CPP/limits.h"
#include "src/__support/CPP/type_traits.h"
@@ -83,3 +86,5 @@ public:
};
} // namespace internal
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MEMORY_SIZE_H
diff --git a/libc/src/__support/threads/gpu/mutex.h b/libc/src/__support/threads/gpu/mutex.h
index 7a23604b5b98..71d0ef04cbfe 100644
--- a/libc/src/__support/threads/gpu/mutex.h
+++ b/libc/src/__support/threads/gpu/mutex.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_THREAD_GPU_MUTEX_H
-#define LLVM_LIBC_SRC___SUPPORT_THREAD_GPU_MUTEX_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
+#define LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
#include "src/__support/macros/attributes.h"
#include "src/__support/threads/mutex_common.h"
@@ -28,4 +28,4 @@ struct Mutex {
} // namespace LIBC_NAMESPACE
-#endif
+#endif // LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
diff --git a/libc/src/assert/assert.h b/libc/src/assert/assert.h
index 0318a934acca..6f352af1988b 100644
--- a/libc/src/assert/assert.h
+++ b/libc/src/assert/assert.h
@@ -1,3 +1,4 @@
+// NOLINT(llvm-header-guard) https://github.com/llvm/llvm-project/issues/83339
//===-- Internal header for assert ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -21,4 +22,4 @@
((e) ? (void)0 \
: LIBC_NAMESPACE::__assert_fail(#e, __FILE__, __LINE__, \
__PRETTY_FUNCTION__))
-#endif
+#endif // NDEBUG
diff --git a/libc/src/gpu/rpc_host_call.h b/libc/src/gpu/rpc_host_call.h
index 14393ab95dc1..473d90ba48fd 100644
--- a/libc/src/gpu/rpc_host_call.h
+++ b/libc/src/gpu/rpc_host_call.h
@@ -17,4 +17,4 @@ void rpc_host_call(void *fn, void *buffer, size_t size);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_GPU_RPC_H_HOST_CALL
+#endif // LLVM_LIBC_SRC_GPU_RPC_HOST_CALL_H
diff --git a/libc/src/math/amdgpu/CMakeLists.txt b/libc/src/math/amdgpu/CMakeLists.txt
index cb77341aa505..c300730208d5 100644
--- a/libc/src/math/amdgpu/CMakeLists.txt
+++ b/libc/src/math/amdgpu/CMakeLists.txt
@@ -177,46 +177,6 @@ add_entrypoint_object(
)
add_entrypoint_object(
- lround
- SRCS
- lround.cpp
- HDRS
- ../lround.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
- lroundf
- SRCS
- lroundf.cpp
- HDRS
- ../lroundf.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
- llround
- SRCS
- llround.cpp
- HDRS
- ../llround.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
- llroundf
- SRCS
- llroundf.cpp
- HDRS
- ../llroundf.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
modf
SRCS
modf.cpp
diff --git a/libc/src/math/amdgpu/declarations.h b/libc/src/math/amdgpu/declarations.h
index 780d5f0a1140..5d7f3c9609d2 100644
--- a/libc/src/math/amdgpu/declarations.h
+++ b/libc/src/math/amdgpu/declarations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_AMDGPU_DECLARATIONS_H
-#define LLVM_LIBC_SRC_MATH_GPU_AMDGPU_DECLARATIONS_H
+#ifndef LLVM_LIBC_SRC_MATH_AMDGPU_DECLARATIONS_H
+#define LLVM_LIBC_SRC_MATH_AMDGPU_DECLARATIONS_H
#include "platform.h"
@@ -83,4 +83,4 @@ float __ocml_tgamma_f32(float);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_AMDGPU_DECLARATIONS_H
+#endif // LLVM_LIBC_SRC_MATH_AMDGPU_DECLARATIONS_H
diff --git a/libc/src/math/amdgpu/exp.cpp b/libc/src/math/amdgpu/exp.cpp
index 8590ac759019..d19c73dd0242 100644
--- a/libc/src/math/amdgpu/exp.cpp
+++ b/libc/src/math/amdgpu/exp.cpp
@@ -13,6 +13,6 @@
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(double, exp, (double x)) { return __builtin_exp(x); }
+LLVM_LIBC_FUNCTION(double, exp, (double x)) { return __ocml_exp_f64(x); }
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/amdgpu/expf.cpp b/libc/src/math/amdgpu/expf.cpp
index d682f6293a6c..33393078cfa3 100644
--- a/libc/src/math/amdgpu/expf.cpp
+++ b/libc/src/math/amdgpu/expf.cpp
@@ -13,6 +13,6 @@
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(float, expf, (float x)) { return __builtin_expf(x); }
+LLVM_LIBC_FUNCTION(float, expf, (float x)) { return __ocml_exp_f32(x); }
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/amdgpu/fmax.cpp b/libc/src/math/amdgpu/fmax.cpp
index 09624cc6f092..09f0f942a042 100644
--- a/libc/src/math/amdgpu/fmax.cpp
+++ b/libc/src/math/amdgpu/fmax.cpp
@@ -15,10 +15,6 @@
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmax, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) &
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmax(x, y);
}
diff --git a/libc/src/math/amdgpu/fmaxf.cpp b/libc/src/math/amdgpu/fmaxf.cpp
index f6ed46699a04..5913a85df637 100644
--- a/libc/src/math/amdgpu/fmaxf.cpp
+++ b/libc/src/math/amdgpu/fmaxf.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmaxf.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fmaxf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) &
- cpp::bit_cast<uint32_t>(y));
return __builtin_fmaxf(x, y);
}
diff --git a/libc/src/math/amdgpu/fmin.cpp b/libc/src/math/amdgpu/fmin.cpp
index 8977ff7a066c..0d6f3521dcb7 100644
--- a/libc/src/math/amdgpu/fmin.cpp
+++ b/libc/src/math/amdgpu/fmin.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmin.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmin, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) |
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmin(x, y);
}
diff --git a/libc/src/math/amdgpu/fminf.cpp b/libc/src/math/amdgpu/fminf.cpp
index 3be55257f616..42744abfb3b0 100644
--- a/libc/src/math/amdgpu/fminf.cpp
+++ b/libc/src/math/amdgpu/fminf.cpp
@@ -8,17 +8,11 @@
#include "src/math/fminf.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fminf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) |
- cpp::bit_cast<uint32_t>(y));
return __builtin_fminf(x, y);
}
diff --git a/libc/src/math/amdgpu/platform.h b/libc/src/math/amdgpu/platform.h
index e5a9f810cd10..29d6cac1fa49 100644
--- a/libc/src/math/amdgpu/platform.h
+++ b/libc/src/math/amdgpu/platform.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_AMDGPU_PLATFORM_H
-#define LLVM_LIBC_SRC_MATH_GPU_AMDGPU_PLATFORM_H
+#ifndef LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
+#define LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
#include "src/__support/macros/attributes.h"
@@ -51,4 +51,4 @@ extern const LIBC_INLINE_VAR uint32_t __oclc_ISA_version = 9000;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_AMDGPU_PLATFORM_H
+#endif // LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
diff --git a/libc/src/math/ceilf128.h b/libc/src/math/ceilf128.h
index db8feffc87ba..b0c4020718b2 100644
--- a/libc/src/math/ceilf128.h
+++ b/libc/src/math/ceilf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_CEILF128_H
#define LLVM_LIBC_SRC_MATH_CEILF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/copysignf128.h b/libc/src/math/copysignf128.h
index 5e40657de33b..06c194985d72 100644
--- a/libc/src/math/copysignf128.h
+++ b/libc/src/math/copysignf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_COPYSIGNF128_H
#define LLVM_LIBC_SRC_MATH_COPYSIGNF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
@@ -17,4 +17,4 @@ float128 copysignf128(float128 x, float128 y);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_COPYSIGN_H
+#endif // LLVM_LIBC_SRC_MATH_COPYSIGNF128_H
diff --git a/libc/src/math/fabsf128.h b/libc/src/math/fabsf128.h
index 5999757decfd..0a275025a5cf 100644
--- a/libc/src/math/fabsf128.h
+++ b/libc/src/math/fabsf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FABSF128_H
#define LLVM_LIBC_SRC_MATH_FABSF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/fdimf128.h b/libc/src/math/fdimf128.h
index c6f488a586dc..f0485aba4822 100644
--- a/libc/src/math/fdimf128.h
+++ b/libc/src/math/fdimf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FDIMF128_H
#define LLVM_LIBC_SRC_MATH_FDIMF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/floorf128.h b/libc/src/math/floorf128.h
index 86b9a8e9265e..b97c4b6c6cec 100644
--- a/libc/src/math/floorf128.h
+++ b/libc/src/math/floorf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FLOORF128_H
#define LLVM_LIBC_SRC_MATH_FLOORF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/fmaxf128.h b/libc/src/math/fmaxf128.h
index 39eaaf616dd5..a4407d9655af 100644
--- a/libc/src/math/fmaxf128.h
+++ b/libc/src/math/fmaxf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FMAXF128_H
#define LLVM_LIBC_SRC_MATH_FMAXF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/fminf128.h b/libc/src/math/fminf128.h
index b3d1bec8e2ad..d2ed593250a4 100644
--- a/libc/src/math/fminf128.h
+++ b/libc/src/math/fminf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FMINF128_H
#define LLVM_LIBC_SRC_MATH_FMINF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/frexpf128.h b/libc/src/math/frexpf128.h
index 5d70860fa155..55c4a47cc80c 100644
--- a/libc/src/math/frexpf128.h
+++ b/libc/src/math/frexpf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FREXPF128_H
#define LLVM_LIBC_SRC_MATH_FREXPF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 120ada8202ab..82d2a5e66af7 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -43,7 +43,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -216,7 +216,7 @@ add_entrypoint_object(
HDRS
../fabsf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
COMPILE_OPTIONS
-O3
@@ -267,7 +267,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -316,7 +316,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -365,7 +365,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -908,7 +908,7 @@ add_entrypoint_object(
HDRS
../copysignf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
COMPILE_OPTIONS
-O3
@@ -959,7 +959,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1008,7 +1008,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1057,7 +1057,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1106,7 +1106,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1412,7 +1412,7 @@ add_entrypoint_object(
HDRS
../fminf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
COMPILE_OPTIONS
-O3
@@ -1461,7 +1461,7 @@ add_entrypoint_object(
HDRS
../fmaxf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
COMPILE_OPTIONS
-O3
@@ -1510,7 +1510,7 @@ add_entrypoint_object(
HDRS
../sqrtf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.sqrt
COMPILE_OPTIONS
-O3
@@ -1647,7 +1647,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
)
diff --git a/libc/src/math/generic/exp_utils.h b/libc/src/math/generic/exp_utils.h
index 49d9a8192d34..405678c356f3 100644
--- a/libc/src/math/generic/exp_utils.h
+++ b/libc/src/math/generic/exp_utils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_EXP_UTILS_H
-#define LLVM_LIBC_SRC_MATH_EXP_UTILS_H
+#ifndef LLVM_LIBC_SRC_MATH_GENERIC_EXP_UTILS_H
+#define LLVM_LIBC_SRC_MATH_GENERIC_EXP_UTILS_H
#include <stdint.h>
@@ -30,4 +30,4 @@ extern const Exp2fDataTable exp2f_data;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_EXP_UTILS_H
+#endif // LLVM_LIBC_SRC_MATH_GENERIC_EXP_UTILS_H
diff --git a/libc/src/math/ilogbf128.h b/libc/src/math/ilogbf128.h
index df1145ffc0f8..d8fe3b970973 100644
--- a/libc/src/math/ilogbf128.h
+++ b/libc/src/math/ilogbf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_ILOGBF128_H
#define LLVM_LIBC_SRC_MATH_ILOGBF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/ldexpf128.h b/libc/src/math/ldexpf128.h
index adf9d8f56b35..7aa6ded3c8e4 100644
--- a/libc/src/math/ldexpf128.h
+++ b/libc/src/math/ldexpf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LDEXPF128_H
#define LLVM_LIBC_SRC_MATH_LDEXPF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogb.h b/libc/src/math/llogb.h
index 2d95877425e5..b51f89fc0416 100644
--- a/libc/src/math/llogb.h
+++ b/libc/src/math/llogb.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGB_H
#define LLVM_LIBC_SRC_MATH_LLOGB_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogbf.h b/libc/src/math/llogbf.h
index 512e174b66ee..af4aa8a5b15c 100644
--- a/libc/src/math/llogbf.h
+++ b/libc/src/math/llogbf.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGBF_H
#define LLVM_LIBC_SRC_MATH_LLOGBF_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogbf128.h b/libc/src/math/llogbf128.h
index 7fb74d4bbe73..ce7c872a63db 100644
--- a/libc/src/math/llogbf128.h
+++ b/libc/src/math/llogbf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGBF128_H
#define LLVM_LIBC_SRC_MATH_LLOGBF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogbl.h b/libc/src/math/llogbl.h
index 4033100fbe3d..3c323a3af2a9 100644
--- a/libc/src/math/llogbl.h
+++ b/libc/src/math/llogbl.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGBL_H
#define LLVM_LIBC_SRC_MATH_LLOGBL_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/logbf128.h b/libc/src/math/logbf128.h
index 8baa076af1bf..7823bbd615b8 100644
--- a/libc/src/math/logbf128.h
+++ b/libc/src/math/logbf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LOGBF128_H
#define LLVM_LIBC_SRC_MATH_LOGBF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/nvptx/CMakeLists.txt b/libc/src/math/nvptx/CMakeLists.txt
index 194e1fa7af49..56bff1472f13 100644
--- a/libc/src/math/nvptx/CMakeLists.txt
+++ b/libc/src/math/nvptx/CMakeLists.txt
@@ -178,46 +178,6 @@ add_entrypoint_object(
)
add_entrypoint_object(
- lround
- SRCS
- lround.cpp
- HDRS
- ../lround.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
- lroundf
- SRCS
- lroundf.cpp
- HDRS
- ../lroundf.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
- llround
- SRCS
- llround.cpp
- HDRS
- ../llround.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
- llroundf
- SRCS
- llroundf.cpp
- HDRS
- ../llroundf.h
- COMPILE_OPTIONS
- -O2
-)
-
-add_entrypoint_object(
modf
SRCS
modf.cpp
diff --git a/libc/src/math/nvptx/declarations.h b/libc/src/math/nvptx/declarations.h
index 9cb2be67b85b..d41b16c8eec9 100644
--- a/libc/src/math/nvptx/declarations.h
+++ b/libc/src/math/nvptx/declarations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_NVPTX_DECLARATIONS_H
-#define LLVM_LIBC_SRC_MATH_GPU_NVPTX_DECLARATIONS_H
+#ifndef LLVM_LIBC_SRC_MATH_NVPTX_DECLARATIONS_H
+#define LLVM_LIBC_SRC_MATH_NVPTX_DECLARATIONS_H
namespace LIBC_NAMESPACE {
@@ -86,4 +86,4 @@ float __nv_tgammaf(float);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_NVPTX_DECLARATIONS_H
+#endif // LLVM_LIBC_SRC_MATH_NVPTX_DECLARATIONS_H
diff --git a/libc/src/math/nvptx/fmax.cpp b/libc/src/math/nvptx/fmax.cpp
index 09624cc6f092..3ba65d7eccd3 100644
--- a/libc/src/math/nvptx/fmax.cpp
+++ b/libc/src/math/nvptx/fmax.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmax.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmax, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) &
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmax(x, y);
}
diff --git a/libc/src/math/nvptx/fmaxf.cpp b/libc/src/math/nvptx/fmaxf.cpp
index f6ed46699a04..e977082b39f4 100644
--- a/libc/src/math/nvptx/fmaxf.cpp
+++ b/libc/src/math/nvptx/fmaxf.cpp
@@ -15,10 +15,6 @@
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fmaxf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) &
- cpp::bit_cast<uint32_t>(y));
return __builtin_fmaxf(x, y);
}
diff --git a/libc/src/math/nvptx/fmin.cpp b/libc/src/math/nvptx/fmin.cpp
index 8977ff7a066c..0d6f3521dcb7 100644
--- a/libc/src/math/nvptx/fmin.cpp
+++ b/libc/src/math/nvptx/fmin.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmin.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmin, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) |
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmin(x, y);
}
diff --git a/libc/src/math/nvptx/fminf.cpp b/libc/src/math/nvptx/fminf.cpp
index 3be55257f616..42744abfb3b0 100644
--- a/libc/src/math/nvptx/fminf.cpp
+++ b/libc/src/math/nvptx/fminf.cpp
@@ -8,17 +8,11 @@
#include "src/math/fminf.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fminf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) |
- cpp::bit_cast<uint32_t>(y));
return __builtin_fminf(x, y);
}
diff --git a/libc/src/math/nvptx/lround.cpp b/libc/src/math/nvptx/lround.cpp
deleted file mode 100644
index 51e8f2245af8..000000000000
--- a/libc/src/math/nvptx/lround.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- Implementation of the GPU lround function -------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "src/math/lround.h"
-#include "src/__support/common.h"
-
-namespace LIBC_NAMESPACE {
-
-LLVM_LIBC_FUNCTION(long, lround, (double x)) { return __builtin_lround(x); }
-
-} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/nvptx/lroundf.cpp b/libc/src/math/nvptx/lroundf.cpp
deleted file mode 100644
index 2a6fe7200d8c..000000000000
--- a/libc/src/math/nvptx/lroundf.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- Implementation of the GPU lroundf function ------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "src/math/lroundf.h"
-#include "src/__support/common.h"
-
-namespace LIBC_NAMESPACE {
-
-LLVM_LIBC_FUNCTION(long, lroundf, (float x)) { return __builtin_lroundf(x); }
-
-} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/nvptx/nvptx.h b/libc/src/math/nvptx/nvptx.h
index 110d570a84a3..5f9b32f311ea 100644
--- a/libc/src/math/nvptx/nvptx.h
+++ b/libc/src/math/nvptx/nvptx.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_NVPTX_H
-#define LLVM_LIBC_SRC_MATH_GPU_NVPTX_H
+#ifndef LLVM_LIBC_SRC_MATH_NVPTX_NVPTX_H
+#define LLVM_LIBC_SRC_MATH_NVPTX_NVPTX_H
#include "declarations.h"
@@ -99,4 +99,4 @@ LIBC_INLINE float tgammaf(float x) { return __nv_tgammaf(x); }
} // namespace internal
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_NVPTX_H
+#endif // LLVM_LIBC_SRC_MATH_NVPTX_NVPTX_H
diff --git a/libc/src/math/roundf128.h b/libc/src/math/roundf128.h
index c67c946cc5e8..e4aca17d7eb6 100644
--- a/libc/src/math/roundf128.h
+++ b/libc/src/math/roundf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_ROUNDF128_H
#define LLVM_LIBC_SRC_MATH_ROUNDF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/sqrtf128.h b/libc/src/math/sqrtf128.h
index bccb6bbb6332..9da9eb69374c 100644
--- a/libc/src/math/sqrtf128.h
+++ b/libc/src/math/sqrtf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_SQRTF128_H
#define LLVM_LIBC_SRC_MATH_SQRTF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/truncf128.h b/libc/src/math/truncf128.h
index c92c8202d4ee..5eb6116551d1 100644
--- a/libc/src/math/truncf128.h
+++ b/libc/src/math/truncf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_TRUNCF128_H
#define LLVM_LIBC_SRC_MATH_TRUNCF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/search/hsearch/global.h b/libc/src/search/hsearch/global.h
index 292008cb0c80..9579195a2f3e 100644
--- a/libc/src/search/hsearch/global.h
+++ b/libc/src/search/hsearch/global.h
@@ -6,8 +6,13 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC_SEARCH_HSEARCH_GLOBAL_H
+#define LLVM_LIBC_SRC_SEARCH_HSEARCH_GLOBAL_H
+
namespace LIBC_NAMESPACE {
namespace internal {
extern struct HashTable *global_hash_table;
}
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_SEARCH_HSEARCH_GLOBAL_H
diff --git a/libc/src/stdbit/CMakeLists.txt b/libc/src/stdbit/CMakeLists.txt
index 5fb77d21e57a..8bc7dd7852bb 100644
--- a/libc/src/stdbit/CMakeLists.txt
+++ b/libc/src/stdbit/CMakeLists.txt
@@ -9,6 +9,7 @@ set(prefixes
first_trailing_one
count_zeros
count_ones
+ has_single_bit
)
set(suffixes c s i l ll)
foreach(prefix IN LISTS prefixes)
diff --git a/libc/src/math/nvptx/llroundf.cpp b/libc/src/stdbit/stdc_has_single_bit_uc.cpp
index 897ed15b6928..e5acdc2a71b4 100644
--- a/libc/src/math/nvptx/llroundf.cpp
+++ b/libc/src/stdbit/stdc_has_single_bit_uc.cpp
@@ -1,4 +1,4 @@
-//===-- Implementation of the GPU llroundf function -----------------------===//
+//===-- Implementation of stdc_has_single_bit_uc --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#include "src/math/llroundf.h"
+#include "src/stdbit/stdc_has_single_bit_uc.h"
+
+#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(long long, llroundf, (float x)) {
- return __builtin_lroundf(x);
+LLVM_LIBC_FUNCTION(bool, stdc_has_single_bit_uc, (unsigned char value)) {
+ return cpp::has_single_bit(value);
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/amdgpu/lround.cpp b/libc/src/stdbit/stdc_has_single_bit_uc.h
index 51e8f2245af8..028d4ee71050 100644
--- a/libc/src/math/amdgpu/lround.cpp
+++ b/libc/src/stdbit/stdc_has_single_bit_uc.h
@@ -1,4 +1,4 @@
-//===-- Implementation of the GPU lround function -------------------------===//
+//===-- Implementation header for stdc_has_single_bit_uc --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,11 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#include "src/math/lround.h"
-#include "src/__support/common.h"
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UC_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UC_H
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(long, lround, (double x)) { return __builtin_lround(x); }
+bool stdc_has_single_bit_uc(unsigned char value);
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UC_H
diff --git a/libc/src/math/nvptx/llround.cpp b/libc/src/stdbit/stdc_has_single_bit_ui.cpp
index afd98308730a..37578882324a 100644
--- a/libc/src/math/nvptx/llround.cpp
+++ b/libc/src/stdbit/stdc_has_single_bit_ui.cpp
@@ -1,4 +1,4 @@
-//===-- Implementation of the GPU llround function ------------------------===//
+//===-- Implementation of stdc_has_single_bit_ui --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#include "src/math/llround.h"
+#include "src/stdbit/stdc_has_single_bit_ui.h"
+
+#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(long long, llround, (double x)) {
- return __builtin_llround(x);
+LLVM_LIBC_FUNCTION(bool, stdc_has_single_bit_ui, (unsigned value)) {
+ return cpp::has_single_bit(value);
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/amdgpu/lroundf.cpp b/libc/src/stdbit/stdc_has_single_bit_ui.h
index 2a6fe7200d8c..1e8cd9afaee8 100644
--- a/libc/src/math/amdgpu/lroundf.cpp
+++ b/libc/src/stdbit/stdc_has_single_bit_ui.h
@@ -1,4 +1,4 @@
-//===-- Implementation of the GPU lroundf function ------------------------===//
+//===-- Implementation header for stdc_has_single_bit_ui --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,11 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#include "src/math/lroundf.h"
-#include "src/__support/common.h"
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UI_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UI_H
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(long, lroundf, (float x)) { return __builtin_lroundf(x); }
+bool stdc_has_single_bit_ui(unsigned value);
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UI_H
diff --git a/libc/src/math/amdgpu/llround.cpp b/libc/src/stdbit/stdc_has_single_bit_ul.cpp
index afd98308730a..85133ab81cc6 100644
--- a/libc/src/math/amdgpu/llround.cpp
+++ b/libc/src/stdbit/stdc_has_single_bit_ul.cpp
@@ -1,4 +1,4 @@
-//===-- Implementation of the GPU llround function ------------------------===//
+//===-- Implementation of stdc_has_single_bit_ul --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#include "src/math/llround.h"
+#include "src/stdbit/stdc_has_single_bit_ul.h"
+
+#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(long long, llround, (double x)) {
- return __builtin_llround(x);
+LLVM_LIBC_FUNCTION(bool, stdc_has_single_bit_ul, (unsigned long value)) {
+ return cpp::has_single_bit(value);
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_has_single_bit_ul.h b/libc/src/stdbit/stdc_has_single_bit_ul.h
new file mode 100644
index 000000000000..9b924fca9f06
--- /dev/null
+++ b/libc/src/stdbit/stdc_has_single_bit_ul.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_has_single_bit_ul --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UL_H
+
+namespace LIBC_NAMESPACE {
+
+bool stdc_has_single_bit_ul(unsigned long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_UL_H
diff --git a/libc/src/stdbit/stdc_has_single_bit_ull.cpp b/libc/src/stdbit/stdc_has_single_bit_ull.cpp
new file mode 100644
index 000000000000..4491cf2b98b6
--- /dev/null
+++ b/libc/src/stdbit/stdc_has_single_bit_ull.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of stdc_has_single_bit_ull -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdbit/stdc_has_single_bit_ull.h"
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(bool, stdc_has_single_bit_ull, (unsigned long long value)) {
+ return cpp::has_single_bit(value);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_has_single_bit_ull.h b/libc/src/stdbit/stdc_has_single_bit_ull.h
new file mode 100644
index 000000000000..d4802bc28727
--- /dev/null
+++ b/libc/src/stdbit/stdc_has_single_bit_ull.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_has_single_bit_ull -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_ULL_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_ULL_H
+
+namespace LIBC_NAMESPACE {
+
+bool stdc_has_single_bit_ull(unsigned long long value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_ULL_H
diff --git a/libc/src/math/amdgpu/llroundf.cpp b/libc/src/stdbit/stdc_has_single_bit_us.cpp
index 897ed15b6928..7a42ae553aa2 100644
--- a/libc/src/math/amdgpu/llroundf.cpp
+++ b/libc/src/stdbit/stdc_has_single_bit_us.cpp
@@ -1,4 +1,4 @@
-//===-- Implementation of the GPU llroundf function -----------------------===//
+//===-- Implementation of stdc_has_single_bit_us --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#include "src/math/llroundf.h"
+#include "src/stdbit/stdc_has_single_bit_us.h"
+
+#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(long long, llroundf, (float x)) {
- return __builtin_lroundf(x);
+LLVM_LIBC_FUNCTION(bool, stdc_has_single_bit_us, (unsigned short value)) {
+ return cpp::has_single_bit(value);
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdbit/stdc_has_single_bit_us.h b/libc/src/stdbit/stdc_has_single_bit_us.h
new file mode 100644
index 000000000000..201ff4954c3b
--- /dev/null
+++ b/libc/src/stdbit/stdc_has_single_bit_us.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for stdc_has_single_bit_us --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_US_H
+#define LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_US_H
+
+namespace LIBC_NAMESPACE {
+
+bool stdc_has_single_bit_us(unsigned short value);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDBIT_STDC_HAS_SINGLE_BIT_US_H
diff --git a/libc/src/string/memory_utils/aarch64/inline_bcmp.h b/libc/src/string/memory_utils/aarch64/inline_bcmp.h
index 8e0827f1361f..b80b57818763 100644
--- a/libc/src/string/memory_utils/aarch64/inline_bcmp.h
+++ b/libc/src/string/memory_utils/aarch64/inline_bcmp.h
@@ -27,7 +27,7 @@ namespace LIBC_NAMESPACE {
}
switch (count) {
case 0:
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
case 1:
return generic::Bcmp<uint8_t>::block(p1, p2);
case 2:
diff --git a/libc/src/string/memory_utils/aarch64/inline_memcmp.h b/libc/src/string/memory_utils/aarch64/inline_memcmp.h
index 839c8ec13854..d0e0bd7cf025 100644
--- a/libc/src/string/memory_utils/aarch64/inline_memcmp.h
+++ b/libc/src/string/memory_utils/aarch64/inline_memcmp.h
@@ -50,7 +50,7 @@ inline_memcmp_aarch64_neon_gt16(CPtr p1, CPtr p2, size_t count) {
LIBC_INLINE MemcmpReturnType inline_memcmp_aarch64(CPtr p1, CPtr p2,
size_t count) {
if (count == 0)
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
if (count == 1)
return generic::Memcmp<uint8_t>::block(p1, p2);
if (count == 2)
diff --git a/libc/src/string/memory_utils/aarch64/inline_memcpy.h b/libc/src/string/memory_utils/aarch64/inline_memcpy.h
index 0a159f476cd6..ea1a03f4fa0b 100644
--- a/libc/src/string/memory_utils/aarch64/inline_memcpy.h
+++ b/libc/src/string/memory_utils/aarch64/inline_memcpy.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
-#define LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
+#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
+#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
#include "src/__support/macros/config.h" // LIBC_INLINE
#include "src/string/memory_utils/op_builtin.h"
@@ -45,4 +45,4 @@ inline_memcpy_aarch64(Ptr __restrict dst, CPtr __restrict src, size_t count) {
} // namespace LIBC_NAMESPACE
-#endif // LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
+#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
diff --git a/libc/src/string/memory_utils/generic/aligned_access.h b/libc/src/string/memory_utils/generic/aligned_access.h
index 65bc63f6cbe5..b6ece816756c 100644
--- a/libc/src/string/memory_utils/generic/aligned_access.h
+++ b/libc/src/string/memory_utils/generic/aligned_access.h
@@ -135,7 +135,7 @@ inline_bcmp_aligned_access_32bit(CPtr p1, CPtr p2, size_t count) {
uint32_t a = load32_aligned<uint32_t>(p1, offset);
uint32_t b = load32_aligned(p2, offset, p2_alignment);
if (a != b)
- return BcmpReturnType::NONZERO();
+ return BcmpReturnType::nonzero();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}
@@ -154,7 +154,7 @@ inline_bcmp_aligned_access_64bit(CPtr p1, CPtr p2, size_t count) {
uint64_t a = load64_aligned<uint64_t>(p1, offset);
uint64_t b = load64_aligned(p2, offset, p2_alignment);
if (a != b)
- return BcmpReturnType::NONZERO();
+ return BcmpReturnType::nonzero();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}
diff --git a/libc/src/string/memory_utils/generic/byte_per_byte.h b/libc/src/string/memory_utils/generic/byte_per_byte.h
index a666c5da3136..9515398794df 100644
--- a/libc/src/string/memory_utils/generic/byte_per_byte.h
+++ b/libc/src/string/memory_utils/generic/byte_per_byte.h
@@ -56,8 +56,8 @@ inline_bcmp_byte_per_byte(CPtr p1, CPtr p2, size_t count, size_t offset = 0) {
LIBC_LOOP_NOUNROLL
for (; offset < count; ++offset)
if (p1[offset] != p2[offset])
- return BcmpReturnType::NONZERO();
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::nonzero();
+ return BcmpReturnType::zero();
}
[[maybe_unused]] LIBC_INLINE MemcmpReturnType
@@ -70,7 +70,7 @@ inline_memcmp_byte_per_byte(CPtr p1, CPtr p2, size_t count, size_t offset = 0) {
if (diff)
return diff;
}
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/string/memory_utils/op_aarch64.h b/libc/src/string/memory_utils/op_aarch64.h
index 3aae328945dd..6a2013b2a8fa 100644
--- a/libc/src/string/memory_utils/op_aarch64.h
+++ b/libc/src/string/memory_utils/op_aarch64.h
@@ -108,7 +108,7 @@ template <size_t Size> struct Bcmp {
} else {
static_assert(cpp::always_false<decltype(Size)>, "SIZE not implemented");
}
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType tail(CPtr p1, CPtr p2, size_t count) {
@@ -154,7 +154,7 @@ template <size_t Size> struct Bcmp {
} else {
static_assert(cpp::always_false<decltype(Size)>, "SIZE not implemented");
}
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType loop_and_tail(CPtr p1, CPtr p2,
@@ -217,7 +217,7 @@ LIBC_INLINE MemcmpReturnType cmp<uint64_t>(CPtr p1, CPtr p2, size_t offset) {
const auto b = load_be<uint64_t>(p2, offset);
if (a != b)
return a > b ? 1 : -1;
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
///////////////////////////////////////////////////////////////////////////////
@@ -245,7 +245,7 @@ LIBC_INLINE MemcmpReturnType cmp<uint8x16_t>(CPtr p1, CPtr p2, size_t offset) {
return cmp_neq_uint64_t(a, b);
offset += sizeof(uint64_t);
}
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
///////////////////////////////////////////////////////////////////////////////
@@ -262,7 +262,7 @@ LIBC_INLINE MemcmpReturnType cmp<uint8x16x2_t>(CPtr p1, CPtr p2,
return cmp_neq_uint64_t(a, b);
offset += sizeof(uint64_t);
}
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
} // namespace LIBC_NAMESPACE::generic
diff --git a/libc/src/string/memory_utils/op_builtin.h b/libc/src/string/memory_utils/op_builtin.h
index 3c17eef781e5..75dd4de53a47 100644
--- a/libc/src/string/memory_utils/op_builtin.h
+++ b/libc/src/string/memory_utils/op_builtin.h
@@ -105,22 +105,22 @@ template <size_t Size> struct Bcmp {
LIBC_INLINE static BcmpReturnType block(CPtr, CPtr) {
static_assert(cpp::always_false<decltype(Size)>,
"Missing __builtin_memcmp_inline");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType head_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType loop_and_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
};
@@ -132,22 +132,22 @@ template <size_t Size> struct Memcmp {
LIBC_INLINE static MemcmpReturnType block(CPtr, CPtr) {
static_assert(cpp::always_false<decltype(Size)>,
"Missing __builtin_memcmp_inline");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
LIBC_INLINE static MemcmpReturnType tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
LIBC_INLINE static MemcmpReturnType head_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
LIBC_INLINE static MemcmpReturnType loop_and_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
};
diff --git a/libc/src/string/memory_utils/op_generic.h b/libc/src/string/memory_utils/op_generic.h
index db218f8577ab..c7dbd5dd1d6c 100644
--- a/libc/src/string/memory_utils/op_generic.h
+++ b/libc/src/string/memory_utils/op_generic.h
@@ -390,7 +390,7 @@ private:
if constexpr (cmp_is_expensive<T>::value) {
if (!eq<T>(p1, p2, offset))
return cmp_neq<T>(p1, p2, offset);
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
} else {
return cmp<T>(p1, p2, offset);
}
@@ -443,7 +443,7 @@ public:
for (; offset < count; offset += SIZE)
if (auto value = cmp<T>(p1, p2, offset))
return value;
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
}
@@ -475,7 +475,7 @@ template <typename T, typename... TS> struct MemcmpSequence {
if constexpr (sizeof...(TS) > 0)
return MemcmpSequence<TS...>::block(p1 + sizeof(T), p2 + sizeof(T));
else
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
};
@@ -521,7 +521,7 @@ template <typename T> struct Bcmp {
for (; offset < count; offset += SIZE)
if (const auto value = neq<T>(p1, p2, offset))
return value;
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
}
@@ -547,7 +547,7 @@ template <typename T, typename... TS> struct BcmpSequence {
if constexpr (sizeof...(TS) > 0)
return BcmpSequence<TS...>::block(p1 + sizeof(T), p2 + sizeof(T));
else
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
};
diff --git a/libc/src/string/memory_utils/riscv/inline_memmove.h b/libc/src/string/memory_utils/riscv/inline_memmove.h
index 1c26917a96d9..1a95a8ebba07 100644
--- a/libc/src/string/memory_utils/riscv/inline_memmove.h
+++ b/libc/src/string/memory_utils/riscv/inline_memmove.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
-#define LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
+#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
+#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
#include "src/__support/macros/attributes.h" // LIBC_INLINE
#include "src/__support/macros/properties/architectures.h" // LIBC_TARGET_ARCH_IS_RISCV64
@@ -24,4 +24,4 @@ inline_memmove_riscv(Ptr __restrict dst, CPtr __restrict src, size_t count) {
} // namespace LIBC_NAMESPACE
-#endif // LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
+#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
diff --git a/libc/src/string/memory_utils/utils.h b/libc/src/string/memory_utils/utils.h
index 543d45b7c4e3..701a84375ea8 100644
--- a/libc/src/string/memory_utils/utils.h
+++ b/libc/src/string/memory_utils/utils.h
@@ -130,8 +130,8 @@ template <typename T> struct StrictIntegralType {
}
// Helper to get the zero value.
- LIBC_INLINE static constexpr StrictIntegralType ZERO() { return {T(0)}; }
- LIBC_INLINE static constexpr StrictIntegralType NONZERO() { return {T(1)}; }
+ LIBC_INLINE static constexpr StrictIntegralType zero() { return {T(0)}; }
+ LIBC_INLINE static constexpr StrictIntegralType nonzero() { return {T(1)}; }
private:
T value;
diff --git a/libc/src/string/memory_utils/x86_64/inline_bcmp.h b/libc/src/string/memory_utils/x86_64/inline_bcmp.h
index 31aff86e6059..58eaedbbe015 100644
--- a/libc/src/string/memory_utils/x86_64/inline_bcmp.h
+++ b/libc/src/string/memory_utils/x86_64/inline_bcmp.h
@@ -58,7 +58,7 @@ inline_bcmp_x86_avx512bw_gt16(CPtr p1, CPtr p2, size_t count) {
[[maybe_unused]] LIBC_INLINE BcmpReturnType inline_bcmp_x86(CPtr p1, CPtr p2,
size_t count) {
if (count == 0)
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
if (count == 1)
return generic::Bcmp<uint8_t>::block(p1, p2);
if (count == 2)
diff --git a/libc/src/string/memory_utils/x86_64/inline_memcmp.h b/libc/src/string/memory_utils/x86_64/inline_memcmp.h
index d5fa77cdbbcd..6a315adcd566 100644
--- a/libc/src/string/memory_utils/x86_64/inline_memcmp.h
+++ b/libc/src/string/memory_utils/x86_64/inline_memcmp.h
@@ -59,7 +59,7 @@ inline_memcmp_x86_avx512bw_gt16(CPtr p1, CPtr p2, size_t count) {
LIBC_INLINE MemcmpReturnType inline_memcmp_x86(CPtr p1, CPtr p2, size_t count) {
if (count == 0)
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
if (count == 1)
return generic::Memcmp<uint8_t>::block(p1, p2);
if (count == 2)
diff --git a/libc/test/UnitTest/ExecuteFunction.h b/libc/test/UnitTest/ExecuteFunction.h
index 2129e63a3a00..95950567e74a 100644
--- a/libc/test/UnitTest/ExecuteFunction.h
+++ b/libc/test/UnitTest/ExecuteFunction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_EXECUTEFUNCTION_H
-#define LLVM_LIBC_UTILS_TESTUTILS_EXECUTEFUNCTION_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_EXECUTEFUNCTION_H
+#define LLVM_LIBC_TEST_UNITTEST_EXECUTEFUNCTION_H
#include <stdint.h>
@@ -49,4 +49,4 @@ const char *signal_as_string(int signum);
} // namespace testutils
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_TESTUTILS_EXECUTEFUNCTION_H
+#endif // LLVM_LIBC_TEST_UNITTEST_EXECUTEFUNCTION_H
diff --git a/libc/test/UnitTest/FPExceptMatcher.h b/libc/test/UnitTest/FPExceptMatcher.h
index 98c4f737d172..d36e98d22d4b 100644
--- a/libc/test/UnitTest/FPExceptMatcher.h
+++ b/libc/test/UnitTest/FPExceptMatcher.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_FPEXCEPTMATCHER_H
-#define LLVM_LIBC_UTILS_UNITTEST_FPEXCEPTMATCHER_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_FPEXCEPTMATCHER_H
+#define LLVM_LIBC_TEST_UNITTEST_FPEXCEPTMATCHER_H
#ifndef LIBC_COPT_TEST_USE_FUCHSIA
@@ -61,4 +61,4 @@ public:
#define ASSERT_RAISES_FP_EXCEPT(func) ASSERT_DEATH(func, WITH_SIGNAL(SIGFPE))
#endif // LIBC_COPT_TEST_USE_FUCHSIA
-#endif // LLVM_LIBC_UTILS_UNITTEST_FPEXCEPTMATCHER_H
+#endif // LLVM_LIBC_TEST_UNITTEST_FPEXCEPTMATCHER_H
diff --git a/libc/test/UnitTest/FPMatcher.h b/libc/test/UnitTest/FPMatcher.h
index c4a1cfa1bc1d..4525b9e83019 100644
--- a/libc/test/UnitTest/FPMatcher.h
+++ b/libc/test/UnitTest/FPMatcher.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_FPMATCHER_H
-#define LLVM_LIBC_UTILS_UNITTEST_FPMATCHER_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
+#define LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
#include "src/__support/CPP/type_traits.h"
#include "src/__support/FPUtil/FEnvImpl.h"
@@ -210,4 +210,4 @@ template <typename T> struct FPTest : public Test {
} \
} while (0)
-#endif // LLVM_LIBC_UTILS_UNITTEST_FPMATCHER_H
+#endif // LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
diff --git a/libc/test/UnitTest/LibcTest.h b/libc/test/UnitTest/LibcTest.h
index 00e34a4da858..639f60058325 100644
--- a/libc/test/UnitTest/LibcTest.h
+++ b/libc/test/UnitTest/LibcTest.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_LIBCTEST_H
-#define LLVM_LIBC_UTILS_UNITTEST_LIBCTEST_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_LIBCTEST_H
+#define LLVM_LIBC_TEST_UNITTEST_LIBCTEST_H
// This is defined as a simple macro in test.h so that it exists for platforms
// that don't use our test infrastructure. It's defined as a proper function
@@ -493,4 +493,4 @@ CString libc_make_test_file_path_func(const char *file_name);
#define WITH_SIGNAL(X) X
-#endif // LLVM_LIBC_UTILS_UNITTEST_LIBCTEST_H
+#endif // LLVM_LIBC_TEST_UNITTEST_LIBCTEST_H
diff --git a/libc/test/UnitTest/MemoryMatcher.h b/libc/test/UnitTest/MemoryMatcher.h
index cf861a6757ae..c548bafb7ae4 100644
--- a/libc/test/UnitTest/MemoryMatcher.h
+++ b/libc/test/UnitTest/MemoryMatcher.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_MEMORY_MATCHER_H
-#define LLVM_LIBC_UTILS_UNITTEST_MEMORY_MATCHER_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_MEMORYMATCHER_H
+#define LLVM_LIBC_TEST_UNITTEST_MEMORYMATCHER_H
#include "src/__support/CPP/span.h"
@@ -66,4 +66,4 @@ public:
#endif
-#endif // LLVM_LIBC_UTILS_UNITTEST_MEMORY_MATCHER_H
+#endif // LLVM_LIBC_TEST_UNITTEST_MEMORYMATCHER_H
diff --git a/libc/test/UnitTest/PlatformDefs.h b/libc/test/UnitTest/PlatformDefs.h
index 40472f4eb4eb..f9911b155769 100644
--- a/libc/test/UnitTest/PlatformDefs.h
+++ b/libc/test/UnitTest/PlatformDefs.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_PLATFORMDEFS_H
-#define LLVM_LIBC_UTILS_UNITTEST_PLATFORMDEFS_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_PLATFORMDEFS_H
+#define LLVM_LIBC_TEST_UNITTEST_PLATFORMDEFS_H
#if !defined(_WIN32)
#define ENABLE_SUBPROCESS_TESTS
#endif
-#endif // LLVM_LIBC_UTILS_UNITTEST_PLATFORMDEFS_H
+#endif // LLVM_LIBC_TEST_UNITTEST_PLATFORMDEFS_H
diff --git a/libc/test/UnitTest/RoundingModeUtils.h b/libc/test/UnitTest/RoundingModeUtils.h
index d1c3c6ff400a..b986c98fa2e5 100644
--- a/libc/test/UnitTest/RoundingModeUtils.h
+++ b/libc/test/UnitTest/RoundingModeUtils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_ROUNDINGMODEUTILS_H
-#define LLVM_LIBC_UTILS_TESTUTILS_ROUNDINGMODEUTILS_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_ROUNDINGMODEUTILS_H
+#define LLVM_LIBC_TEST_UNITTEST_ROUNDINGMODEUTILS_H
#include <stdint.h>
@@ -34,4 +34,4 @@ template <RoundingMode R> struct ForceRoundingModeTest : ForceRoundingMode {
} // namespace fputil
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_TESTUTILS_ROUNDINGMODEUTILS_H
+#endif // LLVM_LIBC_TEST_UNITTEST_ROUNDINGMODEUTILS_H
diff --git a/libc/test/UnitTest/StringUtils.h b/libc/test/UnitTest/StringUtils.h
index ac28926d51cd..54cff97ceafb 100644
--- a/libc/test/UnitTest/StringUtils.h
+++ b/libc/test/UnitTest/StringUtils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_SIMPLE_STRING_CONV_H
-#define LLVM_LIBC_UTILS_UNITTEST_SIMPLE_STRING_CONV_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_STRINGUTILS_H
+#define LLVM_LIBC_TEST_UNITTEST_STRINGUTILS_H
#include "src/__support/CPP/string.h"
#include "src/__support/CPP/type_traits.h"
@@ -33,4 +33,4 @@ int_to_hex(T value, size_t length = sizeof(T) * 2) {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_UNITTEST_SIMPLE_STRING_CONV_H
+#endif // LLVM_LIBC_TEST_UNITTEST_STRINGUTILS_H
diff --git a/libc/test/UnitTest/Test.h b/libc/test/UnitTest/Test.h
index 61021b9d0e13..f7ce3cfa5cf6 100644
--- a/libc/test/UnitTest/Test.h
+++ b/libc/test/UnitTest/Test.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_TEST_H
-#define LLVM_LIBC_UTILS_UNITTEST_TEST_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_TEST_H
+#define LLVM_LIBC_TEST_UNITTEST_TEST_H
// This macro takes a file name and returns a value implicitly castable to
// a const char*. That const char* is the path to a file with the provided name
@@ -24,4 +24,4 @@
#include "LibcTest.h"
#endif
-#endif // LLVM_LIBC_UTILS_UNITTEST_TEST_H
+#endif // LLVM_LIBC_TEST_UNITTEST_TEST_H
diff --git a/libc/test/include/stdbit_test.cpp b/libc/test/include/stdbit_test.cpp
index 46019075a7c1..acb79ca0f3ff 100644
--- a/libc/test/include/stdbit_test.cpp
+++ b/libc/test/include/stdbit_test.cpp
@@ -81,6 +81,11 @@ unsigned stdc_count_ones_us(unsigned short) noexcept { return 0x3BU; }
unsigned stdc_count_ones_ui(unsigned) noexcept { return 0x3CU; }
unsigned stdc_count_ones_ul(unsigned long) noexcept { return 0x3DU; }
unsigned stdc_count_ones_ull(unsigned long long) noexcept { return 0x3FU; }
+bool stdc_has_single_bit_uc(unsigned char) noexcept { return false; }
+bool stdc_has_single_bit_us(unsigned short) noexcept { return false; }
+bool stdc_has_single_bit_ui(unsigned) noexcept { return false; }
+bool stdc_has_single_bit_ul(unsigned long) noexcept { return false; }
+bool stdc_has_single_bit_ull(unsigned long long) noexcept { return false; }
}
#include "include/llvm-libc-macros/stdbit-macros.h"
@@ -164,3 +169,11 @@ TEST(LlvmLibcStdbitTest, TypeGenericMacroCountOnes) {
EXPECT_EQ(stdc_count_ones(0UL), 0x3DU);
EXPECT_EQ(stdc_count_ones(0ULL), 0x3FU);
}
+
+TEST(LlvmLibcStdbitTest, TypeGenericMacroHasSingleBit) {
+ EXPECT_EQ(stdc_has_single_bit(static_cast<unsigned char>(1U)), false);
+ EXPECT_EQ(stdc_has_single_bit(static_cast<unsigned short>(1U)), false);
+ EXPECT_EQ(stdc_has_single_bit(1U), false);
+ EXPECT_EQ(stdc_has_single_bit(1UL), false);
+ EXPECT_EQ(stdc_has_single_bit(1ULL), false);
+}
diff --git a/libc/test/include/sys/queue_test.cpp b/libc/test/include/sys/queue_test.cpp
index 48c0e811c615..c10e48d627ca 100644
--- a/libc/test/include/sys/queue_test.cpp
+++ b/libc/test/include/sys/queue_test.cpp
@@ -10,7 +10,7 @@
#include "src/__support/char_vector.h"
#include "test/UnitTest/Test.h"
-#include "llvm-libc-macros/sys-queue-macros.h"
+#include "include/llvm-libc-macros/sys-queue-macros.h"
using LIBC_NAMESPACE::CharVector;
using LIBC_NAMESPACE::cpp::string;
diff --git a/libc/test/integration/src/spawn/test_binary_properties.h b/libc/test/integration/src/spawn/test_binary_properties.h
index f1521c218c0c..8e6a1fe6747c 100644
--- a/libc/test/integration/src/spawn/test_binary_properties.h
+++ b/libc/test/integration/src/spawn/test_binary_properties.h
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
-#define LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
+#ifndef LLVM_LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
+#define LLVM_LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
constexpr int CHILD_FD = 10;
constexpr char TEXT[] = "Hello, posix_spawn";
-#endif // LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
+#endif // LLVM_LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
diff --git a/libc/test/src/__support/FPUtil/fpbits_test.cpp b/libc/test/src/__support/FPUtil/fpbits_test.cpp
index 4f9f53afe547..852ab6e31b65 100644
--- a/libc/test/src/__support/FPUtil/fpbits_test.cpp
+++ b/libc/test/src/__support/FPUtil/fpbits_test.cpp
@@ -575,7 +575,7 @@ TEST(LlvmLibcFPBitsTest, LongDoubleType) {
}
#endif
-#if defined(LIBC_COMPILER_HAS_FLOAT128)
+#if defined(LIBC_TYPES_HAS_FLOAT128)
TEST(LlvmLibcFPBitsTest, Float128Type) {
using Float128Bits = FPBits<float128>;
@@ -643,4 +643,4 @@ TEST(LlvmLibcFPBitsTest, Float128Type) {
Float128Bits quiet_nan = Float128Bits::quiet_nan();
EXPECT_EQ(quiet_nan.is_quiet_nan(), true);
}
-#endif // LIBC_COMPILER_HAS_FLOAT128
+#endif // LIBC_TYPES_HAS_FLOAT128
diff --git a/libc/test/src/__support/fixed_point/fx_bits_test.cpp b/libc/test/src/__support/fixed_point/fx_bits_test.cpp
index 58627816eb8d..3cbd800adc3c 100644
--- a/libc/test/src/__support/fixed_point/fx_bits_test.cpp
+++ b/libc/test/src/__support/fixed_point/fx_bits_test.cpp
@@ -20,9 +20,22 @@ using LIBC_NAMESPACE::operator""_u16;
using LIBC_NAMESPACE::operator""_u32;
using LIBC_NAMESPACE::operator""_u64;
+class LlvmLibcFxBitsTest : public LIBC_NAMESPACE::testing::Test {
+public:
+ template <typename T> void testBitwiseOps() {
+ EXPECT_EQ(LIBC_NAMESPACE::fixed_point::bit_and(T(0.75), T(0.375)), T(0.25));
+ EXPECT_EQ(LIBC_NAMESPACE::fixed_point::bit_or(T(0.75), T(0.375)), T(0.875));
+ using StorageType = typename FXRep<T>::StorageType;
+ StorageType a = LIBC_NAMESPACE::cpp::bit_cast<StorageType>(T(0.75));
+ a = ~a;
+ EXPECT_EQ(LIBC_NAMESPACE::fixed_point::bit_not(T(0.75)),
+ FXBits<T>(a).get_val());
+ }
+};
+
// -------------------------------- SHORT TESTS --------------------------------
-TEST(LlvmLibcFxBitsTest, FXBits_UnsignedShortFract) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_UnsignedShortFract) {
auto bits_var = FXBits<unsigned short fract>(0b00000000_u8);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -51,9 +64,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_UnsignedShortFract) {
EXPECT_EQ(bits_var.get_sign(), false);
EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
EXPECT_EQ(bits_var.get_fraction(), 0xcd_u8);
+
+ // Bitwise ops
+ testBitwiseOps<unsigned short fract>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_UnsignedShortAccum) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_UnsignedShortAccum) {
auto bits_var = FXBits<unsigned short accum>(0b00000000'00000000_u16);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -77,9 +93,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_UnsignedShortAccum) {
EXPECT_EQ(bits_var.get_sign(), false);
EXPECT_EQ(bits_var.get_integral(), 0x00cd_u16);
EXPECT_EQ(bits_var.get_fraction(), 0x00fe_u16);
+
+ // Bitwise ops
+ testBitwiseOps<unsigned short accum>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_ShortFract) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_ShortFract) {
auto bits_var = FXBits<short fract>(0b0'0000000_u8);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -103,9 +122,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_ShortFract) {
EXPECT_EQ(bits_var.get_sign(), true);
EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
EXPECT_EQ(bits_var.get_fraction(), 0x4d_u8);
+
+ // Bitwise ops
+ testBitwiseOps<short fract>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_ShortAccum) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_ShortAccum) {
auto bits_var = FXBits<short accum>(0b0'00000000'0000000_u16);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -129,9 +151,14 @@ TEST(LlvmLibcFxBitsTest, FXBits_ShortAccum) {
EXPECT_EQ(bits_var.get_sign(), true);
EXPECT_EQ(bits_var.get_integral(), 0x00cd_u16);
EXPECT_EQ(bits_var.get_fraction(), 0x007e_u16);
+
+ // Bitwise ops
+ testBitwiseOps<short accum>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_UnsignedFract) {
+// -------------------------------- NORMAL TESTS -------------------------------
+
+TEST_F(LlvmLibcFxBitsTest, FXBits_UnsignedFract) {
auto bits_var = FXBits<unsigned fract>(0b0000000000000000_u16);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -155,11 +182,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_UnsignedFract) {
EXPECT_EQ(bits_var.get_sign(), false);
EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
EXPECT_EQ(bits_var.get_fraction(), 0xef12_u16);
-}
-// -------------------------------- NORMAL TESTS -------------------------------
+ // Bitwise ops
+ testBitwiseOps<unsigned fract>();
+}
-TEST(LlvmLibcFxBitsTest, FXBits_UnsignedAccum) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_UnsignedAccum) {
auto bits_var =
FXBits<unsigned accum>(0b0000000000000000'0000000000000000_u32);
@@ -184,9 +212,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_UnsignedAccum) {
EXPECT_EQ(bits_var.get_sign(), false);
EXPECT_EQ(bits_var.get_integral(), 0x0000abcd_u32);
EXPECT_EQ(bits_var.get_fraction(), 0x0000ef12_u32);
+
+ // Bitwise ops
+ testBitwiseOps<unsigned accum>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_Fract) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_Fract) {
auto bits_var = FXBits<fract>(0b0'000000000000000_u16);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -210,9 +241,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_Fract) {
EXPECT_EQ(bits_var.get_sign(), true);
EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
EXPECT_EQ(bits_var.get_fraction(), 0x6f12_u16);
+
+ // Bitwise ops
+ testBitwiseOps<fract>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_Accum) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_Accum) {
auto bits_var = FXBits<accum>(0b0'0000000000000000'000000000000000_u32);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -236,11 +270,14 @@ TEST(LlvmLibcFxBitsTest, FXBits_Accum) {
EXPECT_EQ(bits_var.get_sign(), true);
EXPECT_EQ(bits_var.get_integral(), 0x0000abcd_u32);
EXPECT_EQ(bits_var.get_fraction(), 0x00006f12_u32);
+
+ // Bitwise ops
+ testBitwiseOps<accum>();
}
// --------------------------------- LONG TESTS --------------------------------
-TEST(LlvmLibcFxBitsTest, FXBits_UnsignedLongFract) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_UnsignedLongFract) {
auto bits_var =
FXBits<unsigned long fract>(0b00000000000000000000000000000000_u32);
@@ -265,9 +302,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_UnsignedLongFract) {
EXPECT_EQ(bits_var.get_sign(), false);
EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
EXPECT_EQ(bits_var.get_fraction(), 0xfedcba98_u32);
+
+ // Bitwise ops
+ testBitwiseOps<unsigned long fract>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_UnsignedLongAccum) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_UnsignedLongAccum) {
auto bits_var = FXBits<unsigned long accum>(
0b00000000000000000000000000000000'00000000000000000000000000000000_u64);
@@ -292,9 +332,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_UnsignedLongAccum) {
EXPECT_EQ(bits_var.get_sign(), false);
EXPECT_EQ(bits_var.get_integral(), 0x00000000abcdef12_u64);
EXPECT_EQ(bits_var.get_fraction(), 0x00000000fedcba98_u64);
+
+ // Bitwise ops
+ testBitwiseOps<unsigned long accum>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_LongFract) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_LongFract) {
auto bits_var = FXBits<long fract>(0b0'0000000000000000000000000000000_u32);
EXPECT_EQ(bits_var.get_sign(), false);
@@ -318,9 +361,12 @@ TEST(LlvmLibcFxBitsTest, FXBits_LongFract) {
EXPECT_EQ(bits_var.get_sign(), true);
EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
EXPECT_EQ(bits_var.get_fraction(), 0x7edcba98_u32);
+
+ // Bitwise ops
+ testBitwiseOps<long fract>();
}
-TEST(LlvmLibcFxBitsTest, FXBits_LongAccum) {
+TEST_F(LlvmLibcFxBitsTest, FXBits_LongAccum) {
auto bits_var = FXBits<long accum>(
0b0'00000000000000000000000000000000'0000000000000000000000000000000_u64);
@@ -345,4 +391,7 @@ TEST(LlvmLibcFxBitsTest, FXBits_LongAccum) {
EXPECT_EQ(bits_var.get_sign(), true);
EXPECT_EQ(bits_var.get_integral(), 0x00000000abcdef12_u64);
EXPECT_EQ(bits_var.get_fraction(), 0x000000007edcba98_u64);
+
+ // Bitwise ops
+ testBitwiseOps<long accum>();
}
diff --git a/libc/test/src/__support/uint_test.cpp b/libc/test/src/__support/uint_test.cpp
index 1a1171b46781..963c553b10d0 100644
--- a/libc/test/src/__support/uint_test.cpp
+++ b/libc/test/src/__support/uint_test.cpp
@@ -54,7 +54,7 @@ TEST(LlvmLibcUIntClassTest, BitCastToFromNativeUint128) {
}
#endif
-#ifdef LIBC_COMPILER_HAS_FLOAT128
+#ifdef LIBC_TYPES_HAS_FLOAT128
TEST(LlvmLibcUIntClassTest, BitCastToFromNativeFloat128) {
static_assert(cpp::is_trivially_copyable<LL_UInt128>::value);
static_assert(sizeof(LL_UInt128) == sizeof(float128));
@@ -65,7 +65,7 @@ TEST(LlvmLibcUIntClassTest, BitCastToFromNativeFloat128) {
EXPECT_TRUE(value == forth);
}
}
-#endif
+#endif // LIBC_TYPES_HAS_FLOAT128
TEST(LlvmLibcUIntClassTest, BasicInit) {
LL_UInt128 half_val(12345);
diff --git a/libc/test/src/compiler/stack_chk_guard_test.cpp b/libc/test/src/compiler/stack_chk_guard_test.cpp
index 427e20c2ac50..6b71e155fa3e 100644
--- a/libc/test/src/compiler/stack_chk_guard_test.cpp
+++ b/libc/test/src/compiler/stack_chk_guard_test.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm-libc-macros/signal-macros.h"
+#include "include/llvm-libc-macros/signal-macros.h"
#include "src/__support/macros/sanitizer.h"
#include "src/compiler/__stack_chk_fail.h"
#include "src/string/memset.h"
diff --git a/libc/test/src/math/FAbsTest.h b/libc/test/src/math/FAbsTest.h
index bf3052afc816..54f5f87e08e7 100644
--- a/libc/test/src/math/FAbsTest.h
+++ b/libc/test/src/math/FAbsTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FABSTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FABSTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -47,3 +50,5 @@ public:
using LlvmLibcFAbsTest = FAbsTest<T>; \
TEST_F(LlvmLibcFAbsTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcFAbsTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FABSTEST_H
diff --git a/libc/test/src/math/FMaxTest.h b/libc/test/src/math/FMaxTest.h
index edc46ae5bb0f..f8046f380f5f 100644
--- a/libc/test/src/math/FMaxTest.h
+++ b/libc/test/src/math/FMaxTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FMAXTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FMAXTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -83,3 +86,5 @@ public:
TEST_F(LlvmLibcFMaxTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMaxTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMaxTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FMAXTEST_H
diff --git a/libc/test/src/math/FMinTest.h b/libc/test/src/math/FMinTest.h
index 5ff583604ebc..7a6534f320c9 100644
--- a/libc/test/src/math/FMinTest.h
+++ b/libc/test/src/math/FMinTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FMINTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FMINTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -83,3 +86,5 @@ public:
TEST_F(LlvmLibcFMinTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMinTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMinTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FMINTEST_H
diff --git a/libc/test/src/math/FloorTest.h b/libc/test/src/math/FloorTest.h
index 5e459ebd4928..66b37d69d7ba 100644
--- a/libc/test/src/math/FloorTest.h
+++ b/libc/test/src/math/FloorTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FLOORTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FLOORTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -82,3 +85,5 @@ public:
TEST_F(LlvmLibcFloorTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcFloorTest, Fractions) { testFractions(&func); } \
TEST_F(LlvmLibcFloorTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FLOORTEST_H
diff --git a/libc/test/src/math/RandUtils.h b/libc/test/src/math/RandUtils.h
index 05236ead2ace..fecbd8eaabf2 100644
--- a/libc/test/src/math/RandUtils.h
+++ b/libc/test/src/math/RandUtils.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_RANDUTILS_H
+#define LLVM_LIBC_TEST_SRC_MATH_RANDUTILS_H
+
namespace LIBC_NAMESPACE {
namespace testutils {
@@ -14,3 +17,5 @@ int rand();
} // namespace testutils
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_RANDUTILS_H
diff --git a/libc/test/src/math/RoundTest.h b/libc/test/src/math/RoundTest.h
index 4860464be908..b255ecc4fa84 100644
--- a/libc/test/src/math/RoundTest.h
+++ b/libc/test/src/math/RoundTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_ROUNDTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_ROUNDTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -82,3 +85,5 @@ public:
TEST_F(LlvmLibcRoundTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcRoundTest, Fractions) { testFractions(&func); } \
TEST_F(LlvmLibcRoundTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_ROUNDTEST_H
diff --git a/libc/test/src/math/TruncTest.h b/libc/test/src/math/TruncTest.h
index 0d99363526e8..6d0ea1182ec1 100644
--- a/libc/test/src/math/TruncTest.h
+++ b/libc/test/src/math/TruncTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_TRUNCTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_TRUNCTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -82,3 +85,5 @@ public:
TEST_F(LlvmLibcTruncTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcTruncTest, Fractions) { testFractions(&func); } \
TEST_F(LlvmLibcTruncTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_TRUNCTEST_H
diff --git a/libc/test/src/math/differential_testing/Timer.h b/libc/test/src/math/differential_testing/Timer.h
index d4acff7ba0eb..0d9518c37d9e 100644
--- a/libc/test/src/math/differential_testing/Timer.h
+++ b/libc/test/src/math/differential_testing/Timer.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_TIMER_H
-#define LLVM_LIBC_UTILS_TESTUTILS_TIMER_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_DIFFERENTIAL_TESTING_TIMER_H
+#define LLVM_LIBC_TEST_SRC_MATH_DIFFERENTIAL_TESTING_TIMER_H
#include <stdint.h>
@@ -30,4 +30,4 @@ public:
} // namespace testing
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_TESTUTILS_TIMER_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_DIFFERENTIAL_TESTING_TIMER_H
diff --git a/libc/test/src/math/in_float_range_test_helper.h b/libc/test/src/math/in_float_range_test_helper.h
index 5f345c0cf17a..35e039e74af5 100644
--- a/libc/test/src/math/in_float_range_test_helper.h
+++ b/libc/test/src/math/in_float_range_test_helper.h
@@ -2,8 +2,8 @@
// Created by kirill on 8/30/22.
//
-#ifndef LLVM_IN_FLOAT_RANGE_TEST_HELPER_H
-#define LLVM_IN_FLOAT_RANGE_TEST_HELPER_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_IN_FLOAT_RANGE_TEST_HELPER_H
+#define LLVM_LIBC_TEST_SRC_MATH_IN_FLOAT_RANGE_TEST_HELPER_H
#include <stdint.h>
@@ -23,4 +23,4 @@
} \
}
-#endif // LLVM_IN_FLOAT_RANGE_TEST_HELPER_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_IN_FLOAT_RANGE_TEST_HELPER_H
diff --git a/libc/test/src/math/smoke/CeilTest.h b/libc/test/src/math/smoke/CeilTest.h
index c10fd2816014..5248dbca5037 100644
--- a/libc/test/src/math/smoke/CeilTest.h
+++ b/libc/test/src/math/smoke/CeilTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_CEILTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_CEILTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcCeilTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcCeilTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcCeilTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_CEILTEST_H
diff --git a/libc/test/src/math/smoke/CopySignTest.h b/libc/test/src/math/smoke/CopySignTest.h
index 1108a45ae673..9ee34338ba80 100644
--- a/libc/test/src/math/smoke/CopySignTest.h
+++ b/libc/test/src/math/smoke/CopySignTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_COPYSIGNTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_COPYSIGNTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -52,3 +55,5 @@ public:
using LlvmLibcCopySignTest = CopySignTest<T>; \
TEST_F(LlvmLibcCopySignTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcCopySignTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_COPYSIGNTEST_H
diff --git a/libc/test/src/math/smoke/FAbsTest.h b/libc/test/src/math/smoke/FAbsTest.h
index 7d905baefe85..cf05882e22f9 100644
--- a/libc/test/src/math/smoke/FAbsTest.h
+++ b/libc/test/src/math/smoke/FAbsTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FABSTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FABSTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -35,3 +38,5 @@ public:
#define LIST_FABS_TESTS(T, func) \
using LlvmLibcFAbsTest = FAbsTest<T>; \
TEST_F(LlvmLibcFAbsTest, SpecialNumbers) { testSpecialNumbers(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FABSTEST_H
diff --git a/libc/test/src/math/smoke/FMaxTest.h b/libc/test/src/math/smoke/FMaxTest.h
index 1a376af2e0b7..98fae06ee2a0 100644
--- a/libc/test/src/math/smoke/FMaxTest.h
+++ b/libc/test/src/math/smoke/FMaxTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -80,3 +83,5 @@ public:
TEST_F(LlvmLibcFMaxTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMaxTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMaxTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXTEST_H
diff --git a/libc/test/src/math/smoke/FMinTest.h b/libc/test/src/math/smoke/FMinTest.h
index add2544424a0..b1ffe38829f4 100644
--- a/libc/test/src/math/smoke/FMinTest.h
+++ b/libc/test/src/math/smoke/FMinTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -80,3 +83,5 @@ public:
TEST_F(LlvmLibcFMinTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMinTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMinTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINTEST_H
diff --git a/libc/test/src/math/smoke/FloorTest.h b/libc/test/src/math/smoke/FloorTest.h
index 1c1b62c2dcda..610f5c206ed3 100644
--- a/libc/test/src/math/smoke/FloorTest.h
+++ b/libc/test/src/math/smoke/FloorTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FLOORTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FLOORTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcFloorTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcFloorTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcFloorTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FLOORTEST_H
diff --git a/libc/test/src/math/smoke/RIntTest.h b/libc/test/src/math/smoke/RIntTest.h
index 233164b41247..4c90dffa39cb 100644
--- a/libc/test/src/math/smoke/RIntTest.h
+++ b/libc/test/src/math/smoke/RIntTest.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_TEST_SRC_MATH_RINTTEST_H
-#define LLVM_LIBC_TEST_SRC_MATH_RINTTEST_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_RINTTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_RINTTEST_H
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
@@ -54,4 +54,4 @@ public:
using LlvmLibcRIntTest = RIntTestTemplate<F>; \
TEST_F(LlvmLibcRIntTest, specialNumbers) { testSpecialNumbers(&func); }
-#endif // LLVM_LIBC_TEST_SRC_MATH_RINTTEST_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_RINTTEST_H
diff --git a/libc/test/src/math/smoke/RoundTest.h b/libc/test/src/math/smoke/RoundTest.h
index 2e95f182ce94..d2a5906b1e29 100644
--- a/libc/test/src/math/smoke/RoundTest.h
+++ b/libc/test/src/math/smoke/RoundTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcRoundTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcRoundTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcRoundTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTEST_H
diff --git a/libc/test/src/math/smoke/RoundToIntegerTest.h b/libc/test/src/math/smoke/RoundToIntegerTest.h
index 59694131f7f5..e86533ca09e1 100644
--- a/libc/test/src/math/smoke/RoundToIntegerTest.h
+++ b/libc/test/src/math/smoke/RoundToIntegerTest.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_TEST_SRC_MATH_ROUNDTOINTEGERTEST_H
-#define LLVM_LIBC_TEST_SRC_MATH_ROUNDTOINTEGERTEST_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTOINTEGERTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTOINTEGERTEST_H
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
@@ -169,4 +169,4 @@ public:
#define LIST_ROUND_TO_INTEGER_TESTS_WITH_MODES(F, I, func) \
LIST_ROUND_TO_INTEGER_TESTS_HELPER(F, I, func, true)
-#endif // LLVM_LIBC_TEST_SRC_MATH_ROUNDTOINTEGERTEST_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTOINTEGERTEST_H
diff --git a/libc/test/src/math/smoke/TruncTest.h b/libc/test/src/math/smoke/TruncTest.h
index 8334a7b7c0f9..71b1ab9df3f0 100644
--- a/libc/test/src/math/smoke/TruncTest.h
+++ b/libc/test/src/math/smoke/TruncTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_TRUNCTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_TRUNCTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcTruncTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcTruncTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcTruncTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_TRUNCTEST_H
diff --git a/libc/test/src/stdbit/CMakeLists.txt b/libc/test/src/stdbit/CMakeLists.txt
index 659e575fedea..a886ee4a3532 100644
--- a/libc/test/src/stdbit/CMakeLists.txt
+++ b/libc/test/src/stdbit/CMakeLists.txt
@@ -11,6 +11,7 @@ set(prefixes
first_trailing_one
count_zeros
count_ones
+ has_single_bit
)
set(suffixes c s i l ll)
foreach(prefix IN LISTS prefixes)
diff --git a/libc/test/src/stdbit/stdc_has_single_bit_uc_test.cpp b/libc/test/src/stdbit/stdc_has_single_bit_uc_test.cpp
new file mode 100644
index 000000000000..6212b1ec765a
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_has_single_bit_uc_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_has_single_bit_uc ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_has_single_bit_uc.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcCountOnesUcTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_uc(0U), false);
+}
+
+TEST(LlvmLibcStdcCountOnesUcTest, OneHot) {
+ for (unsigned i = 0U; i != UCHAR_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_uc(1U << i), true);
+}
diff --git a/libc/test/src/stdbit/stdc_has_single_bit_ui_test.cpp b/libc/test/src/stdbit/stdc_has_single_bit_ui_test.cpp
new file mode 100644
index 000000000000..2e00507aa025
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_has_single_bit_ui_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_has_single_bit_ui ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_has_single_bit_ui.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcCountOnesUiTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_ui(0U), false);
+}
+
+TEST(LlvmLibcStdcCountOnesUiTest, OneHot) {
+ for (unsigned i = 0U; i != UINT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_ui(1U << i), true);
+}
diff --git a/libc/test/src/stdbit/stdc_has_single_bit_ul_test.cpp b/libc/test/src/stdbit/stdc_has_single_bit_ul_test.cpp
new file mode 100644
index 000000000000..8c0178998bbe
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_has_single_bit_ul_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_has_single_bit_ul ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_has_single_bit_ul.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcCountOnesUlTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_ul(0U), false);
+}
+
+TEST(LlvmLibcStdcCountOnesUlTest, OneHot) {
+ for (unsigned i = 0U; i != ULONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_ul(1UL << i), true);
+}
diff --git a/libc/test/src/stdbit/stdc_has_single_bit_ull_test.cpp b/libc/test/src/stdbit/stdc_has_single_bit_ull_test.cpp
new file mode 100644
index 000000000000..1d9f976b6d63
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_has_single_bit_ull_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_has_single_bit_ull -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_has_single_bit_ull.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcCountOnesUllTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_ull(0U), false);
+}
+
+TEST(LlvmLibcStdcCountOnesUllTest, OneHot) {
+ for (unsigned i = 0U; i != ULLONG_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_ull(1ULL << i), true);
+}
diff --git a/libc/test/src/stdbit/stdc_has_single_bit_us_test.cpp b/libc/test/src/stdbit/stdc_has_single_bit_us_test.cpp
new file mode 100644
index 000000000000..52c4de881044
--- /dev/null
+++ b/libc/test/src/stdbit/stdc_has_single_bit_us_test.cpp
@@ -0,0 +1,20 @@
+//===-- Unittests for stdc_has_single_bit_us ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/limits.h"
+#include "src/stdbit/stdc_has_single_bit_us.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcStdcCountOnesUsTest, Zero) {
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_us(0U), false);
+}
+
+TEST(LlvmLibcStdcCountOnesUsTest, OneHot) {
+ for (unsigned i = 0U; i != USHRT_WIDTH; ++i)
+ EXPECT_EQ(LIBC_NAMESPACE::stdc_has_single_bit_us(1U << i), true);
+}
diff --git a/libc/test/src/time/TmHelper.h b/libc/test/src/time/TmHelper.h
index d8e638d8dbaf..16210944bf15 100644
--- a/libc/test/src/time/TmHelper.h
+++ b/libc/test/src/time/TmHelper.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_TEST_SRC_TIME_TM_HELPER_H
-#define LLVM_LIBC_TEST_SRC_TIME_TM_HELPER_H
+#ifndef LLVM_LIBC_TEST_SRC_TIME_TMHELPER_H
+#define LLVM_LIBC_TEST_SRC_TIME_TMHELPER_H
#include <time.h>
@@ -40,4 +40,4 @@ static inline void initialize_tm_data(struct tm *tm_data, int year, int month,
} // namespace tmhelper
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_TEST_SRC_TIME_TM_HELPER_H
+#endif // LLVM_LIBC_TEST_SRC_TIME_TMHELPER_H
diff --git a/libc/utils/MPFRWrapper/MPFRUtils.h b/libc/utils/MPFRWrapper/MPFRUtils.h
index 25e6b0ba9ac0..6164d78fa5ad 100644
--- a/libc/utils/MPFRWrapper/MPFRUtils.h
+++ b/libc/utils/MPFRWrapper/MPFRUtils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_MPFRUTILS_H
-#define LLVM_LIBC_UTILS_TESTUTILS_MPFRUTILS_H
+#ifndef LLVM_LIBC_UTILS_MPFRWRAPPER_MPFRUTILS_H
+#define LLVM_LIBC_UTILS_MPFRWRAPPER_MPFRUTILS_H
#include "src/__support/CPP/type_traits.h"
#include "test/UnitTest/RoundingModeUtils.h"
@@ -426,4 +426,4 @@ template <typename T> bool round_to_long(T x, RoundingMode mode, long &result);
} \
}
-#endif // LLVM_LIBC_UTILS_TESTUTILS_MPFRUTILS_H
+#endif // LLVM_LIBC_UTILS_MPFRWRAPPER_MPFRUTILS_H
diff --git a/libc/utils/gpu/loader/amdgpu/Loader.cpp b/libc/utils/gpu/loader/amdgpu/Loader.cpp
index 0ff2dce813ed..e3911eda2bd8 100644
--- a/libc/utils/gpu/loader/amdgpu/Loader.cpp
+++ b/libc/utils/gpu/loader/amdgpu/Loader.cpp
@@ -230,12 +230,12 @@ hsa_status_t launch_kernel(hsa_agent_t dev_agent, hsa_executable_t executable,
implicit_args_t *implicit_args = reinterpret_cast<implicit_args_t *>(
reinterpret_cast<uint8_t *>(args) + sizeof(args_t));
implicit_args->grid_dims = dims;
- implicit_args->grid_size_x = params.num_threads_x;
- implicit_args->grid_size_y = params.num_threads_y;
- implicit_args->grid_size_z = params.num_threads_z;
- implicit_args->workgroup_size_x = params.num_blocks_x;
- implicit_args->workgroup_size_y = params.num_blocks_y;
- implicit_args->workgroup_size_z = params.num_blocks_z;
+ implicit_args->grid_size_x = params.num_blocks_x;
+ implicit_args->grid_size_y = params.num_blocks_y;
+ implicit_args->grid_size_z = params.num_blocks_z;
+ implicit_args->workgroup_size_x = params.num_threads_x;
+ implicit_args->workgroup_size_y = params.num_threads_y;
+ implicit_args->workgroup_size_z = params.num_threads_z;
// Obtain a packet from the queue.
uint64_t packet_id = hsa_queue_add_write_index_relaxed(queue, 1);
diff --git a/libcxx/cmake/config-ix.cmake b/libcxx/cmake/config-ix.cmake
index 1e8c2f5ce463..7406fba482e6 100644
--- a/libcxx/cmake/config-ix.cmake
+++ b/libcxx/cmake/config-ix.cmake
@@ -1,5 +1,6 @@
include(CMakePushCheckState)
include(CheckLibraryExists)
+include(CheckSymbolExists)
include(LLVMCheckCompilerLinkerFlag)
include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
@@ -97,6 +98,8 @@ int main(void) { return 0; }
cmake_pop_check_state()
endif()
+check_symbol_exists(__PICOLIBC__ "string.h" PICOLIBC)
+
# Check libraries
if(WIN32 AND NOT MINGW)
# TODO(compnerd) do we want to support an emulation layer that allows for the
@@ -116,6 +119,10 @@ elseif(ANDROID)
set(LIBCXX_HAS_PTHREAD_LIB NO)
set(LIBCXX_HAS_RT_LIB NO)
set(LIBCXX_HAS_ATOMIC_LIB NO)
+elseif(PICOLIBC)
+ set(LIBCXX_HAS_PTHREAD_LIB NO)
+ set(LIBCXX_HAS_RT_LIB NO)
+ set(LIBCXX_HAS_ATOMIC_LIB NO)
else()
check_library_exists(pthread pthread_create "" LIBCXX_HAS_PTHREAD_LIB)
check_library_exists(rt clock_gettime "" LIBCXX_HAS_RT_LIB)
diff --git a/libcxx/docs/FeatureTestMacroTable.rst b/libcxx/docs/FeatureTestMacroTable.rst
index 468226c0c2dd..60e0aea9768b 100644
--- a/libcxx/docs/FeatureTestMacroTable.rst
+++ b/libcxx/docs/FeatureTestMacroTable.rst
@@ -354,6 +354,8 @@ Status
--------------------------------------------------- -----------------
``__cpp_lib_ranges_chunk_by`` ``202202L``
--------------------------------------------------- -----------------
+ ``__cpp_lib_ranges_contains`` ``202207L``
+ --------------------------------------------------- -----------------
``__cpp_lib_ranges_iota`` *unimplemented*
--------------------------------------------------- -----------------
``__cpp_lib_ranges_join_with`` *unimplemented*
@@ -362,7 +364,7 @@ Status
--------------------------------------------------- -----------------
``__cpp_lib_ranges_slide`` *unimplemented*
--------------------------------------------------- -----------------
- ``__cpp_lib_ranges_starts_ends_with`` *unimplemented*
+ ``__cpp_lib_ranges_starts_ends_with`` ``202106L``
--------------------------------------------------- -----------------
``__cpp_lib_ranges_to_container`` ``202202L``
--------------------------------------------------- -----------------
diff --git a/libcxx/docs/ReleaseNotes/19.rst b/libcxx/docs/ReleaseNotes/19.rst
index 6c8f8d17af9b..0d381df5f044 100644
--- a/libcxx/docs/ReleaseNotes/19.rst
+++ b/libcxx/docs/ReleaseNotes/19.rst
@@ -41,7 +41,8 @@ Implemented Papers
- P2637R3 - Member ``visit``
- P2652R2 - Disallow User Specialization of ``allocator_traits``
- P2819R2 - Add ``tuple`` protocol to ``complex``
-
+- P2302R4 - ``std::ranges::contains``
+- P1659R3 - ``std::ranges::starts_with`` and ``std::ranges::ends_with``
Improvements and New Features
-----------------------------
@@ -73,6 +74,11 @@ Deprecations and Removals
- The ``_LIBCPP_INLINE_VISIBILITY`` and ``_VSTD`` macros have been removed in LLVM 19.
+- The ``_LIBCPP_ATOMIC_ONLY_USE_BUILTINS`` configuration option has been removed in LLVM 19. This should not affect
+ many users, except perhaps users using the library with ``-ffreestanding`` with a toolchain where compiler-rt or
+ libatomic is not available. If you are one such user, please reach out to the libc++ developers so we can collaborate
+ on a path for supporting atomics properly on freestanding platforms.
+
Upcoming Deprecations and Removals
----------------------------------
diff --git a/libcxx/docs/Status/Cxx23Papers.csv b/libcxx/docs/Status/Cxx23Papers.csv
index eb415ed8c031..56e1468b4ca1 100644
--- a/libcxx/docs/Status/Cxx23Papers.csv
+++ b/libcxx/docs/Status/Cxx23Papers.csv
@@ -17,7 +17,7 @@
"`P1328R1 <https://wg21.link/P1328R1>`__","LWG","Making std::type_info::operator== constexpr","June 2021","|Complete|","17.0"
"`P1425R4 <https://wg21.link/P1425R4>`__","LWG","Iterators pair constructors for stack and queue","June 2021","|Complete|","14.0","|ranges|"
"`P1518R2 <https://wg21.link/P1518R2>`__","LWG","Stop overconstraining allocators in container deduction guides","June 2021","|Complete|","13.0"
-"`P1659R3 <https://wg21.link/P1659R3>`__","LWG","starts_with and ends_with","June 2021","","","|ranges|"
+"`P1659R3 <https://wg21.link/P1659R3>`__","LWG","starts_with and ends_with","June 2021","|Complete|","19.0","|ranges|"
"`P1951R1 <https://wg21.link/P1951R1>`__","LWG","Default Arguments for pair Forwarding Constructor","June 2021","|Complete|","14.0"
"`P1989R2 <https://wg21.link/P1989R2>`__","LWG","Range constructor for std::string_view","June 2021","|Complete|","14.0","|ranges|"
"`P2136R3 <https://wg21.link/P2136R3>`__","LWG","invoke_r","June 2021","|Complete|","17.0"
@@ -64,7 +64,7 @@
"`P2278R4 <https://wg21.link/P2278R4>`__","LWG","``cbegin`` should always return a constant iterator","July 2022","","","|ranges|"
"`P2286R8 <https://wg21.link/P2286R8>`__","LWG","Formatting Ranges","July 2022","|Complete|","16.0","|format| |ranges|"
"`P2291R3 <https://wg21.link/P2291R3>`__","LWG","Add Constexpr Modifiers to Functions ``to_chars`` and ``from_chars`` for Integral Types in ``<charconv>`` Header","July 2022","|Complete|","16.0"
-"`P2302R4 <https://wg21.link/P2302R4>`__","LWG","``std::ranges::contains``","July 2022","","","|ranges|"
+"`P2302R4 <https://wg21.link/P2302R4>`__","LWG","``std::ranges::contains``","July 2022","|Complete|","19.0","|ranges|"
"`P2322R6 <https://wg21.link/P2322R6>`__","LWG","``ranges::fold``","July 2022","","","|ranges|"
"`P2374R4 <https://wg21.link/P2374R4>`__","LWG","``views::cartesian_product``","July 2022","","","|ranges|"
"`P2404R3 <https://wg21.link/P2404R3>`__","LWG","Move-only types for ``equality_comparable_with``, ``totally_ordered_with``, and ``three_way_comparable_with``","July 2022","",""
diff --git a/libcxx/include/__assert b/libcxx/include/__assert
index eb862b5369b2..49769fb4d449 100644
--- a/libcxx/include/__assert
+++ b/libcxx/include/__assert
@@ -34,4 +34,85 @@
# define _LIBCPP_ASSUME(expression) ((void)0)
#endif
+// clang-format off
+// Fast hardening mode checks.
+
+#if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST
+
+// Enabled checks.
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
+// Disabled checks.
+// On most modern platforms, dereferencing a null pointer does not lead to an actual memory access.
+# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression)
+// Overlapping ranges will make algorithms produce incorrect results but don't directly lead to a security
+// vulnerability.
+# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression)
+
+// Extensive hardening mode checks.
+
+#elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE
+
+// Enabled checks.
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
+// Disabled checks.
+# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression)
+
+// Debug hardening mode checks.
+
+#elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG
+
+// All checks enabled.
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
+
+// Disable all checks if hardening is not enabled.
+
+#else
+
+// All checks disabled.
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression)
+# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression)
+
+#endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST
+// clang-format on
+
#endif // _LIBCPP___ASSERT
diff --git a/libcxx/include/__atomic/aliases.h b/libcxx/include/__atomic/aliases.h
index 0fa289de54b0..e27e09af6b77 100644
--- a/libcxx/include/__atomic/aliases.h
+++ b/libcxx/include/__atomic/aliases.h
@@ -18,7 +18,6 @@
#include <__type_traits/make_unsigned.h>
#include <cstddef>
#include <cstdint>
-#include <cstdlib>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@@ -92,7 +91,7 @@ using __largest_lock_free_type = short;
# elif ATOMIC_CHAR_LOCK_FREE == 2
using __largest_lock_free_type = char;
# else
-# define _LIBCPP_NO_LOCK_FREE_TYPES // There are no lockfree types (this can happen in freestanding)
+# define _LIBCPP_NO_LOCK_FREE_TYPES // There are no lockfree types (this can happen on unusual platforms)
# endif
# ifndef _LIBCPP_NO_LOCK_FREE_TYPES
diff --git a/libcxx/include/__atomic/atomic_flag.h b/libcxx/include/__atomic/atomic_flag.h
index a45a71835477..18a864523de0 100644
--- a/libcxx/include/__atomic/atomic_flag.h
+++ b/libcxx/include/__atomic/atomic_flag.h
@@ -13,6 +13,7 @@
#include <__atomic/contention_t.h>
#include <__atomic/cxx_atomic_impl.h>
#include <__atomic/memory_order.h>
+#include <__availability>
#include <__chrono/duration.h>
#include <__config>
#include <__thread/support.h>
diff --git a/libcxx/include/__atomic/atomic_sync.h b/libcxx/include/__atomic/atomic_sync.h
index d07b35878493..2997bad83344 100644
--- a/libcxx/include/__atomic/atomic_sync.h
+++ b/libcxx/include/__atomic/atomic_sync.h
@@ -137,9 +137,9 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_nonatomic_compare_equal(_Tp const& __lhs, _Tp c
template <class _Tp>
struct __atomic_compare_unequal_to {
- _Tp __val;
- _LIBCPP_HIDE_FROM_ABI bool operator()(_Tp& __current_val) const {
- return !std::__cxx_nonatomic_compare_equal(__current_val, __val);
+ _Tp __val_;
+ _LIBCPP_HIDE_FROM_ABI bool operator()(const _Tp& __arg) const {
+ return !std::__cxx_nonatomic_compare_equal(__arg, __val_);
}
};
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index 1a0b808a0cb1..b900cc135f78 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -9,16 +9,13 @@
#ifndef _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
#define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
-#include <__atomic/is_always_lock_free.h>
#include <__atomic/memory_order.h>
#include <__config>
#include <__memory/addressof.h>
-#include <__type_traits/conditional.h>
#include <__type_traits/is_assignable.h>
#include <__type_traits/is_trivially_copyable.h>
#include <__type_traits/remove_const.h>
#include <cstddef>
-#include <cstring>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@@ -26,7 +23,7 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) || defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS)
+#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)
// [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
// the default operator= in an object is not volatile, a byte-by-byte copy
@@ -44,10 +41,6 @@ _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value,
*__to++ = *__from++;
}
-#endif
-
-#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)
-
template <typename _Tp>
struct __cxx_atomic_base_impl {
_LIBCPP_HIDE_FROM_ABI
@@ -529,289 +522,7 @@ __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_o
#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP
-#ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS
-
-template <typename _Tp>
-struct __cxx_atomic_lock_impl {
- _LIBCPP_HIDE_FROM_ABI __cxx_atomic_lock_impl() _NOEXCEPT : __a_value(), __a_lock(0) {}
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT
- : __a_value(value),
- __a_lock(0) {}
-
- _Tp __a_value;
- mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_lock;
-
- _LIBCPP_HIDE_FROM_ABI void __lock() const volatile {
- while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
- /*spin*/;
- }
- _LIBCPP_HIDE_FROM_ABI void __lock() const {
- while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
- /*spin*/;
- }
- _LIBCPP_HIDE_FROM_ABI void __unlock() const volatile {
- __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
- }
- _LIBCPP_HIDE_FROM_ABI void __unlock() const {
- __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
- }
- _LIBCPP_HIDE_FROM_ABI _Tp __read() const volatile {
- __lock();
- _Tp __old;
- __cxx_atomic_assign_volatile(__old, __a_value);
- __unlock();
- return __old;
- }
- _LIBCPP_HIDE_FROM_ABI _Tp __read() const {
- __lock();
- _Tp __old = __a_value;
- __unlock();
- return __old;
- }
- _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const volatile {
- __lock();
- __cxx_atomic_assign_volatile(*__dst, __a_value);
- __unlock();
- }
- _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const {
- __lock();
- *__dst = __a_value;
- __unlock();
- }
-};
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
- __cxx_atomic_assign_volatile(__a->__a_value, __val);
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
- __a->__a_value = __val;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
- __a->__lock();
- __cxx_atomic_assign_volatile(__a->__a_value, __val);
- __a->__unlock();
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
- __a->__lock();
- __a->__a_value = __val;
- __a->__unlock();
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
- return __a->__read();
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
- return __a->__read();
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void
-__cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) {
- __a->__read_inplace(__dst);
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) {
- __a->__read_inplace(__dst);
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
- __a->__lock();
- _Tp __old;
- __cxx_atomic_assign_volatile(__old, __a->__a_value);
- __cxx_atomic_assign_volatile(__a->__a_value, __value);
- __a->__unlock();
- return __old;
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
- __a->__lock();
- _Tp __old = __a->__a_value;
- __a->__a_value = __value;
- __a->__unlock();
- return __old;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
- volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) {
- _Tp __temp;
- __a->__lock();
- __cxx_atomic_assign_volatile(__temp, __a->__a_value);
- bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
- if (__ret)
- __cxx_atomic_assign_volatile(__a->__a_value, __value);
- else
- __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
- __a->__unlock();
- return __ret;
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
- __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) {
- __a->__lock();
- bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
- if (__ret)
- std::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
- else
- std::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
- __a->__unlock();
- return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
- volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) {
- _Tp __temp;
- __a->__lock();
- __cxx_atomic_assign_volatile(__temp, __a->__a_value);
- bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
- if (__ret)
- __cxx_atomic_assign_volatile(__a->__a_value, __value);
- else
- __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
- __a->__unlock();
- return __ret;
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
- __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) {
- __a->__lock();
- bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
- if (__ret)
- std::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
- else
- std::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
- __a->__unlock();
- return __ret;
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
- __a->__lock();
- _Tp __old;
- __cxx_atomic_assign_volatile(__old, __a->__a_value);
- __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old + __delta));
- __a->__unlock();
- return __old;
-}
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
- __a->__lock();
- _Tp __old = __a->__a_value;
- __a->__a_value += __delta;
- __a->__unlock();
- return __old;
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp*
-__cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) {
- __a->__lock();
- _Tp* __old;
- __cxx_atomic_assign_volatile(__old, __a->__a_value);
- __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta);
- __a->__unlock();
- return __old;
-}
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) {
- __a->__lock();
- _Tp* __old = __a->__a_value;
- __a->__a_value += __delta;
- __a->__unlock();
- return __old;
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
- __a->__lock();
- _Tp __old;
- __cxx_atomic_assign_volatile(__old, __a->__a_value);
- __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta));
- __a->__unlock();
- return __old;
-}
-template <typename _Tp, typename _Td>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
- __a->__lock();
- _Tp __old = __a->__a_value;
- __a->__a_value -= __delta;
- __a->__unlock();
- return __old;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
- __a->__lock();
- _Tp __old;
- __cxx_atomic_assign_volatile(__old, __a->__a_value);
- __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern));
- __a->__unlock();
- return __old;
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
- __a->__lock();
- _Tp __old = __a->__a_value;
- __a->__a_value &= __pattern;
- __a->__unlock();
- return __old;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
- __a->__lock();
- _Tp __old;
- __cxx_atomic_assign_volatile(__old, __a->__a_value);
- __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern));
- __a->__unlock();
- return __old;
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
- __a->__lock();
- _Tp __old = __a->__a_value;
- __a->__a_value |= __pattern;
- __a->__unlock();
- return __old;
-}
-
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp
-__cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
- __a->__lock();
- _Tp __old;
- __cxx_atomic_assign_volatile(__old, __a->__a_value);
- __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern));
- __a->__unlock();
- return __old;
-}
-template <typename _Tp>
-_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
- __a->__lock();
- _Tp __old = __a->__a_value;
- __a->__a_value ^= __pattern;
- __a->__unlock();
- return __old;
-}
-
-template <typename _Tp,
- typename _Base = typename conditional<__libcpp_is_always_lock_free<_Tp>::__value,
- __cxx_atomic_base_impl<_Tp>,
- __cxx_atomic_lock_impl<_Tp> >::type>
-#else
template <typename _Tp, typename _Base = __cxx_atomic_base_impl<_Tp> >
-#endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS
struct __cxx_atomic_impl : public _Base {
static_assert(is_trivially_copyable<_Tp>::value, "std::atomic<T> requires that 'T' be a trivially copyable type");
diff --git a/libcxx/include/__charconv/from_chars_integral.h b/libcxx/include/__charconv/from_chars_integral.h
index e969cedb33cb..c1f033b37b91 100644
--- a/libcxx/include/__charconv/from_chars_integral.h
+++ b/libcxx/include/__charconv/from_chars_integral.h
@@ -11,6 +11,7 @@
#define _LIBCPP___CHARCONV_FROM_CHARS_INTEGRAL_H
#include <__algorithm/copy_n.h>
+#include <__assert>
#include <__charconv/from_chars_result.h>
#include <__charconv/traits.h>
#include <__config>
diff --git a/libcxx/include/__charconv/to_chars_base_10.h b/libcxx/include/__charconv/to_chars_base_10.h
index 0dee351521f9..c49f4f6797aa 100644
--- a/libcxx/include/__charconv/to_chars_base_10.h
+++ b/libcxx/include/__charconv/to_chars_base_10.h
@@ -11,6 +11,7 @@
#define _LIBCPP___CHARCONV_TO_CHARS_BASE_10_H
#include <__algorithm/copy_n.h>
+#include <__assert>
#include <__charconv/tables.h>
#include <__config>
#include <cstdint>
diff --git a/libcxx/include/__charconv/to_chars_integral.h b/libcxx/include/__charconv/to_chars_integral.h
index 40fbe334d8d5..0369f4dfb9bd 100644
--- a/libcxx/include/__charconv/to_chars_integral.h
+++ b/libcxx/include/__charconv/to_chars_integral.h
@@ -11,6 +11,7 @@
#define _LIBCPP___CHARCONV_TO_CHARS_INTEGRAL_H
#include <__algorithm/copy_n.h>
+#include <__assert>
#include <__bit/countl.h>
#include <__charconv/tables.h>
#include <__charconv/to_chars_base_10.h>
diff --git a/libcxx/include/__charconv/traits.h b/libcxx/include/__charconv/traits.h
index b4907c3f7757..c91c6da32479 100644
--- a/libcxx/include/__charconv/traits.h
+++ b/libcxx/include/__charconv/traits.h
@@ -10,6 +10,7 @@
#ifndef _LIBCPP___CHARCONV_TRAITS
#define _LIBCPP___CHARCONV_TRAITS
+#include <__assert>
#include <__bit/countl.h>
#include <__charconv/tables.h>
#include <__charconv/to_chars_base_10.h>
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 0797880cb2f5..8d4d17378b29 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -345,87 +345,6 @@ _LIBCPP_HARDENING_MODE_EXTENSIVE, \
_LIBCPP_HARDENING_MODE_DEBUG
# endif
-// clang-format off
-// Fast hardening mode checks.
-
-# if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST
-
-// Enabled checks.
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
-// Disabled checks.
-// On most modern platforms, dereferencing a null pointer does not lead to an actual memory access.
-# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression)
-// Overlapping ranges will make algorithms produce incorrect results but don't directly lead to a security
-// vulnerability.
-# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression)
-
-// Extensive hardening mode checks.
-
-# elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE
-
-// Enabled checks.
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
-// Disabled checks.
-# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression)
-
-// Debug hardening mode checks.
-
-# elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG
-
-// All checks enabled.
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message)
-# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
-
-// Disable all checks if hardening is not enabled.
-
-# else
-
-// All checks disabled.
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression)
-# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression)
-
-# endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST
-// clang-format on
-
// } HARDENING
# define _LIBCPP_TOSTRING2(x) #x
@@ -460,6 +379,11 @@ _LIBCPP_HARDENING_MODE_DEBUG
# define __has_constexpr_builtin(x) 0
# endif
+// This checks whether a Clang module is built
+# ifndef __building_module
+# define __building_module(...) 0
+# endif
+
// '__is_identifier' returns '0' if '__x' is a reserved identifier provided by
// the compiler and '1' otherwise.
# ifndef __is_identifier
@@ -1200,9 +1124,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c
# ifndef _LIBCPP_ATOMIC_FLAG_TYPE
# define _LIBCPP_ATOMIC_FLAG_TYPE bool
# endif
-# ifdef _LIBCPP_FREESTANDING
-# define _LIBCPP_ATOMIC_ONLY_USE_BUILTINS
-# endif
# endif
# if defined(__FreeBSD__) && defined(__clang__) && __has_attribute(__no_thread_safety_analysis__)
diff --git a/libcxx/include/__format/formatter_floating_point.h b/libcxx/include/__format/formatter_floating_point.h
index 6802a8b7bd4c..f01d323efff5 100644
--- a/libcxx/include/__format/formatter_floating_point.h
+++ b/libcxx/include/__format/formatter_floating_point.h
@@ -16,6 +16,7 @@
#include <__algorithm/min.h>
#include <__algorithm/rotate.h>
#include <__algorithm/transform.h>
+#include <__assert>
#include <__charconv/chars_format.h>
#include <__charconv/to_chars_floating_point.h>
#include <__charconv/to_chars_result.h>
diff --git a/libcxx/include/__numeric/saturation_arithmetic.h b/libcxx/include/__numeric/saturation_arithmetic.h
index 0e6f455cf228..41596a0c58e2 100644
--- a/libcxx/include/__numeric/saturation_arithmetic.h
+++ b/libcxx/include/__numeric/saturation_arithmetic.h
@@ -10,6 +10,7 @@
#ifndef _LIBCPP___NUMERIC_SATURATION_ARITHMETIC_H
#define _LIBCPP___NUMERIC_SATURATION_ARITHMETIC_H
+#include <__assert>
#include <__concepts/arithmetic.h>
#include <__config>
#include <__utility/cmp.h>
diff --git a/libcxx/include/__random/negative_binomial_distribution.h b/libcxx/include/__random/negative_binomial_distribution.h
index eed4f511e871..6d0055d01ed4 100644
--- a/libcxx/include/__random/negative_binomial_distribution.h
+++ b/libcxx/include/__random/negative_binomial_distribution.h
@@ -9,6 +9,7 @@
#ifndef _LIBCPP___RANDOM_NEGATIVE_BINOMIAL_DISTRIBUTION_H
#define _LIBCPP___RANDOM_NEGATIVE_BINOMIAL_DISTRIBUTION_H
+#include <__assert>
#include <__config>
#include <__random/bernoulli_distribution.h>
#include <__random/gamma_distribution.h>
diff --git a/libcxx/include/__ranges/repeat_view.h b/libcxx/include/__ranges/repeat_view.h
index d08f0e0d4e9f..620a26454972 100644
--- a/libcxx/include/__ranges/repeat_view.h
+++ b/libcxx/include/__ranges/repeat_view.h
@@ -10,6 +10,7 @@
#ifndef _LIBCPP___RANGES_REPEAT_VIEW_H
#define _LIBCPP___RANGES_REPEAT_VIEW_H
+#include <__assert>
#include <__concepts/constructible.h>
#include <__concepts/same_as.h>
#include <__concepts/semiregular.h>
diff --git a/libcxx/include/__stop_token/stop_state.h b/libcxx/include/__stop_token/stop_state.h
index 462aa73952b8..df07573f8786 100644
--- a/libcxx/include/__stop_token/stop_state.h
+++ b/libcxx/include/__stop_token/stop_state.h
@@ -10,6 +10,7 @@
#ifndef _LIBCPP___STOP_TOKEN_STOP_STATE_H
#define _LIBCPP___STOP_TOKEN_STOP_STATE_H
+#include <__assert>
#include <__availability>
#include <__config>
#include <__stop_token/atomic_unique_lock.h>
diff --git a/libcxx/include/__string/char_traits.h b/libcxx/include/__string/char_traits.h
index 8ea9625d0718..5880d3a22db2 100644
--- a/libcxx/include/__string/char_traits.h
+++ b/libcxx/include/__string/char_traits.h
@@ -14,6 +14,7 @@
#include <__algorithm/find_end.h>
#include <__algorithm/find_first_of.h>
#include <__algorithm/min.h>
+#include <__assert>
#include <__compare/ordering.h>
#include <__config>
#include <__functional/hash.h>
diff --git a/libcxx/include/__thread/support/pthread.h b/libcxx/include/__thread/support/pthread.h
index d0b8367e448f..d8e3f938ddf6 100644
--- a/libcxx/include/__thread/support/pthread.h
+++ b/libcxx/include/__thread/support/pthread.h
@@ -30,7 +30,10 @@
// so libc++'s <math.h> usually absorbs atomic_wide_counter.h into the
// module with <math.h> and makes atomic_wide_counter.h invisible.
// Include <math.h> here to work around that.
-#include <math.h>
+// This checks whether a Clang module is built
+#if __building_module(std)
+# include <math.h>
+#endif
#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER
# pragma GCC system_header
diff --git a/libcxx/include/__utility/integer_sequence.h b/libcxx/include/__utility/integer_sequence.h
index e63f3f265b7d..ccce9433e7a8 100644
--- a/libcxx/include/__utility/integer_sequence.h
+++ b/libcxx/include/__utility/integer_sequence.h
@@ -31,65 +31,16 @@ struct __integer_sequence {
using __to_tuple_indices = __tuple_indices<(_Values + _Sp)...>;
};
-#if !__has_builtin(__make_integer_seq) || defined(_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE)
-
-namespace __detail {
-
-template <typename _Tp, size_t... _Extra>
-struct __repeat;
-template <typename _Tp, _Tp... _Np, size_t... _Extra>
-struct __repeat<__integer_sequence<_Tp, _Np...>, _Extra...> {
- typedef _LIBCPP_NODEBUG __integer_sequence<
- _Tp,
- _Np...,
- sizeof...(_Np) + _Np...,
- 2 * sizeof...(_Np) + _Np...,
- 3 * sizeof...(_Np) + _Np...,
- 4 * sizeof...(_Np) + _Np...,
- 5 * sizeof...(_Np) + _Np...,
- 6 * sizeof...(_Np) + _Np...,
- 7 * sizeof...(_Np) + _Np...,
- _Extra...>
- type;
-};
-
-template <size_t _Np>
-struct __parity;
-template <size_t _Np>
-struct __make : __parity<_Np % 8>::template __pmake<_Np> {};
-
-// clang-format off
-template<> struct __make<0> { typedef __integer_sequence<size_t> type; };
-template<> struct __make<1> { typedef __integer_sequence<size_t, 0> type; };
-template<> struct __make<2> { typedef __integer_sequence<size_t, 0, 1> type; };
-template<> struct __make<3> { typedef __integer_sequence<size_t, 0, 1, 2> type; };
-template<> struct __make<4> { typedef __integer_sequence<size_t, 0, 1, 2, 3> type; };
-template<> struct __make<5> { typedef __integer_sequence<size_t, 0, 1, 2, 3, 4> type; };
-template<> struct __make<6> { typedef __integer_sequence<size_t, 0, 1, 2, 3, 4, 5> type; };
-template<> struct __make<7> { typedef __integer_sequence<size_t, 0, 1, 2, 3, 4, 5, 6> type; };
-
-template<> struct __parity<0> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type> {}; };
-template<> struct __parity<1> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 1> {}; };
-template<> struct __parity<2> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<3> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<4> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<5> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 5, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<6> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 6, _Np - 5, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<7> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 7, _Np - 6, _Np - 5, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-// clang-format on
-
-} // namespace __detail
-
-#endif
-
#if __has_builtin(__make_integer_seq)
template <size_t _Ep, size_t _Sp>
using __make_indices_imp =
typename __make_integer_seq<__integer_sequence, size_t, _Ep - _Sp>::template __to_tuple_indices<_Sp>;
-#else
+#elif __has_builtin(__integer_pack)
template <size_t _Ep, size_t _Sp>
-using __make_indices_imp = typename __detail::__make<_Ep - _Sp>::type::template __to_tuple_indices<_Sp>;
-
+using __make_indices_imp =
+ typename __integer_sequence<size_t, __integer_pack(_Ep - _Sp)...>::template __to_tuple_indices<_Sp>;
+#else
+# error "No known way to get an integer pack from the compiler"
#endif
#if _LIBCPP_STD_VER >= 14
@@ -104,34 +55,20 @@ struct _LIBCPP_TEMPLATE_VIS integer_sequence {
template <size_t... _Ip>
using index_sequence = integer_sequence<size_t, _Ip...>;
-# if __has_builtin(__make_integer_seq) && !defined(_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE)
+# if __has_builtin(__make_integer_seq)
template <class _Tp, _Tp _Ep>
-using __make_integer_sequence _LIBCPP_NODEBUG = __make_integer_seq<integer_sequence, _Tp, _Ep>;
-
-# else
+using make_integer_sequence _LIBCPP_NODEBUG = __make_integer_seq<integer_sequence, _Tp, _Ep>;
-template <typename _Tp, _Tp _Np>
-using __make_integer_sequence_unchecked _LIBCPP_NODEBUG =
- typename __detail::__make<_Np>::type::template __convert<integer_sequence, _Tp>;
+# elif __has_builtin(__integer_pack)
-template <class _Tp, _Tp _Ep>
-struct __make_integer_sequence_checked {
- static_assert(is_integral<_Tp>::value, "std::make_integer_sequence can only be instantiated with an integral type");
- static_assert(0 <= _Ep, "std::make_integer_sequence must have a non-negative sequence length");
- // Workaround GCC bug by preventing bad installations when 0 <= _Ep
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68929
- typedef _LIBCPP_NODEBUG __make_integer_sequence_unchecked<_Tp, 0 <= _Ep ? _Ep : 0> type;
-};
-
-template <class _Tp, _Tp _Ep>
-using __make_integer_sequence _LIBCPP_NODEBUG = typename __make_integer_sequence_checked<_Tp, _Ep>::type;
+template <class _Tp, _Tp _SequenceSize>
+using make_integer_sequence _LIBCPP_NODEBUG = integer_sequence<_Tp, __integer_pack(_SequenceSize)...>;
+# else
+# error "No known way to get an integer pack from the compiler"
# endif
-template <class _Tp, _Tp _Np>
-using make_integer_sequence = __make_integer_sequence<_Tp, _Np>;
-
template <size_t _Np>
using make_index_sequence = make_integer_sequence<size_t, _Np>;
diff --git a/libcxx/include/algorithm b/libcxx/include/algorithm
index 70e30bc87e81..0f62de7fa83f 100644
--- a/libcxx/include/algorithm
+++ b/libcxx/include/algorithm
@@ -1793,7 +1793,6 @@ template <class BidirectionalIterator, class Compare>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <version>
diff --git a/libcxx/include/any b/libcxx/include/any
index 378dfb6e21b5..ce54803cd91b 100644
--- a/libcxx/include/any
+++ b/libcxx/include/any
@@ -80,7 +80,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
#include <__memory/allocator.h>
diff --git a/libcxx/include/array b/libcxx/include/array
index 41f016a4859a..961b620efb93 100644
--- a/libcxx/include/array
+++ b/libcxx/include/array
@@ -116,7 +116,7 @@ template <size_t I, class T, size_t N> const T&& get(const array<T, N>&&) noexce
#include <__algorithm/lexicographical_compare.h>
#include <__algorithm/lexicographical_compare_three_way.h>
#include <__algorithm/swap_ranges.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__config>
#include <__fwd/array.h>
#include <__iterator/reverse_iterator.h>
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index 2e8f5b521a55..61ff61d415dd 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -587,7 +587,6 @@ template <class T>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__atomic/aliases.h>
#include <__atomic/atomic.h>
#include <__atomic/atomic_base.h>
@@ -620,6 +619,7 @@ template <class T>
#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
# include <cmath>
# include <compare>
+# include <cstdlib>
# include <cstring>
# include <type_traits>
#endif
diff --git a/libcxx/include/barrier b/libcxx/include/barrier
index f91452c8d006..c5fd84b91925 100644
--- a/libcxx/include/barrier
+++ b/libcxx/include/barrier
@@ -51,7 +51,7 @@ namespace std
# error "<barrier> is not supported since libc++ has been configured without support for threads."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__atomic/atomic_base.h>
#include <__atomic/memory_order.h>
#include <__availability>
diff --git a/libcxx/include/bit b/libcxx/include/bit
index 84e2080377e4..b8e4bdc2dfe2 100644
--- a/libcxx/include/bit
+++ b/libcxx/include/bit
@@ -61,7 +61,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__bit/bit_cast.h>
#include <__bit/bit_ceil.h>
#include <__bit/bit_floor.h>
diff --git a/libcxx/include/bitset b/libcxx/include/bitset
index 95f7a63b2317..8818ab6563b5 100644
--- a/libcxx/include/bitset
+++ b/libcxx/include/bitset
@@ -129,7 +129,6 @@ template <size_t N> struct hash<std::bitset<N>>;
#include <__algorithm/count.h>
#include <__algorithm/fill.h>
#include <__algorithm/find.h>
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__bit_reference>
#include <__config>
#include <__functional/hash.h>
diff --git a/libcxx/include/cassert b/libcxx/include/cassert
index 761f57dee1db..6fec37dc6376 100644
--- a/libcxx/include/cassert
+++ b/libcxx/include/cassert
@@ -16,7 +16,6 @@ Macros:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
// <assert.h> is not provided by libc++
diff --git a/libcxx/include/ccomplex b/libcxx/include/ccomplex
index cf05c7a91081..94d2c8d7d003 100644
--- a/libcxx/include/ccomplex
+++ b/libcxx/include/ccomplex
@@ -17,7 +17,6 @@
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <complex>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/libcxx/include/cctype b/libcxx/include/cctype
index 32be6f38e5f8..d7af7e084aa2 100644
--- a/libcxx/include/cctype
+++ b/libcxx/include/cctype
@@ -34,7 +34,6 @@ int toupper(int c);
} // std
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <ctype.h>
diff --git a/libcxx/include/cerrno b/libcxx/include/cerrno
index 937ec23c6971..d488fa72a54b 100644
--- a/libcxx/include/cerrno
+++ b/libcxx/include/cerrno
@@ -22,7 +22,6 @@ Macros:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <errno.h>
diff --git a/libcxx/include/cfenv b/libcxx/include/cfenv
index 16b3761ee27b..f8cacd562f76 100644
--- a/libcxx/include/cfenv
+++ b/libcxx/include/cfenv
@@ -52,7 +52,6 @@ int feupdateenv(const fenv_t* envp);
} // std
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <fenv.h>
diff --git a/libcxx/include/cfloat b/libcxx/include/cfloat
index 4f991dd49ff4..5d1b38c557dc 100644
--- a/libcxx/include/cfloat
+++ b/libcxx/include/cfloat
@@ -69,7 +69,6 @@ Macros:
LDBL_TRUE_MIN // C11
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <float.h>
diff --git a/libcxx/include/charconv b/libcxx/include/charconv
index 5a2869acba87..5bc7b9011be0 100644
--- a/libcxx/include/charconv
+++ b/libcxx/include/charconv
@@ -69,7 +69,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__charconv/chars_format.h>
#include <__charconv/from_chars_integral.h>
#include <__charconv/from_chars_result.h>
diff --git a/libcxx/include/chrono b/libcxx/include/chrono
index fe73f7c772b9..b3b260c2a998 100644
--- a/libcxx/include/chrono
+++ b/libcxx/include/chrono
@@ -825,7 +825,6 @@ constexpr chrono::year operator ""y(unsigned lo
// clang-format on
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__chrono/calendar.h>
#include <__chrono/convert_to_timespec.h>
#include <__chrono/convert_to_tm.h>
diff --git a/libcxx/include/cinttypes b/libcxx/include/cinttypes
index a5b9558abde1..52663a4f35fa 100644
--- a/libcxx/include/cinttypes
+++ b/libcxx/include/cinttypes
@@ -234,7 +234,6 @@ uintmax_t wcstoumax(const wchar_t* restrict nptr, wchar_t** restrict endptr, int
} // std
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
// standard-mandated includes
diff --git a/libcxx/include/ciso646 b/libcxx/include/ciso646
index e0cd722495ed..1d859f08fac5 100644
--- a/libcxx/include/ciso646
+++ b/libcxx/include/ciso646
@@ -15,7 +15,6 @@
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/libcxx/include/climits b/libcxx/include/climits
index 2e8993e4d6a5..bcd8b4a56a07 100644
--- a/libcxx/include/climits
+++ b/libcxx/include/climits
@@ -37,7 +37,6 @@ Macros:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <limits.h>
diff --git a/libcxx/include/clocale b/libcxx/include/clocale
index e2ace355d7b6..c689a64be288 100644
--- a/libcxx/include/clocale
+++ b/libcxx/include/clocale
@@ -34,7 +34,6 @@ lconv* localeconv();
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <locale.h>
diff --git a/libcxx/include/cmath b/libcxx/include/cmath
index 798ddb4963b0..dd194bbb5589 100644
--- a/libcxx/include/cmath
+++ b/libcxx/include/cmath
@@ -304,7 +304,6 @@ constexpr long double lerp(long double a, long double b, long double t) noexcept
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__type_traits/enable_if.h>
#include <__type_traits/is_arithmetic.h>
diff --git a/libcxx/include/codecvt b/libcxx/include/codecvt
index 504dd71f3004..b7182ff47155 100644
--- a/libcxx/include/codecvt
+++ b/libcxx/include/codecvt
@@ -54,7 +54,6 @@ class codecvt_utf8_utf16
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__locale>
#include <version>
diff --git a/libcxx/include/compare b/libcxx/include/compare
index cc0cae8a544d..93953254b784 100644
--- a/libcxx/include/compare
+++ b/libcxx/include/compare
@@ -140,7 +140,6 @@ namespace std {
}
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__compare/common_comparison_category.h>
#include <__compare/compare_partial_order_fallback.h>
#include <__compare/compare_strong_order_fallback.h>
diff --git a/libcxx/include/complex b/libcxx/include/complex
index 0aba60e514ba..e996485a38ae 100644
--- a/libcxx/include/complex
+++ b/libcxx/include/complex
@@ -256,7 +256,6 @@ template<class T> complex<T> tanh (const complex<T>&);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__fwd/complex.h>
#include <__tuple/tuple_element.h>
diff --git a/libcxx/include/concepts b/libcxx/include/concepts
index 5fdf30ecfbd3..e10f5ab5ad8a 100644
--- a/libcxx/include/concepts
+++ b/libcxx/include/concepts
@@ -129,7 +129,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__concepts/arithmetic.h>
#include <__concepts/assignable.h>
#include <__concepts/boolean_testable.h>
diff --git a/libcxx/include/condition_variable b/libcxx/include/condition_variable
index 6aac3c13ef4a..4ded1140d46b 100644
--- a/libcxx/include/condition_variable
+++ b/libcxx/include/condition_variable
@@ -118,7 +118,6 @@ public:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__chrono/duration.h>
#include <__chrono/steady_clock.h>
diff --git a/libcxx/include/coroutine b/libcxx/include/coroutine
index f264570128bb..4bd1d4e9c310 100644
--- a/libcxx/include/coroutine
+++ b/libcxx/include/coroutine
@@ -38,7 +38,6 @@ struct suspend_always;
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__coroutine/coroutine_handle.h>
#include <__coroutine/coroutine_traits.h>
diff --git a/libcxx/include/csetjmp b/libcxx/include/csetjmp
index 9012cad22ebe..7ba90068710a 100644
--- a/libcxx/include/csetjmp
+++ b/libcxx/include/csetjmp
@@ -30,7 +30,6 @@ void longjmp(jmp_buf env, int val);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
// <setjmp.h> is not provided by libc++
diff --git a/libcxx/include/csignal b/libcxx/include/csignal
index cf45f507535e..804a7f95ae96 100644
--- a/libcxx/include/csignal
+++ b/libcxx/include/csignal
@@ -39,7 +39,6 @@ int raise(int sig);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
// <signal.h> is not provided by libc++
diff --git a/libcxx/include/cstdarg b/libcxx/include/cstdarg
index 3a4291f4584a..4642eb7b5258 100644
--- a/libcxx/include/cstdarg
+++ b/libcxx/include/cstdarg
@@ -31,7 +31,6 @@ Types:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
// <stdarg.h> is not provided by libc++
diff --git a/libcxx/include/cstdbool b/libcxx/include/cstdbool
index ce608033a22c..ef731c021a4a 100644
--- a/libcxx/include/cstdbool
+++ b/libcxx/include/cstdbool
@@ -19,7 +19,6 @@ Macros:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/libcxx/include/cstddef b/libcxx/include/cstddef
index 1d7bac24c81e..ed16ae44fb2b 100644
--- a/libcxx/include/cstddef
+++ b/libcxx/include/cstddef
@@ -33,7 +33,6 @@ Types:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__type_traits/enable_if.h>
#include <__type_traits/integral_constant.h>
diff --git a/libcxx/include/cstdint b/libcxx/include/cstdint
index 829d9398f387..8c4782859426 100644
--- a/libcxx/include/cstdint
+++ b/libcxx/include/cstdint
@@ -140,7 +140,6 @@ Types:
} // std
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <stdint.h>
diff --git a/libcxx/include/cstdio b/libcxx/include/cstdio
index 0a867cec1a38..7f94371081f8 100644
--- a/libcxx/include/cstdio
+++ b/libcxx/include/cstdio
@@ -95,7 +95,6 @@ void perror(const char* s);
} // std
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <stdio.h>
diff --git a/libcxx/include/cstdlib b/libcxx/include/cstdlib
index 9bf0ea3f73b1..c817fd8f4acc 100644
--- a/libcxx/include/cstdlib
+++ b/libcxx/include/cstdlib
@@ -81,7 +81,6 @@ void *aligned_alloc(size_t alignment, size_t size); // C11
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <stdlib.h>
diff --git a/libcxx/include/cstring b/libcxx/include/cstring
index a9bdf4ff2dfc..c2c92b02e73c 100644
--- a/libcxx/include/cstring
+++ b/libcxx/include/cstring
@@ -56,7 +56,6 @@ size_t strlen(const char* s);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__type_traits/is_constant_evaluated.h>
diff --git a/libcxx/include/ctgmath b/libcxx/include/ctgmath
index bfcf2f98d470..6237979be490 100644
--- a/libcxx/include/ctgmath
+++ b/libcxx/include/ctgmath
@@ -18,7 +18,6 @@
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <ccomplex>
#include <cmath>
diff --git a/libcxx/include/ctime b/libcxx/include/ctime
index b61e19d6446d..f47b49a43e23 100644
--- a/libcxx/include/ctime
+++ b/libcxx/include/ctime
@@ -45,7 +45,6 @@ int timespec_get( struct timespec *ts, int base); // C++17
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
// <time.h> is not provided by libc++
diff --git a/libcxx/include/cuchar b/libcxx/include/cuchar
index 03b8c7d2a88b..f0015be27536 100644
--- a/libcxx/include/cuchar
+++ b/libcxx/include/cuchar
@@ -36,7 +36,6 @@ size_t c32rtomb(char* s, char32_t c32, mbstate_t* ps);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <uchar.h>
diff --git a/libcxx/include/cwchar b/libcxx/include/cwchar
index 122af242880e..7442438d8f44 100644
--- a/libcxx/include/cwchar
+++ b/libcxx/include/cwchar
@@ -102,7 +102,6 @@ size_t wcsrtombs(char* restrict dst, const wchar_t** restrict src, size_t len,
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__type_traits/apply_cv.h>
#include <__type_traits/is_constant_evaluated.h>
diff --git a/libcxx/include/cwctype b/libcxx/include/cwctype
index 5a2d2427d847..04abfabef579 100644
--- a/libcxx/include/cwctype
+++ b/libcxx/include/cwctype
@@ -49,7 +49,6 @@ wctrans_t wctrans(const char* property);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <cctype>
diff --git a/libcxx/include/deque b/libcxx/include/deque
index c539a06bdd95..85ea9c6f661e 100644
--- a/libcxx/include/deque
+++ b/libcxx/include/deque
@@ -188,7 +188,7 @@ template <class T, class Allocator, class Predicate>
#include <__algorithm/remove.h>
#include <__algorithm/remove_if.h>
#include <__algorithm/unwrap_iter.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__format/enable_insertable.h>
diff --git a/libcxx/include/exception b/libcxx/include/exception
index 97fee977690d..5eff8e3f8a4b 100644
--- a/libcxx/include/exception
+++ b/libcxx/include/exception
@@ -76,7 +76,6 @@ template <class E> void rethrow_if_nested(const E& e);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__exception/exception.h>
#include <__exception/exception_ptr.h>
diff --git a/libcxx/include/execution b/libcxx/include/execution
index 56facc87379e..822ffa1fd3eb 100644
--- a/libcxx/include/execution
+++ b/libcxx/include/execution
@@ -32,7 +32,6 @@ namespace std {
}
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__type_traits/is_execution_policy.h>
#include <__type_traits/is_same.h>
diff --git a/libcxx/include/expected b/libcxx/include/expected
index 44d0ce6b00c8..f455ab7d5d61 100644
--- a/libcxx/include/expected
+++ b/libcxx/include/expected
@@ -38,7 +38,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__expected/bad_expected_access.h>
#include <__expected/expected.h>
diff --git a/libcxx/include/experimental/__simd/scalar.h b/libcxx/include/experimental/__simd/scalar.h
index 717fd6cd92d7..aff2cd11cfcf 100644
--- a/libcxx/include/experimental/__simd/scalar.h
+++ b/libcxx/include/experimental/__simd/scalar.h
@@ -10,6 +10,7 @@
#ifndef _LIBCPP_EXPERIMENTAL___SIMD_SCALAR_H
#define _LIBCPP_EXPERIMENTAL___SIMD_SCALAR_H
+#include <__assert>
#include <cstddef>
#include <experimental/__config>
#include <experimental/__simd/declaration.h>
diff --git a/libcxx/include/experimental/__simd/vec_ext.h b/libcxx/include/experimental/__simd/vec_ext.h
index 7883132ba6c0..c9423df93cfa 100644
--- a/libcxx/include/experimental/__simd/vec_ext.h
+++ b/libcxx/include/experimental/__simd/vec_ext.h
@@ -10,6 +10,7 @@
#ifndef _LIBCPP_EXPERIMENTAL___SIMD_VEC_EXT_H
#define _LIBCPP_EXPERIMENTAL___SIMD_VEC_EXT_H
+#include <__assert>
#include <__bit/bit_ceil.h>
#include <__utility/forward.h>
#include <__utility/integer_sequence.h>
diff --git a/libcxx/include/experimental/iterator b/libcxx/include/experimental/iterator
index e9c1fb6924ec..de82da2d3d72 100644
--- a/libcxx/include/experimental/iterator
+++ b/libcxx/include/experimental/iterator
@@ -52,7 +52,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__memory/addressof.h>
#include <__type_traits/decay.h>
#include <__utility/forward.h>
diff --git a/libcxx/include/experimental/propagate_const b/libcxx/include/experimental/propagate_const
index 06d7ba43daf1..8c2ceb9def33 100644
--- a/libcxx/include/experimental/propagate_const
+++ b/libcxx/include/experimental/propagate_const
@@ -107,7 +107,6 @@
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__functional/operations.h>
#include <__fwd/hash.h>
#include <__type_traits/conditional.h>
diff --git a/libcxx/include/experimental/simd b/libcxx/include/experimental/simd
index adca9faa47bb..fad6431d13a1 100644
--- a/libcxx/include/experimental/simd
+++ b/libcxx/include/experimental/simd
@@ -71,8 +71,6 @@ inline namespace parallelism_v2 {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
-
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
diff --git a/libcxx/include/experimental/type_traits b/libcxx/include/experimental/type_traits
index 62f9574ec58f..37be434f8edd 100644
--- a/libcxx/include/experimental/type_traits
+++ b/libcxx/include/experimental/type_traits
@@ -68,7 +68,6 @@ inline namespace fundamentals_v1 {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <experimental/__config>
#if _LIBCPP_STD_VER >= 14
diff --git a/libcxx/include/experimental/utility b/libcxx/include/experimental/utility
index c1bd9364fd51..8bd0a055b778 100644
--- a/libcxx/include/experimental/utility
+++ b/libcxx/include/experimental/utility
@@ -30,7 +30,6 @@ inline namespace fundamentals_v1 {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <experimental/__config>
#include <utility>
diff --git a/libcxx/include/ext/hash_map b/libcxx/include/ext/hash_map
index 7ac268d5dcbd..7b5b31c40817 100644
--- a/libcxx/include/ext/hash_map
+++ b/libcxx/include/ext/hash_map
@@ -201,7 +201,6 @@ template <class Key, class T, class Hash, class Pred, class Alloc>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__hash_table>
#include <algorithm>
diff --git a/libcxx/include/ext/hash_set b/libcxx/include/ext/hash_set
index 79f0925f6f4c..1ab259b59979 100644
--- a/libcxx/include/ext/hash_set
+++ b/libcxx/include/ext/hash_set
@@ -192,7 +192,6 @@ template <class Value, class Hash, class Pred, class Alloc>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__hash_table>
#include <algorithm>
diff --git a/libcxx/include/filesystem b/libcxx/include/filesystem
index ec68354a9fc9..b344ed468082 100644
--- a/libcxx/include/filesystem
+++ b/libcxx/include/filesystem
@@ -533,7 +533,6 @@ inline constexpr bool std::ranges::enable_view<std::filesystem::recursive_direct
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__filesystem/copy_options.h>
#include <__filesystem/directory_entry.h>
diff --git a/libcxx/include/format b/libcxx/include/format
index 64f6ba1d2528..b2fe0053b974 100644
--- a/libcxx/include/format
+++ b/libcxx/include/format
@@ -188,7 +188,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__format/buffer.h>
#include <__format/concepts.h>
diff --git a/libcxx/include/forward_list b/libcxx/include/forward_list
index ffa390f42a10..a62b171a4678 100644
--- a/libcxx/include/forward_list
+++ b/libcxx/include/forward_list
@@ -199,7 +199,6 @@ template <class T, class Allocator, class Predicate>
#include <__algorithm/lexicographical_compare.h>
#include <__algorithm/lexicographical_compare_three_way.h>
#include <__algorithm/min.h>
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
#include <__iterator/distance.h>
diff --git a/libcxx/include/fstream b/libcxx/include/fstream
index 203cc6dfb4b1..513c8dc2b127 100644
--- a/libcxx/include/fstream
+++ b/libcxx/include/fstream
@@ -187,7 +187,7 @@ typedef basic_fstream<wchar_t> wfstream;
*/
#include <__algorithm/max.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__fwd/fstream.h>
diff --git a/libcxx/include/functional b/libcxx/include/functional
index fd99e11fb181..a2774a48bda0 100644
--- a/libcxx/include/functional
+++ b/libcxx/include/functional
@@ -513,7 +513,6 @@ POLICY: For non-variadic implementations, the number of arguments is limited
*/
#include <__algorithm/search.h>
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__compare/compare_three_way.h>
#include <__config>
#include <__functional/binary_function.h>
diff --git a/libcxx/include/future b/libcxx/include/future
index 13828680f033..fda1591818a6 100644
--- a/libcxx/include/future
+++ b/libcxx/include/future
@@ -368,7 +368,7 @@ template <class R, class Alloc> struct uses_allocator<packaged_task<R>, Alloc>;
# error "<future> is not supported since libc++ has been configured without support for threads."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__chrono/duration.h>
#include <__chrono/time_point.h>
diff --git a/libcxx/include/initializer_list b/libcxx/include/initializer_list
index 4c2a7925a57b..680ca1cd20d5 100644
--- a/libcxx/include/initializer_list
+++ b/libcxx/include/initializer_list
@@ -42,7 +42,6 @@ template<class E> const E* end(initializer_list<E> il) noexcept; // constexpr in
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <cstddef>
diff --git a/libcxx/include/iomanip b/libcxx/include/iomanip
index 867408affd22..fb4f15b9a585 100644
--- a/libcxx/include/iomanip
+++ b/libcxx/include/iomanip
@@ -42,7 +42,6 @@ template <class charT, class traits, class Allocator>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <istream>
#include <version>
diff --git a/libcxx/include/ios b/libcxx/include/ios
index 8465860d08dc..4b1306fc2ad8 100644
--- a/libcxx/include/ios
+++ b/libcxx/include/ios
@@ -217,7 +217,6 @@ storage-class-specifier const error_category& iostream_category() noexcept;
# error "The iostreams library is not supported since libc++ has been configured without support for localization."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__fwd/ios.h>
#include <__ios/fpos.h>
#include <__locale>
diff --git a/libcxx/include/iosfwd b/libcxx/include/iosfwd
index e28998d00415..1579fa12754d 100644
--- a/libcxx/include/iosfwd
+++ b/libcxx/include/iosfwd
@@ -106,7 +106,6 @@ using wosyncstream = basic_osyncstream<wchar_t>; // C++20
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__fwd/fstream.h>
#include <__fwd/ios.h>
diff --git a/libcxx/include/iostream b/libcxx/include/iostream
index 568ce8caed6e..5df45c6d3f78 100644
--- a/libcxx/include/iostream
+++ b/libcxx/include/iostream
@@ -33,7 +33,6 @@ extern wostream wclog;
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <version>
diff --git a/libcxx/include/istream b/libcxx/include/istream
index 7975a9e599a5..3f20c355046c 100644
--- a/libcxx/include/istream
+++ b/libcxx/include/istream
@@ -158,7 +158,6 @@ template <class Stream, class T>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__fwd/istream.h>
#include <__iterator/istreambuf_iterator.h>
diff --git a/libcxx/include/iterator b/libcxx/include/iterator
index 2f9280742370..5779bf828711 100644
--- a/libcxx/include/iterator
+++ b/libcxx/include/iterator
@@ -674,7 +674,6 @@ template <class E> constexpr const E* data(initializer_list<E> il) noexcept;
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__iterator/access.h>
#include <__iterator/advance.h>
diff --git a/libcxx/include/latch b/libcxx/include/latch
index dd389d296f5c..3fe201b63d13 100644
--- a/libcxx/include/latch
+++ b/libcxx/include/latch
@@ -46,7 +46,7 @@ namespace std
# error "<latch> is not supported since libc++ has been configured without support for threads."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__atomic/atomic_base.h>
#include <__atomic/atomic_sync.h>
#include <__atomic/memory_order.h>
diff --git a/libcxx/include/libcxx.imp b/libcxx/include/libcxx.imp
index 22fbea99b848..eeeae39ca101 100644
--- a/libcxx/include/libcxx.imp
+++ b/libcxx/include/libcxx.imp
@@ -425,17 +425,17 @@
{ include: [ "<__fwd/bit_reference.h>", "private", "<bitset>", "public" ] },
{ include: [ "<__fwd/bit_reference.h>", "private", "<vector>", "public" ] },
{ include: [ "<__fwd/complex.h>", "private", "<complex>", "public" ] },
- { include: [ "<__fwd/fstream.h>", "private", "<fstream>", "public" ] },
+ { include: [ "<__fwd/fstream.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/hash.h>", "private", "<functional>", "public" ] },
- { include: [ "<__fwd/ios.h>", "private", "<ios>", "public" ] },
- { include: [ "<__fwd/istream.h>", "private", "<istream>", "public" ] },
+ { include: [ "<__fwd/ios.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/istream.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/mdspan.h>", "private", "<mdspan>", "public" ] },
{ include: [ "<__fwd/memory_resource.h>", "private", "<memory_resource>", "public" ] },
- { include: [ "<__fwd/ostream.h>", "private", "<ostream>", "public" ] },
+ { include: [ "<__fwd/ostream.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/pair.h>", "private", "<utility>", "public" ] },
{ include: [ "<__fwd/span.h>", "private", "<span>", "public" ] },
- { include: [ "<__fwd/sstream.h>", "private", "<sstream>", "public" ] },
- { include: [ "<__fwd/streambuf.h>", "private", "<streambuf>", "public" ] },
+ { include: [ "<__fwd/sstream.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/streambuf.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/string.h>", "private", "<string>", "public" ] },
{ include: [ "<__fwd/string_view.h>", "private", "<string_view>", "public" ] },
{ include: [ "<__fwd/subrange.h>", "private", "<ranges>", "public" ] },
diff --git a/libcxx/include/limits b/libcxx/include/limits
index c704b4dddaf8..f15b5b1ab1d5 100644
--- a/libcxx/include/limits
+++ b/libcxx/include/limits
@@ -102,7 +102,6 @@ template<> class numeric_limits<cv long double>;
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__type_traits/is_arithmetic.h>
#include <__type_traits/is_signed.h>
diff --git a/libcxx/include/list b/libcxx/include/list
index 2705d4c9914d..8f0689268e2a 100644
--- a/libcxx/include/list
+++ b/libcxx/include/list
@@ -202,7 +202,7 @@ template <class T, class Allocator, class Predicate>
#include <__algorithm/lexicographical_compare.h>
#include <__algorithm/lexicographical_compare_three_way.h>
#include <__algorithm/min.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__format/enable_insertable.h>
diff --git a/libcxx/include/locale b/libcxx/include/locale
index 9e97eb9f3395..e3c63e3abe13 100644
--- a/libcxx/include/locale
+++ b/libcxx/include/locale
@@ -193,7 +193,7 @@ template <class charT> class messages_byname;
#include <__algorithm/max.h>
#include <__algorithm/reverse.h>
#include <__algorithm/unwrap_iter.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__config>
#include <__iterator/access.h>
#include <__iterator/back_insert_iterator.h>
diff --git a/libcxx/include/map b/libcxx/include/map
index a56584589f5c..5b6ec9d3a219 100644
--- a/libcxx/include/map
+++ b/libcxx/include/map
@@ -574,7 +574,7 @@ erase_if(multimap<Key, T, Compare, Allocator>& c, Predicate pred); // C++20
#include <__algorithm/equal.h>
#include <__algorithm/lexicographical_compare.h>
#include <__algorithm/lexicographical_compare_three_way.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__functional/binary_function.h>
diff --git a/libcxx/include/memory b/libcxx/include/memory
index 0ada7cdfa206..a8c0264eb9eb 100644
--- a/libcxx/include/memory
+++ b/libcxx/include/memory
@@ -917,7 +917,6 @@ template<size_t N, class T>
// clang-format on
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__memory/addressof.h>
#include <__memory/align.h>
diff --git a/libcxx/include/mutex b/libcxx/include/mutex
index e67135fc0ec0..ea56e3051908 100644
--- a/libcxx/include/mutex
+++ b/libcxx/include/mutex
@@ -186,7 +186,6 @@ template<class Callable, class ...Args>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__chrono/steady_clock.h>
#include <__chrono/time_point.h>
#include <__condition_variable/condition_variable.h>
diff --git a/libcxx/include/new b/libcxx/include/new
index 86fbcb524b66..988f7a84422c 100644
--- a/libcxx/include/new
+++ b/libcxx/include/new
@@ -86,13 +86,13 @@ void operator delete[](void* ptr, void*) noexcept;
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
#include <__exception/exception.h>
#include <__type_traits/is_function.h>
#include <__type_traits/is_same.h>
#include <__type_traits/remove_cv.h>
+#include <__verbose_abort>
#include <cstddef>
#include <version>
diff --git a/libcxx/include/numbers b/libcxx/include/numbers
index 0d834c6b863f..f48ba4baf38f 100644
--- a/libcxx/include/numbers
+++ b/libcxx/include/numbers
@@ -58,7 +58,6 @@ namespace std::numbers {
}
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__concepts/arithmetic.h>
#include <__config>
#include <version>
diff --git a/libcxx/include/numeric b/libcxx/include/numeric
index 0fe7115f1c66..8b429fa2f7e7 100644
--- a/libcxx/include/numeric
+++ b/libcxx/include/numeric
@@ -156,7 +156,6 @@ constexpr T saturate_cast(U x) noexcept; // freestanding, Sin
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <version>
diff --git a/libcxx/include/optional b/libcxx/include/optional
index 73da0a8a5a7c..9e4f0fff2f4a 100644
--- a/libcxx/include/optional
+++ b/libcxx/include/optional
@@ -177,7 +177,7 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__compare/compare_three_way_result.h>
#include <__compare/three_way_comparable.h>
diff --git a/libcxx/include/ostream b/libcxx/include/ostream
index 2e2607340a5d..42819ceb252c 100644
--- a/libcxx/include/ostream
+++ b/libcxx/include/ostream
@@ -171,7 +171,6 @@ void vprint_nonunicode(ostream& os, string_view fmt, format_args args);
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
#include <__exception/operations.h>
diff --git a/libcxx/include/print b/libcxx/include/print
index 543a540ee4f2..a9f10433a7dc 100644
--- a/libcxx/include/print
+++ b/libcxx/include/print
@@ -31,7 +31,7 @@ namespace std {
}
*/
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__concepts/same_as.h>
#include <__config>
diff --git a/libcxx/include/queue b/libcxx/include/queue
index 2263f71fde90..521a465713cd 100644
--- a/libcxx/include/queue
+++ b/libcxx/include/queue
@@ -258,7 +258,6 @@ template <class T, class Container, class Compare>
#include <__algorithm/pop_heap.h>
#include <__algorithm/push_heap.h>
#include <__algorithm/ranges_copy.h>
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__functional/operations.h>
#include <__iterator/back_insert_iterator.h>
diff --git a/libcxx/include/random b/libcxx/include/random
index 02d71ad6dd25..9edd6c4608ec 100644
--- a/libcxx/include/random
+++ b/libcxx/include/random
@@ -1677,7 +1677,6 @@ class piecewise_linear_distribution
} // std
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__random/bernoulli_distribution.h>
#include <__random/binomial_distribution.h>
diff --git a/libcxx/include/ranges b/libcxx/include/ranges
index 660d533b2a78..167d2137eaf4 100644
--- a/libcxx/include/ranges
+++ b/libcxx/include/ranges
@@ -375,7 +375,6 @@ namespace std {
}
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__ranges/access.h>
#include <__ranges/all.h>
diff --git a/libcxx/include/ratio b/libcxx/include/ratio
index de656f38e01d..b989c272aaee 100644
--- a/libcxx/include/ratio
+++ b/libcxx/include/ratio
@@ -81,7 +81,6 @@ using quetta = ratio <1'000'000'000'000'000'000'000'000'000'000, 1>; // Since C+
}
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__type_traits/integral_constant.h>
#include <climits>
diff --git a/libcxx/include/regex b/libcxx/include/regex
index 48af5b8b57fd..dc3db93744b4 100644
--- a/libcxx/include/regex
+++ b/libcxx/include/regex
@@ -791,7 +791,7 @@ typedef regex_token_iterator<wstring::const_iterator> wsregex_token_iterator;
#include <__algorithm/find.h>
#include <__algorithm/search.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__iterator/back_insert_iterator.h>
diff --git a/libcxx/include/scoped_allocator b/libcxx/include/scoped_allocator
index fa6c6c5d20d8..c53261025be9 100644
--- a/libcxx/include/scoped_allocator
+++ b/libcxx/include/scoped_allocator
@@ -109,7 +109,6 @@ template <class OuterA1, class OuterA2, class... InnerAllocs>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__memory/allocator_traits.h>
#include <__memory/uses_allocator_construction.h>
diff --git a/libcxx/include/semaphore b/libcxx/include/semaphore
index 448b5fbd8c58..2dfdae9aa148 100644
--- a/libcxx/include/semaphore
+++ b/libcxx/include/semaphore
@@ -51,7 +51,7 @@ using binary_semaphore = counting_semaphore<1>;
# error "<semaphore> is not supported since libc++ has been configured without support for threads."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__atomic/atomic_base.h>
#include <__atomic/atomic_sync.h>
#include <__atomic/memory_order.h>
diff --git a/libcxx/include/set b/libcxx/include/set
index 7f8245f8b605..e2e87e4cdcfe 100644
--- a/libcxx/include/set
+++ b/libcxx/include/set
@@ -515,7 +515,7 @@ erase_if(multiset<Key, Compare, Allocator>& c, Predicate pred); // C++20
#include <__algorithm/equal.h>
#include <__algorithm/lexicographical_compare.h>
#include <__algorithm/lexicographical_compare_three_way.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__functional/is_transparent.h>
diff --git a/libcxx/include/shared_mutex b/libcxx/include/shared_mutex
index 57f385b5435e..38b559e8930f 100644
--- a/libcxx/include/shared_mutex
+++ b/libcxx/include/shared_mutex
@@ -128,7 +128,6 @@ template <class Mutex>
# error "<shared_mutex> is not supported since libc++ has been configured without support for threads."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__chrono/duration.h>
#include <__chrono/steady_clock.h>
diff --git a/libcxx/include/span b/libcxx/include/span
index 32364b4270be..9efaac517fc8 100644
--- a/libcxx/include/span
+++ b/libcxx/include/span
@@ -128,7 +128,7 @@ template<class R>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__config>
#include <__fwd/span.h>
#include <__iterator/bounded_iter.h>
diff --git a/libcxx/include/sstream b/libcxx/include/sstream
index 8862e2ef99f8..60bec52209d7 100644
--- a/libcxx/include/sstream
+++ b/libcxx/include/sstream
@@ -278,7 +278,6 @@ typedef basic_stringstream<wchar_t> wstringstream;
// clang-format on
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
#include <__fwd/sstream.h>
diff --git a/libcxx/include/stack b/libcxx/include/stack
index 77f1a4e11b73..4003792600a0 100644
--- a/libcxx/include/stack
+++ b/libcxx/include/stack
@@ -114,7 +114,6 @@ template <class T, class Container>
*/
#include <__algorithm/ranges_copy.h>
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__iterator/back_insert_iterator.h>
#include <__iterator/iterator_traits.h>
diff --git a/libcxx/include/stdexcept b/libcxx/include/stdexcept
index 3016c130a91b..4e4cd22a6a64 100644
--- a/libcxx/include/stdexcept
+++ b/libcxx/include/stdexcept
@@ -41,7 +41,6 @@ public:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__exception/exception.h>
#include <__fwd/string.h>
diff --git a/libcxx/include/stop_token b/libcxx/include/stop_token
index 66c7a6ab5996..fee195f9d63d 100644
--- a/libcxx/include/stop_token
+++ b/libcxx/include/stop_token
@@ -37,7 +37,6 @@ namespace std {
# error "<stop_token> is not supported since libc++ has been configured without support for threads."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__stop_token/stop_callback.h>
#include <__stop_token/stop_source.h>
#include <__stop_token/stop_token.h>
diff --git a/libcxx/include/streambuf b/libcxx/include/streambuf
index aad7686a435c..aec537866c20 100644
--- a/libcxx/include/streambuf
+++ b/libcxx/include/streambuf
@@ -107,7 +107,6 @@ protected:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__fwd/streambuf.h>
#include <__type_traits/is_same.h>
diff --git a/libcxx/include/string b/libcxx/include/string
index 530a22338604..ca5b3fa6a014 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -572,7 +572,7 @@ basic_string<char32_t> operator""s( const char32_t *str, size_t len );
#include <__algorithm/min.h>
#include <__algorithm/remove.h>
#include <__algorithm/remove_if.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__config>
#include <__format/enable_insertable.h>
#include <__functional/hash.h>
diff --git a/libcxx/include/string_view b/libcxx/include/string_view
index e414507a7933..48bbcd800216 100644
--- a/libcxx/include/string_view
+++ b/libcxx/include/string_view
@@ -206,7 +206,7 @@ namespace std {
// clang-format on
#include <__algorithm/min.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__config>
#include <__functional/hash.h>
#include <__functional/unary_function.h>
diff --git a/libcxx/include/strstream b/libcxx/include/strstream
index e20c86baa6df..e9f533644f78 100644
--- a/libcxx/include/strstream
+++ b/libcxx/include/strstream
@@ -129,7 +129,6 @@ private:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <istream>
#include <ostream>
diff --git a/libcxx/include/system_error b/libcxx/include/system_error
index a60c98492aac..eeab347788a9 100644
--- a/libcxx/include/system_error
+++ b/libcxx/include/system_error
@@ -144,7 +144,6 @@ template <> struct hash<std::error_condition>;
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__system_error/errc.h>
#include <__system_error/error_category.h>
diff --git a/libcxx/include/thread b/libcxx/include/thread
index 29c7e86785cd..ed70bde76094 100644
--- a/libcxx/include/thread
+++ b/libcxx/include/thread
@@ -92,7 +92,6 @@ void sleep_for(const chrono::duration<Rep, Period>& rel_time);
# error "<thread> is not supported since libc++ has been configured without support for threads."
#endif
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__thread/formatter.h>
#include <__thread/jthread.h>
diff --git a/libcxx/include/tuple b/libcxx/include/tuple
index 96cf3be85b76..0101d64aea4a 100644
--- a/libcxx/include/tuple
+++ b/libcxx/include/tuple
@@ -205,7 +205,6 @@ template <class... Types>
// clang-format on
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__compare/common_comparison_category.h>
#include <__compare/synth_three_way.h>
#include <__config>
diff --git a/libcxx/include/type_traits b/libcxx/include/type_traits
index 466aeb6e0ddd..0037c426560e 100644
--- a/libcxx/include/type_traits
+++ b/libcxx/include/type_traits
@@ -416,7 +416,7 @@ namespace std
}
*/
-#include <__assert> // all public C++ headers provide the assertion handler
+
#include <__config>
#include <__fwd/hash.h> // This is https://llvm.org/PR56938
#include <__type_traits/add_const.h>
diff --git a/libcxx/include/typeindex b/libcxx/include/typeindex
index e6ea12afd524..6398aa40d616 100644
--- a/libcxx/include/typeindex
+++ b/libcxx/include/typeindex
@@ -45,7 +45,6 @@ struct hash<type_index>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__functional/unary_function.h>
#include <typeinfo>
diff --git a/libcxx/include/typeinfo b/libcxx/include/typeinfo
index 1144b5b12913..dafc7b89248e 100644
--- a/libcxx/include/typeinfo
+++ b/libcxx/include/typeinfo
@@ -56,7 +56,6 @@ public:
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
#include <__exception/exception.h>
diff --git a/libcxx/include/unordered_map b/libcxx/include/unordered_map
index d2a3b769821d..ca3d1a80bd57 100644
--- a/libcxx/include/unordered_map
+++ b/libcxx/include/unordered_map
@@ -584,7 +584,7 @@ template <class Key, class T, class Hash, class Pred, class Alloc>
*/
#include <__algorithm/is_permutation.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__functional/is_transparent.h>
diff --git a/libcxx/include/unordered_set b/libcxx/include/unordered_set
index 50b616907f00..64a02de3cf55 100644
--- a/libcxx/include/unordered_set
+++ b/libcxx/include/unordered_set
@@ -532,7 +532,7 @@ template <class Value, class Hash, class Pred, class Alloc>
// clang-format on
#include <__algorithm/is_permutation.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__config>
#include <__functional/is_transparent.h>
diff --git a/libcxx/include/utility b/libcxx/include/utility
index 1deef3db2041..90713da621c5 100644
--- a/libcxx/include/utility
+++ b/libcxx/include/utility
@@ -246,7 +246,6 @@ template <class T>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__config>
#include <__utility/as_const.h>
#include <__utility/as_lvalue.h>
diff --git a/libcxx/include/valarray b/libcxx/include/valarray
index 88b161eccd33..3d45925a25be 100644
--- a/libcxx/include/valarray
+++ b/libcxx/include/valarray
@@ -350,7 +350,7 @@ template <class T> unspecified2 end(const valarray<T>& v);
#include <__algorithm/min.h>
#include <__algorithm/min_element.h>
#include <__algorithm/unwrap_iter.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__config>
#include <__functional/operations.h>
#include <__memory/addressof.h>
diff --git a/libcxx/include/variant b/libcxx/include/variant
index 6063739e52c8..5ce99250a8b4 100644
--- a/libcxx/include/variant
+++ b/libcxx/include/variant
@@ -210,7 +210,6 @@ namespace std {
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__compare/common_comparison_category.h>
#include <__compare/compare_three_way_result.h>
diff --git a/libcxx/include/vector b/libcxx/include/vector
index 579fadfb404c..89cbdf0b3ff7 100644
--- a/libcxx/include/vector
+++ b/libcxx/include/vector
@@ -315,7 +315,7 @@ template<class T, class charT> requires is-vector-bool-reference<T> // Since C++
#include <__algorithm/remove_if.h>
#include <__algorithm/rotate.h>
#include <__algorithm/unwrap_iter.h>
-#include <__assert> // all public C++ headers provide the assertion handler
+#include <__assert>
#include <__availability>
#include <__bit_reference>
#include <__concepts/same_as.h>
diff --git a/libcxx/include/version b/libcxx/include/version
index b18927a2bc38..055d0f30f9c4 100644
--- a/libcxx/include/version
+++ b/libcxx/include/version
@@ -170,6 +170,7 @@ __cpp_lib_ranges_as_const 202207L <ranges>
__cpp_lib_ranges_as_rvalue 202207L <ranges>
__cpp_lib_ranges_chunk 202202L <ranges>
__cpp_lib_ranges_chunk_by 202202L <ranges>
+__cpp_lib_ranges_contains 202207L <algorithm>
__cpp_lib_ranges_iota 202202L <numeric>
__cpp_lib_ranges_join_with 202202L <ranges>
__cpp_lib_ranges_repeat 202207L <ranges>
@@ -244,7 +245,6 @@ __cpp_lib_within_lifetime 202306L <type_traits>
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
@@ -464,11 +464,12 @@ __cpp_lib_within_lifetime 202306L <type_traits>
# define __cpp_lib_ranges_as_rvalue 202207L
// # define __cpp_lib_ranges_chunk 202202L
# define __cpp_lib_ranges_chunk_by 202202L
+# define __cpp_lib_ranges_contains 202207L
// # define __cpp_lib_ranges_iota 202202L
// # define __cpp_lib_ranges_join_with 202202L
# define __cpp_lib_ranges_repeat 202207L
// # define __cpp_lib_ranges_slide 202202L
-// # define __cpp_lib_ranges_starts_ends_with 202106L
+# define __cpp_lib_ranges_starts_ends_with 202106L
# define __cpp_lib_ranges_to_container 202202L
// # define __cpp_lib_ranges_zip 202110L
// # define __cpp_lib_reference_from_temporary 202202L
diff --git a/libcxx/src/new.cpp b/libcxx/src/new.cpp
index 0869d90661dd..b0c731678cac 100644
--- a/libcxx/src/new.cpp
+++ b/libcxx/src/new.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "include/overridable_function.h"
+#include <__assert>
#include <__memory/aligned_alloc.h>
#include <cstddef>
#include <cstdlib>
diff --git a/libcxx/test/libcxx/assertions/customize_verbose_abort.link-time.pass.cpp b/libcxx/test/libcxx/assertions/customize_verbose_abort.link-time.pass.cpp
index 585ab73f2cb2..9298a1e365fc 100644
--- a/libcxx/test/libcxx/assertions/customize_verbose_abort.link-time.pass.cpp
+++ b/libcxx/test/libcxx/assertions/customize_verbose_abort.link-time.pass.cpp
@@ -12,6 +12,7 @@
// failures when back-deploying.
// XFAIL: availability-verbose_abort-missing
+#include <__verbose_abort>
#include <cstdlib>
void std::__libcpp_verbose_abort(char const*, ...) {
diff --git a/libcxx/test/libcxx/assertions/headers_declare_verbose_abort.gen.py b/libcxx/test/libcxx/assertions/headers_declare_verbose_abort.gen.py
deleted file mode 100644
index bd883aa0c145..000000000000
--- a/libcxx/test/libcxx/assertions/headers_declare_verbose_abort.gen.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#===----------------------------------------------------------------------===##
-#
-# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-#
-#===----------------------------------------------------------------------===##
-
-# Test that all public C++ headers define the verbose termination function, which
-# is required for users to be able to include any public header and then override
-# the function using a strong definition.
-
-# RUN: %{python} %s %{libcxx-dir}/utils
-
-import sys
-sys.path.append(sys.argv[1])
-from libcxx.header_information import lit_header_restrictions, public_headers
-
-for header in public_headers:
- # Skip C compatibility headers.
- if header.endswith('.h'):
- continue
-
- BLOCKLIT = '' # block Lit from interpreting a RUN/XFAIL/etc inside the generation script
- print(f"""\
-//--- {header}.compile.pass.cpp
-{lit_header_restrictions.get(header, '')}
-
-// XFAIL{BLOCKLIT}: availability-verbose_abort-missing
-
-#include <{header}>
-using HandlerType = decltype(std::__libcpp_verbose_abort);
-""")
diff --git a/libcxx/test/libcxx/assertions/modes/none.pass.cpp b/libcxx/test/libcxx/assertions/modes/none.pass.cpp
index 4644c5692e70..8332848c1a8e 100644
--- a/libcxx/test/libcxx/assertions/modes/none.pass.cpp
+++ b/libcxx/test/libcxx/assertions/modes/none.pass.cpp
@@ -11,6 +11,7 @@
// REQUIRES: libcpp-hardening-mode=none
+#include <__assert>
#include <cassert>
bool executed_condition = false;
diff --git a/libcxx/test/libcxx/transitive_includes/cxx23.csv b/libcxx/test/libcxx/transitive_includes/cxx23.csv
index 44b5f78beea4..49b3ac265487 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx23.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx23.csv
@@ -30,7 +30,6 @@ array stdexcept
array version
atomic cstddef
atomic cstdint
-atomic cstdlib
atomic cstring
atomic ctime
atomic limits
diff --git a/libcxx/test/libcxx/transitive_includes/cxx26.csv b/libcxx/test/libcxx/transitive_includes/cxx26.csv
index 44b5f78beea4..49b3ac265487 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx26.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx26.csv
@@ -30,7 +30,6 @@ array stdexcept
array version
atomic cstddef
atomic cstdint
-atomic cstdlib
atomic cstring
atomic ctime
atomic limits
diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/algorithm.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/algorithm.version.compile.pass.cpp
index ec6503ec2375..ece13b0a232c 100644
--- a/libcxx/test/std/language.support/support.limits/support.limits.general/algorithm.version.compile.pass.cpp
+++ b/libcxx/test/std/language.support/support.limits/support.limits.general/algorithm.version.compile.pass.cpp
@@ -21,6 +21,7 @@
__cpp_lib_freestanding_algorithm 202311L [C++26]
__cpp_lib_parallel_algorithm 201603L [C++17]
__cpp_lib_ranges 202207L [C++20]
+ __cpp_lib_ranges_contains 202207L [C++23]
__cpp_lib_ranges_starts_ends_with 202106L [C++23]
__cpp_lib_robust_nonmodifying_seq_ops 201304L [C++14]
__cpp_lib_sample 201603L [C++17]
@@ -52,6 +53,10 @@
# error "__cpp_lib_ranges should not be defined before c++20"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_starts_ends_with
# error "__cpp_lib_ranges_starts_ends_with should not be defined before c++23"
# endif
@@ -90,6 +95,10 @@
# error "__cpp_lib_ranges should not be defined before c++20"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_starts_ends_with
# error "__cpp_lib_ranges_starts_ends_with should not be defined before c++23"
# endif
@@ -143,6 +152,10 @@
# error "__cpp_lib_ranges should not be defined before c++20"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_starts_ends_with
# error "__cpp_lib_ranges_starts_ends_with should not be defined before c++23"
# endif
@@ -205,6 +218,10 @@
# error "__cpp_lib_ranges should have the value 202207L in c++20"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_starts_ends_with
# error "__cpp_lib_ranges_starts_ends_with should not be defined before c++23"
# endif
@@ -270,17 +287,18 @@
# error "__cpp_lib_ranges should have the value 202207L in c++23"
# endif
-# if !defined(_LIBCPP_VERSION)
-# ifndef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should be defined in c++23"
-# endif
-# if __cpp_lib_ranges_starts_ends_with != 202106L
-# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++23"
-# endif
-# else // _LIBCPP_VERSION
-# ifdef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should not be defined because it is unimplemented in libc++!"
-# endif
+# ifndef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should be defined in c++23"
+# endif
+# if __cpp_lib_ranges_contains != 202207L
+# error "__cpp_lib_ranges_contains should have the value 202207L in c++23"
+# endif
+
+# ifndef __cpp_lib_ranges_starts_ends_with
+# error "__cpp_lib_ranges_starts_ends_with should be defined in c++23"
+# endif
+# if __cpp_lib_ranges_starts_ends_with != 202106L
+# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++23"
# endif
# ifndef __cpp_lib_robust_nonmodifying_seq_ops
@@ -353,17 +371,18 @@
# error "__cpp_lib_ranges should have the value 202207L in c++26"
# endif
-# if !defined(_LIBCPP_VERSION)
-# ifndef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should be defined in c++26"
-# endif
-# if __cpp_lib_ranges_starts_ends_with != 202106L
-# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++26"
-# endif
-# else // _LIBCPP_VERSION
-# ifdef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should not be defined because it is unimplemented in libc++!"
-# endif
+# ifndef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should be defined in c++26"
+# endif
+# if __cpp_lib_ranges_contains != 202207L
+# error "__cpp_lib_ranges_contains should have the value 202207L in c++26"
+# endif
+
+# ifndef __cpp_lib_ranges_starts_ends_with
+# error "__cpp_lib_ranges_starts_ends_with should be defined in c++26"
+# endif
+# if __cpp_lib_ranges_starts_ends_with != 202106L
+# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++26"
# endif
# ifndef __cpp_lib_robust_nonmodifying_seq_ops
diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp
index 14271308624e..20804d835015 100644
--- a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp
+++ b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp
@@ -158,6 +158,7 @@
__cpp_lib_ranges_as_rvalue 202207L [C++23]
__cpp_lib_ranges_chunk 202202L [C++23]
__cpp_lib_ranges_chunk_by 202202L [C++23]
+ __cpp_lib_ranges_contains 202207L [C++23]
__cpp_lib_ranges_iota 202202L [C++23]
__cpp_lib_ranges_join_with 202202L [C++23]
__cpp_lib_ranges_repeat 202207L [C++23]
@@ -772,6 +773,10 @@
# error "__cpp_lib_ranges_chunk_by should not be defined before c++23"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_iota
# error "__cpp_lib_ranges_iota should not be defined before c++23"
# endif
@@ -1604,6 +1609,10 @@
# error "__cpp_lib_ranges_chunk_by should not be defined before c++23"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_iota
# error "__cpp_lib_ranges_iota should not be defined before c++23"
# endif
@@ -2607,6 +2616,10 @@
# error "__cpp_lib_ranges_chunk_by should not be defined before c++23"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_iota
# error "__cpp_lib_ranges_iota should not be defined before c++23"
# endif
@@ -3889,6 +3902,10 @@
# error "__cpp_lib_ranges_chunk_by should not be defined before c++23"
# endif
+# ifdef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should not be defined before c++23"
+# endif
+
# ifdef __cpp_lib_ranges_iota
# error "__cpp_lib_ranges_iota should not be defined before c++23"
# endif
@@ -5357,6 +5374,13 @@
# error "__cpp_lib_ranges_chunk_by should have the value 202202L in c++23"
# endif
+# ifndef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should be defined in c++23"
+# endif
+# if __cpp_lib_ranges_contains != 202207L
+# error "__cpp_lib_ranges_contains should have the value 202207L in c++23"
+# endif
+
# if !defined(_LIBCPP_VERSION)
# ifndef __cpp_lib_ranges_iota
# error "__cpp_lib_ranges_iota should be defined in c++23"
@@ -5403,17 +5427,11 @@
# endif
# endif
-# if !defined(_LIBCPP_VERSION)
-# ifndef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should be defined in c++23"
-# endif
-# if __cpp_lib_ranges_starts_ends_with != 202106L
-# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++23"
-# endif
-# else // _LIBCPP_VERSION
-# ifdef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should not be defined because it is unimplemented in libc++!"
-# endif
+# ifndef __cpp_lib_ranges_starts_ends_with
+# error "__cpp_lib_ranges_starts_ends_with should be defined in c++23"
+# endif
+# if __cpp_lib_ranges_starts_ends_with != 202106L
+# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++23"
# endif
# ifndef __cpp_lib_ranges_to_container
@@ -7074,6 +7092,13 @@
# error "__cpp_lib_ranges_chunk_by should have the value 202202L in c++26"
# endif
+# ifndef __cpp_lib_ranges_contains
+# error "__cpp_lib_ranges_contains should be defined in c++26"
+# endif
+# if __cpp_lib_ranges_contains != 202207L
+# error "__cpp_lib_ranges_contains should have the value 202207L in c++26"
+# endif
+
# if !defined(_LIBCPP_VERSION)
# ifndef __cpp_lib_ranges_iota
# error "__cpp_lib_ranges_iota should be defined in c++26"
@@ -7120,17 +7145,11 @@
# endif
# endif
-# if !defined(_LIBCPP_VERSION)
-# ifndef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should be defined in c++26"
-# endif
-# if __cpp_lib_ranges_starts_ends_with != 202106L
-# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++26"
-# endif
-# else // _LIBCPP_VERSION
-# ifdef __cpp_lib_ranges_starts_ends_with
-# error "__cpp_lib_ranges_starts_ends_with should not be defined because it is unimplemented in libc++!"
-# endif
+# ifndef __cpp_lib_ranges_starts_ends_with
+# error "__cpp_lib_ranges_starts_ends_with should be defined in c++26"
+# endif
+# if __cpp_lib_ranges_starts_ends_with != 202106L
+# error "__cpp_lib_ranges_starts_ends_with should have the value 202106L in c++26"
# endif
# ifndef __cpp_lib_ranges_to_container
diff --git a/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp
index b1031c815610..5b1f46599111 100644
--- a/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp
@@ -10,9 +10,6 @@
// UNSUPPORTED: availability-filesystem-missing
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
//
// file_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp
index 8625ac58bde5..db1fb55df907 100644
--- a/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// high_resolution_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp
index 5ff667445b1a..70dd8117e6ce 100644
--- a/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// system_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp
index 70fbe98d8dfd..dade6bafa471 100644
--- a/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// system_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp
index f3238f7bb1bb..bf4339c32d1c 100644
--- a/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// system_clock
diff --git a/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp b/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp
index 199bdec66878..80e9d04a769f 100644
--- a/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp
+++ b/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// time_point
diff --git a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp b/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp
deleted file mode 100644
index ceeb4dd3eeec..000000000000
--- a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <utility>
-
-// template<class T, T N>
-// using make_integer_sequence = integer_sequence<T, 0, 1, ..., N-1>;
-
-// UNSUPPORTED: c++03, c++11
-
-#define _LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE
-#include "make_integer_seq.pass.cpp"
-
-#include "test_macros.h"
diff --git a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp b/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp
deleted file mode 100644
index 32a4a5431333..000000000000
--- a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <utility>
-
-// template<class T, T N>
-// using make_integer_sequence = integer_sequence<T, 0, 1, ..., N-1>;
-
-// UNSUPPORTED: c++03, c++11
-
-// This test hangs during recursive template instantiation with libstdc++
-// UNSUPPORTED: stdlib=libstdc++
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE
-
-#include <utility>
-
-typedef std::make_integer_sequence<int, -3> MakeSeqT;
-MakeSeqT i; // expected-error-re@*:* {{static assertion failed{{.*}}std::make_integer_sequence must have a non-negative sequence length}}
diff --git a/libcxx/utils/generate_feature_test_macro_components.py b/libcxx/utils/generate_feature_test_macro_components.py
index b688a30cdb79..16d2fc6a532d 100755
--- a/libcxx/utils/generate_feature_test_macro_components.py
+++ b/libcxx/utils/generate_feature_test_macro_components.py
@@ -915,6 +915,11 @@ feature_test_macros = [
"headers": ["ranges"],
},
{
+ "name": "__cpp_lib_ranges_contains",
+ "values": {"c++23": 202207},
+ "headers": ["algorithm"],
+ },
+ {
"name": "__cpp_lib_ranges_iota",
"values": {"c++23": 202202},
"headers": ["numeric"],
@@ -941,7 +946,6 @@ feature_test_macros = [
"name": "__cpp_lib_ranges_starts_ends_with",
"values": {"c++23": 202106},
"headers": ["algorithm"],
- "unimplemented": True,
},
{
"name": "__cpp_lib_ranges_to_container",
@@ -1514,7 +1518,6 @@ def produce_version_header():
*/
-#include <__assert> // all public C++ headers provide the assertion handler
#include <__availability>
#include <__config>
diff --git a/libcxx/utils/generate_iwyu_mapping.py b/libcxx/utils/generate_iwyu_mapping.py
index 0a650250e747..6eb2c6095bf1 100644
--- a/libcxx/utils/generate_iwyu_mapping.py
+++ b/libcxx/utils/generate_iwyu_mapping.py
@@ -40,6 +40,8 @@ def IWYU_mapping(header: str) -> typing.Optional[typing.List[str]]:
return ["utility"]
elif header == "__fwd/subrange.h":
return ["ranges"]
+ elif re.match("__fwd/(fstream|ios|istream|ostream|sstream|streambuf)[.]h", header):
+ return ["iosfwd"]
# Handle remaining forward declaration headers
elif re.match("__fwd/(.+)[.]h", header):
return [re.match("__fwd/(.+)[.]h", header).group(1)]
diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 49fd979bd0a5..e0f9abfb382a 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -41,6 +41,7 @@ public:
};
} // end anonymous namespace
+namespace {
enum Op {
SUB_W = 0x00110000,
SUB_D = 0x00118000,
@@ -65,6 +66,7 @@ enum Reg {
R_T2 = 14,
R_T3 = 15,
};
+} // namespace
// Mask out the input's lowest 12 bits for use with `pcalau12i`, in sequences
// like `pcalau12i + addi.[wd]` or `pcalau12i + {ld,st}.*` where the `pcalau12i`
diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index de52f6a79a40..019c073bd541 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -26,6 +26,7 @@ using namespace lld::elf;
constexpr uint64_t ppc64TocOffset = 0x8000;
constexpr uint64_t dynamicThreadPointerOffset = 0x8000;
+namespace {
// The instruction encoding of bits 21-30 from the ISA for the Xform and Dform
// instructions that can be used as part of the initial exec TLS sequence.
enum XFormOpcd {
@@ -139,6 +140,7 @@ enum class PPCPrefixedInsn : uint64_t {
PSTXV = PREFIX_8LS | 0xd8000000,
PSTXVP = PREFIX_8LS | 0xf8000000
};
+
static bool checkPPCLegacyInsn(uint32_t encoding) {
PPCLegacyInsn insn = static_cast<PPCLegacyInsn>(encoding);
if (insn == PPCLegacyInsn::NOINSN)
@@ -164,7 +166,6 @@ enum class LegacyToPrefixMask : uint64_t {
0x8000000003e00000, // S/T (6-10) - The [S/T]X bit moves from 28 to 5.
};
-namespace {
class PPC64 final : public TargetInfo {
public:
PPC64();
diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index 5fcab4d39d43..4798c86f7d1b 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -57,6 +57,7 @@ public:
const uint64_t dtpOffset = 0x800;
+namespace {
enum Op {
ADDI = 0x13,
AUIPC = 0x17,
@@ -78,6 +79,7 @@ enum Reg {
X_A0 = 10,
X_T3 = 28,
};
+} // namespace
static uint32_t hi20(uint32_t val) { return (val + 0x800) >> 12; }
static uint32_t lo12(uint32_t val) { return val & 4095; }
diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp
index 018ceec97f20..9edb6b9c60a1 100644
--- a/lld/MachO/Driver.cpp
+++ b/lld/MachO/Driver.cpp
@@ -340,7 +340,7 @@ static InputFile *addFile(StringRef path, LoadType loadType,
}
} else if (isCommandLineLoad && config->forceLoadObjC) {
for (const object::Archive::Symbol &sym : file->getArchive().symbols())
- if (sym.getName().starts_with(objc::klass))
+ if (sym.getName().starts_with(objc::symbol_names::klass))
file->fetch(sym);
// TODO: no need to look for ObjC sections for a given archive member if
@@ -395,7 +395,7 @@ static InputFile *addFile(StringRef path, LoadType loadType,
if ((isa<ObjFile>(newFile) || isa<BitcodeFile>(newFile)) && newFile->lazy &&
config->forceLoadObjC) {
for (Symbol *sym : newFile->symbols)
- if (sym && sym->getName().starts_with(objc::klass)) {
+ if (sym && sym->getName().starts_with(objc::symbol_names::klass)) {
extract(*newFile, "-ObjC");
break;
}
diff --git a/lld/MachO/InputFiles.cpp b/lld/MachO/InputFiles.cpp
index 158c3fbf7b0f..b36d390cc16a 100644
--- a/lld/MachO/InputFiles.cpp
+++ b/lld/MachO/InputFiles.cpp
@@ -1921,14 +1921,14 @@ DylibFile::DylibFile(const InterfaceFile &interface, DylibFile *umbrella,
case EncodeKind::ObjectiveCClass:
// XXX ld64 only creates these symbols when -ObjC is passed in. We may
// want to emulate that.
- addSymbol(*symbol, objc::klass + symbol->getName());
- addSymbol(*symbol, objc::metaclass + symbol->getName());
+ addSymbol(*symbol, objc::symbol_names::klass + symbol->getName());
+ addSymbol(*symbol, objc::symbol_names::metaclass + symbol->getName());
break;
case EncodeKind::ObjectiveCClassEHType:
- addSymbol(*symbol, objc::ehtype + symbol->getName());
+ addSymbol(*symbol, objc::symbol_names::ehtype + symbol->getName());
break;
case EncodeKind::ObjectiveCInstanceVariable:
- addSymbol(*symbol, objc::ivar + symbol->getName());
+ addSymbol(*symbol, objc::symbol_names::ivar + symbol->getName());
break;
}
}
diff --git a/lld/MachO/ObjC.h b/lld/MachO/ObjC.h
index 560c5cc0bc50..4c65f9a1f788 100644
--- a/lld/MachO/ObjC.h
+++ b/lld/MachO/ObjC.h
@@ -15,10 +15,12 @@ namespace lld::macho {
namespace objc {
+namespace symbol_names {
constexpr const char klass[] = "_OBJC_CLASS_$_";
constexpr const char metaclass[] = "_OBJC_METACLASS_$_";
constexpr const char ehtype[] = "_OBJC_EHTYPE_$_";
constexpr const char ivar[] = "_OBJC_IVAR_$_";
+} // namespace symbol_names
// Check for duplicate method names within related categories / classes.
void checkCategories();
diff --git a/lldb/bindings/CMakeLists.txt b/lldb/bindings/CMakeLists.txt
index b44ed59aa662..296eae1ae77f 100644
--- a/lldb/bindings/CMakeLists.txt
+++ b/lldb/bindings/CMakeLists.txt
@@ -23,7 +23,11 @@ endif()
set(SWIG_COMMON_FLAGS
-c++
- -w361,362 # Ignore warnings about ignored operator overloads
+ # Ignored warnings:
+ # 361: operator! ignored.
+ # 362: operator= ignored.
+ # 509: Overloaded method declaration effectively ignored, shadowed by previous declaration.
+ -w361,362,509
-features autodoc
-I${LLDB_SOURCE_DIR}/include
-I${CMAKE_CURRENT_SOURCE_DIR}
diff --git a/lldb/cmake/modules/AddLLDB.cmake b/lldb/cmake/modules/AddLLDB.cmake
index 328e883ddbe5..fdc4ee0c05d7 100644
--- a/lldb/cmake/modules/AddLLDB.cmake
+++ b/lldb/cmake/modules/AddLLDB.cmake
@@ -383,7 +383,7 @@ endfunction()
function(lldb_find_python_module module)
set(MODULE_FOUND PY_${module}_FOUND)
- if (DEFINED ${MODULE_FOUND})
+ if (${MODULE_FOUND})
return()
endif()
@@ -392,10 +392,10 @@ function(lldb_find_python_module module)
ERROR_QUIET)
if (status)
- set(${MODULE_FOUND} OFF CACHE BOOL "Failed to find python module '${module}'")
+ set(${MODULE_FOUND} OFF PARENT_SCOPE)
message(STATUS "Could NOT find Python module '${module}'")
else()
- set(${MODULE_FOUND} ON CACHE BOOL "Found python module '${module}'")
+ set(${MODULE_FOUND} ON PARENT_SCOPE)
message(STATUS "Found Python module '${module}'")
endif()
endfunction()
diff --git a/lldb/cmake/modules/LLDBConfig.cmake b/lldb/cmake/modules/LLDBConfig.cmake
index a758261073ac..93c8ffe4b7d8 100644
--- a/lldb/cmake/modules/LLDBConfig.cmake
+++ b/lldb/cmake/modules/LLDBConfig.cmake
@@ -67,6 +67,8 @@ option(LLDB_SKIP_STRIP "Whether to skip stripping of binaries when installing ll
option(LLDB_SKIP_DSYM "Whether to skip generating a dSYM when installing lldb." OFF)
option(LLDB_ENFORCE_STRICT_TEST_REQUIREMENTS
"Fail to configure if certain requirements are not met for testing." OFF)
+option(LLDB_TEST_USE_VENDOR_PACKAGES
+ "Use packages from lldb/third_party/Python/module instead of system deps." ON)
set(LLDB_GLOBAL_INIT_DIRECTORY "" CACHE STRING
"Path to the global lldbinit directory. Relative paths are resolved relative to the
diff --git a/lldb/include/lldb/Core/Debugger.h b/lldb/include/lldb/Core/Debugger.h
index 6ba90eb6ed8f..b65ec1029ab2 100644
--- a/lldb/include/lldb/Core/Debugger.h
+++ b/lldb/include/lldb/Core/Debugger.h
@@ -593,6 +593,7 @@ protected:
friend class CommandInterpreter;
friend class REPL;
friend class Progress;
+ friend class ProgressManager;
/// Report progress events.
///
@@ -623,10 +624,11 @@ protected:
/// debugger identifier that this progress should be delivered to. If this
/// optional parameter does not have a value, the progress will be
/// delivered to all debuggers.
- static void ReportProgress(uint64_t progress_id, std::string title,
- std::string details, uint64_t completed,
- uint64_t total,
- std::optional<lldb::user_id_t> debugger_id);
+ static void
+ ReportProgress(uint64_t progress_id, std::string title, std::string details,
+ uint64_t completed, uint64_t total,
+ std::optional<lldb::user_id_t> debugger_id,
+ uint32_t progress_category_bit = eBroadcastBitProgress);
static void ReportDiagnosticImpl(DiagnosticEventData::Type type,
std::string message,
diff --git a/lldb/include/lldb/Core/Progress.h b/lldb/include/lldb/Core/Progress.h
index eb4d9f9d7af0..c6fc861fb71d 100644
--- a/lldb/include/lldb/Core/Progress.h
+++ b/lldb/include/lldb/Core/Progress.h
@@ -9,10 +9,11 @@
#ifndef LLDB_CORE_PROGRESS_H
#define LLDB_CORE_PROGRESS_H
-#include "lldb/Utility/ConstString.h"
+#include "lldb/lldb-forward.h"
#include "lldb/lldb-types.h"
#include "llvm/ADT/StringMap.h"
#include <atomic>
+#include <cstdint>
#include <mutex>
#include <optional>
@@ -64,6 +65,9 @@ public:
///
/// @param [in] title The title of this progress activity.
///
+ /// @param [in] details Specific information about what the progress report
+ /// is currently working on.
+ ///
/// @param [in] total The total units of work to be done if specified, if
/// set to std::nullopt then an indeterminate progress indicator should be
/// displayed.
@@ -97,27 +101,36 @@ public:
/// Used to indicate a non-deterministic progress report
static constexpr uint64_t kNonDeterministicTotal = UINT64_MAX;
+ /// Data belonging to this Progress event that is used for bookkeeping by
+ /// ProgressManager.
+ struct ProgressData {
+ /// The title of the progress activity, also used as a category.
+ std::string title;
+ /// A unique integer identifier for progress reporting.
+ uint64_t progress_id;
+ /// The optional debugger ID to report progress to. If this has no value
+ /// then all debuggers will receive this event.
+ std::optional<lldb::user_id_t> debugger_id;
+ };
+
private:
void ReportProgress();
static std::atomic<uint64_t> g_id;
- /// The title of the progress activity.
- std::string m_title;
+ /// More specific information about the current file being displayed in the
+ /// report.
std::string m_details;
- std::mutex m_mutex;
- /// A unique integer identifier for progress reporting.
- const uint64_t m_id;
/// How much work ([0...m_total]) that has been completed.
uint64_t m_completed;
/// Total amount of work, use a std::nullopt in the constructor for non
/// deterministic progress.
uint64_t m_total;
- /// The optional debugger ID to report progress to. If this has no value then
- /// all debuggers will receive this event.
- std::optional<lldb::user_id_t> m_debugger_id;
+ std::mutex m_mutex;
/// Set to true when progress has been reported where m_completed == m_total
/// to ensure that we don't send progress updates after progress has
/// completed.
bool m_complete = false;
+ /// Data needed by the debugger to broadcast a progress event.
+ ProgressData m_progress_data;
};
/// A class used to group progress reports by category. This is done by using a
@@ -130,13 +143,16 @@ public:
~ProgressManager();
/// Control the refcount of the progress report category as needed.
- void Increment(std::string category);
- void Decrement(std::string category);
+ void Increment(const Progress::ProgressData &);
+ void Decrement(const Progress::ProgressData &);
static ProgressManager &Instance();
+ static void ReportProgress(const Progress::ProgressData &);
+
private:
- llvm::StringMap<uint64_t> m_progress_category_map;
+ llvm::StringMap<std::pair<uint64_t, Progress::ProgressData>>
+ m_progress_category_map;
std::mutex m_progress_map_mutex;
};
diff --git a/lldb/include/lldb/Interpreter/CommandOptionArgumentTable.h b/lldb/include/lldb/Interpreter/CommandOptionArgumentTable.h
index 9248e2ac8144..b5e989633ea3 100644
--- a/lldb/include/lldb/Interpreter/CommandOptionArgumentTable.h
+++ b/lldb/include/lldb/Interpreter/CommandOptionArgumentTable.h
@@ -50,6 +50,11 @@ static constexpr OptionEnumValueElement g_sort_option_enumeration[] = {
"name",
"Sort output by symbol name.",
},
+ {
+ eSortOrderBySize,
+ "size",
+ "Sort output by symbol byte size.",
+ },
};
// Note that the negation in the argument name causes a slightly confusing
diff --git a/lldb/include/lldb/lldb-private-enumerations.h b/lldb/include/lldb/lldb-private-enumerations.h
index 9e8ab56305be..b8f504529683 100644
--- a/lldb/include/lldb/lldb-private-enumerations.h
+++ b/lldb/include/lldb/lldb-private-enumerations.h
@@ -108,7 +108,12 @@ enum ArgumentRepetitionType {
// optional
};
-enum SortOrder { eSortOrderNone, eSortOrderByAddress, eSortOrderByName };
+enum SortOrder {
+ eSortOrderNone,
+ eSortOrderByAddress,
+ eSortOrderByName,
+ eSortOrderBySize
+};
// LazyBool is for boolean values that need to be calculated lazily. Values
// start off set to eLazyBoolCalculate, and then they can be calculated once
diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
index 288cc8cf9a48..23f650d2d36f 100644
--- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
@@ -6,6 +6,9 @@ from lldbsuite.test.lldbtest import *
class DAPTestCaseBase(TestBase):
+ # set timeout based on whether ASAN was enabled or not. Increase
+ # timeout by a factor of 10 if ASAN is enabled.
+ timeoutval = 10 * (10 if ('ASAN_OPTIONS' in os.environ) else 1)
NO_DEBUG_INFO_TESTCASE = True
def create_debug_adaptor(self, lldbDAPEnv=None):
diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp
index 45265577e8b6..b2346c2402a8 100644
--- a/lldb/source/Commands/CommandObjectTarget.cpp
+++ b/lldb/source/Commands/CommandObjectTarget.cpp
@@ -3376,15 +3376,19 @@ protected:
case 'r': {
size_t ref_count = 0;
+ char in_shared_cache = 'Y';
+
ModuleSP module_sp(module->shared_from_this());
+ if (!ModuleList::ModuleIsInCache(module))
+ in_shared_cache = 'N';
if (module_sp) {
// Take one away to make sure we don't count our local "module_sp"
ref_count = module_sp.use_count() - 1;
}
if (width)
- strm.Printf("{%*" PRIu64 "}", width, (uint64_t)ref_count);
+ strm.Printf("{%c %*" PRIu64 "}", in_shared_cache, width, (uint64_t)ref_count);
else
- strm.Printf("{%" PRIu64 "}", (uint64_t)ref_count);
+ strm.Printf("{%c %" PRIu64 "}", in_shared_cache, (uint64_t)ref_count);
} break;
case 's':
diff --git a/lldb/source/Commands/CommandObjectThread.cpp b/lldb/source/Commands/CommandObjectThread.cpp
index 9cfff059d6bf..cf4f8ccaa0c4 100644
--- a/lldb/source/Commands/CommandObjectThread.cpp
+++ b/lldb/source/Commands/CommandObjectThread.cpp
@@ -67,13 +67,18 @@ public:
if (option_arg.getAsInteger(0, m_count)) {
m_count = UINT32_MAX;
error.SetErrorStringWithFormat(
- "invalid integer value for option '%c'", short_option);
+ "invalid integer value for option '%c': %s", short_option,
+ option_arg.data());
}
+ // A count of 0 means all frames.
+ if (m_count == 0)
+ m_count = UINT32_MAX;
break;
case 's':
if (option_arg.getAsInteger(0, m_start))
error.SetErrorStringWithFormat(
- "invalid integer value for option '%c'", short_option);
+ "invalid integer value for option '%c': %s", short_option,
+ option_arg.data());
break;
case 'e': {
bool success;
@@ -81,7 +86,8 @@ public:
OptionArgParser::ToBoolean(option_arg, false, &success);
if (!success)
error.SetErrorStringWithFormat(
- "invalid boolean value for option '%c'", short_option);
+ "invalid boolean value for option '%c': %s", short_option,
+ option_arg.data());
} break;
default:
llvm_unreachable("Unimplemented option");
@@ -228,9 +234,9 @@ protected:
thread->GetIndexID());
return false;
}
- if (m_options.m_extended_backtrace) {
- if (!INTERRUPT_REQUESTED(GetDebugger(),
- "Interrupt skipped extended backtrace")) {
+ if (m_options.m_extended_backtrace) {
+ if (!INTERRUPT_REQUESTED(GetDebugger(),
+ "Interrupt skipped extended backtrace")) {
DoExtendedBacktrace(thread, result);
}
}
@@ -272,8 +278,9 @@ public:
bool avoid_no_debug =
OptionArgParser::ToBoolean(option_arg, true, &success);
if (!success)
- error.SetErrorStringWithFormat("invalid boolean value for option '%c'",
- short_option);
+ error.SetErrorStringWithFormat(
+ "invalid boolean value for option '%c': %s", short_option,
+ option_arg.data());
else {
m_step_in_avoid_no_debug = avoid_no_debug ? eLazyBoolYes : eLazyBoolNo;
}
@@ -284,8 +291,9 @@ public:
bool avoid_no_debug =
OptionArgParser::ToBoolean(option_arg, true, &success);
if (!success)
- error.SetErrorStringWithFormat("invalid boolean value for option '%c'",
- short_option);
+ error.SetErrorStringWithFormat(
+ "invalid boolean value for option '%c': %s", short_option,
+ option_arg.data());
else {
m_step_out_avoid_no_debug = avoid_no_debug ? eLazyBoolYes : eLazyBoolNo;
}
@@ -293,8 +301,9 @@ public:
case 'c':
if (option_arg.getAsInteger(0, m_step_count))
- error.SetErrorStringWithFormat("invalid step count '%s'",
- option_arg.str().c_str());
+ error.SetErrorStringWithFormat(
+ "invalid integer value for option '%c': %s", short_option,
+ option_arg.data());
break;
case 'm': {
diff --git a/lldb/source/Commands/Options.td b/lldb/source/Commands/Options.td
index ad4321d9a386..62bbfdc11783 100644
--- a/lldb/source/Commands/Options.td
+++ b/lldb/source/Commands/Options.td
@@ -805,7 +805,7 @@ let Command = "script add" in {
def script_add_function : Option<"function", "f">, Group<1>,
Arg<"PythonFunction">,
Desc<"Name of the Python function to bind to this command name.">;
- def script_add_class : Option<"class", "c">, Groups<[2,3]>,
+ def script_add_class : Option<"class", "c">, Groups<[2,3]>,
Arg<"PythonClass">,
Desc<"Name of the Python class to bind to this command name.">;
def script_add_help : Option<"help", "h">, Group<1>, Arg<"HelpText">,
@@ -816,7 +816,7 @@ let Command = "script add" in {
EnumArg<"ScriptedCommandSynchronicity">,
Desc<"Set the synchronicity of this command's executions with regard to "
"LLDB event system.">;
- def script_add_completion_type : Option<"completion-type", "C">,
+ def script_add_completion_type : Option<"completion-type", "C">,
Groups<[1,2]>, EnumArg<"CompletionType">,
Desc<"Specify which completion type the command should use - if none is "
"specified, the command won't use auto-completion.">;
@@ -936,8 +936,8 @@ let Command = "target modules list" in {
OptionalArg<"Width">, Desc<"Display the modification time with optional "
"width of the module.">;
def target_modules_list_ref_count : Option<"ref-count", "r">, Group<1>,
- OptionalArg<"Width">, Desc<"Display the reference count if the module is "
- "still in the shared module cache.">;
+    OptionalArg<"Width">, Desc<"Display whether the module is still in the "
+    "shared module cache (Y/N), and its shared pointer use_count.">;
def target_modules_list_pointer : Option<"pointer", "p">, Group<1>,
OptionalArg<"None">, Desc<"Display the module pointer.">;
def target_modules_list_global : Option<"global", "g">, Group<1>,
@@ -1037,7 +1037,7 @@ let Command = "target stop hook add" in {
let Command = "thread backtrace" in {
def thread_backtrace_count : Option<"count", "c">, Group<1>, Arg<"Count">,
- Desc<"How many frames to display (-1 for all)">;
+ Desc<"How many frames to display (0 for all)">;
def thread_backtrace_start : Option<"start", "s">, Group<1>,
Arg<"FrameIndex">, Desc<"Frame in which to start the backtrace">;
def thread_backtrace_extended : Option<"extended", "e">, Group<1>,
diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp
index c3e603dbae89..217474d1060a 100644
--- a/lldb/source/Core/Debugger.cpp
+++ b/lldb/source/Core/Debugger.cpp
@@ -15,6 +15,7 @@
#include "lldb/Core/ModuleList.h"
#include "lldb/Core/ModuleSpec.h"
#include "lldb/Core/PluginManager.h"
+#include "lldb/Core/Progress.h"
#include "lldb/Core/StreamAsynchronousIO.h"
#include "lldb/DataFormatters/DataVisualization.h"
#include "lldb/Expression/REPL.h"
@@ -1433,13 +1434,14 @@ void Debugger::SetDestroyCallback(
static void PrivateReportProgress(Debugger &debugger, uint64_t progress_id,
std::string title, std::string details,
uint64_t completed, uint64_t total,
- bool is_debugger_specific) {
+ bool is_debugger_specific,
+ uint32_t progress_broadcast_bit) {
// Only deliver progress events if we have any progress listeners.
- const uint32_t event_type = Debugger::eBroadcastBitProgress;
- if (!debugger.GetBroadcaster().EventTypeHasListeners(event_type))
+ if (!debugger.GetBroadcaster().EventTypeHasListeners(progress_broadcast_bit))
return;
+
EventSP event_sp(new Event(
- event_type,
+ progress_broadcast_bit,
new ProgressEventData(progress_id, std::move(title), std::move(details),
completed, total, is_debugger_specific)));
debugger.GetBroadcaster().BroadcastEvent(event_sp);
@@ -1448,7 +1450,8 @@ static void PrivateReportProgress(Debugger &debugger, uint64_t progress_id,
void Debugger::ReportProgress(uint64_t progress_id, std::string title,
std::string details, uint64_t completed,
uint64_t total,
- std::optional<lldb::user_id_t> debugger_id) {
+ std::optional<lldb::user_id_t> debugger_id,
+ uint32_t progress_category_bit) {
// Check if this progress is for a specific debugger.
if (debugger_id) {
// It is debugger specific, grab it and deliver the event if the debugger
@@ -1457,7 +1460,8 @@ void Debugger::ReportProgress(uint64_t progress_id, std::string title,
if (debugger_sp)
PrivateReportProgress(*debugger_sp, progress_id, std::move(title),
std::move(details), completed, total,
- /*is_debugger_specific*/ true);
+ /*is_debugger_specific*/ true,
+ progress_category_bit);
return;
}
// The progress event is not debugger specific, iterate over all debuggers
@@ -1467,7 +1471,8 @@ void Debugger::ReportProgress(uint64_t progress_id, std::string title,
DebuggerList::iterator pos, end = g_debugger_list_ptr->end();
for (pos = g_debugger_list_ptr->begin(); pos != end; ++pos)
PrivateReportProgress(*(*pos), progress_id, title, details, completed,
- total, /*is_debugger_specific*/ false);
+ total, /*is_debugger_specific*/ false,
+ progress_category_bit);
}
}
diff --git a/lldb/source/Core/Progress.cpp b/lldb/source/Core/Progress.cpp
index 9e8deb1ad4e7..9dcd7cf75ae0 100644
--- a/lldb/source/Core/Progress.cpp
+++ b/lldb/source/Core/Progress.cpp
@@ -11,6 +11,7 @@
#include "lldb/Core/Debugger.h"
#include "lldb/Utility/StreamString.h"
+#include <cstdint>
#include <mutex>
#include <optional>
@@ -22,15 +23,19 @@ std::atomic<uint64_t> Progress::g_id(0);
Progress::Progress(std::string title, std::string details,
std::optional<uint64_t> total,
lldb_private::Debugger *debugger)
- : m_title(title), m_details(details), m_id(++g_id), m_completed(0),
- m_total(Progress::kNonDeterministicTotal) {
+ : m_details(details), m_completed(0),
+ m_total(Progress::kNonDeterministicTotal),
+ m_progress_data{title, ++g_id,
+ /*m_progress_data.debugger_id=*/std::nullopt} {
if (total)
m_total = *total;
if (debugger)
- m_debugger_id = debugger->GetID();
+ m_progress_data.debugger_id = debugger->GetID();
+
std::lock_guard<std::mutex> guard(m_mutex);
ReportProgress();
+ ProgressManager::Instance().Increment(m_progress_data);
}
Progress::~Progress() {
@@ -40,6 +45,7 @@ Progress::~Progress() {
if (!m_completed)
m_completed = m_total;
ReportProgress();
+ ProgressManager::Instance().Decrement(m_progress_data);
}
void Progress::Increment(uint64_t amount,
@@ -49,7 +55,7 @@ void Progress::Increment(uint64_t amount,
if (updated_detail)
m_details = std::move(updated_detail.value());
// Watch out for unsigned overflow and make sure we don't increment too
- // much and exceed m_total.
+ // much and exceed the total.
if (m_total && (amount > (m_total - m_completed)))
m_completed = m_total;
else
@@ -63,8 +69,9 @@ void Progress::ReportProgress() {
// Make sure we only send one notification that indicates the progress is
// complete
m_complete = m_completed == m_total;
- Debugger::ReportProgress(m_id, m_title, m_details, m_completed, m_total,
- m_debugger_id);
+ Debugger::ReportProgress(m_progress_data.progress_id, m_progress_data.title,
+ m_details, m_completed, m_total,
+ m_progress_data.debugger_id);
}
}
@@ -82,20 +89,41 @@ ProgressManager &ProgressManager::Instance() {
return *g_progress_manager;
}
-void ProgressManager::Increment(std::string title) {
+void ProgressManager::Increment(const Progress::ProgressData &progress_data) {
std::lock_guard<std::mutex> lock(m_progress_map_mutex);
- m_progress_category_map[title]++;
+ // If the current category exists in the map then it is not an initial report,
+ // therefore don't broadcast to the category bit. Also, store the current
+ // progress data in the map so that we have a note of the ID used for the
+ // initial progress report.
+ if (!m_progress_category_map.contains(progress_data.title)) {
+ m_progress_category_map[progress_data.title].second = progress_data;
+ ReportProgress(progress_data);
+ }
+ m_progress_category_map[progress_data.title].first++;
}
-void ProgressManager::Decrement(std::string title) {
+void ProgressManager::Decrement(const Progress::ProgressData &progress_data) {
std::lock_guard<std::mutex> lock(m_progress_map_mutex);
- auto pos = m_progress_category_map.find(title);
+ auto pos = m_progress_category_map.find(progress_data.title);
if (pos == m_progress_category_map.end())
return;
- if (pos->second <= 1)
- m_progress_category_map.erase(title);
- else
- --pos->second;
+ if (pos->second.first <= 1) {
+ ReportProgress(pos->second.second);
+ m_progress_category_map.erase(progress_data.title);
+ } else {
+ --pos->second.first;
+ }
+}
+
+void ProgressManager::ReportProgress(
+ const Progress::ProgressData &progress_data) {
+ // The category bit only keeps track of when progress report categories have
+ // started and ended, so clear the details and reset other fields when
+ // broadcasting to it since that bit doesn't need that information.
+ Debugger::ReportProgress(
+ progress_data.progress_id, progress_data.title, "",
+ Progress::kNonDeterministicTotal, Progress::kNonDeterministicTotal,
+ progress_data.debugger_id, Debugger::eBroadcastBitProgressCategory);
}
diff --git a/lldb/source/Host/common/Editline.cpp b/lldb/source/Host/common/Editline.cpp
index ce707e530d00..e66271e8a6ee 100644
--- a/lldb/source/Host/common/Editline.cpp
+++ b/lldb/source/Host/common/Editline.cpp
@@ -1029,8 +1029,11 @@ unsigned char Editline::TabCommand(int ch) {
case CompletionMode::Normal: {
std::string to_add = completion.GetCompletion();
// Terminate the current argument with a quote if it started with a quote.
- if (!request.GetParsedLine().empty() && request.GetParsedArg().IsQuoted())
+ Args &parsedLine = request.GetParsedLine();
+ if (!parsedLine.empty() && request.GetCursorIndex() < parsedLine.size() &&
+ request.GetParsedArg().IsQuoted()) {
to_add.push_back(request.GetParsedArg().GetQuoteChar());
+ }
to_add.push_back(' ');
el_deletestr(m_editline, request.GetCursorArgumentPrefix().size());
el_insertstr(m_editline, to_add.c_str());
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
index 62a30c14912b..30b50df79da9 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
@@ -10,9 +10,11 @@
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Sema.h"
#include "llvm/Support/raw_ostream.h"
@@ -26,6 +28,7 @@
#include <memory>
#include <optional>
+#include <type_traits>
using namespace lldb_private;
using namespace clang;
@@ -517,6 +520,236 @@ bool ClangASTImporter::CompleteType(const CompilerType &compiler_type) {
return false;
}
+/// Copy layout information from \ref source_map to the \ref destination_map.
+///
+/// In the process of copying over layout info, we may need to import
+/// decls from the \ref source_map. This function will use the supplied
+/// \ref importer to import the necessary decls into \ref dest_ctx.
+///
+/// \param[in,out] dest_ctx Destination ASTContext into which we import
+/// decls from the \ref source_map.
+/// \param[out] destination_map A map from decls in \ref dest_ctx to an
+///                   integral offset, which will be copies
+///                   of the decl/offset pairs in \ref source_map
+///                   if successful.
+/// \param[in] source_map A map from decls to integral offsets. These will
+/// be copied into \ref destination_map.
+/// \param[in,out] importer Used to import decls into \ref dest_ctx.
+///
+/// \returns On success, will return 'true' and the offsets in \ref
+/// destination_map
+/// are usable copies of \ref source_map.
+template <class D, class O>
+static bool ImportOffsetMap(clang::ASTContext *dest_ctx,
+ llvm::DenseMap<const D *, O> &destination_map,
+ llvm::DenseMap<const D *, O> &source_map,
+ ClangASTImporter &importer) {
+ // When importing fields into a new record, clang has a hard requirement that
+ // fields be imported in field offset order. Since they are stored in a
+ // DenseMap with a pointer as the key type, this means we cannot simply
+ // iterate over the map, as the order will be non-deterministic. Instead we
+ // have to sort by the offset and then insert in sorted order.
+ typedef llvm::DenseMap<const D *, O> MapType;
+ typedef typename MapType::value_type PairType;
+ std::vector<PairType> sorted_items;
+ sorted_items.reserve(source_map.size());
+ sorted_items.assign(source_map.begin(), source_map.end());
+ llvm::sort(sorted_items, llvm::less_second());
+
+ for (const auto &item : sorted_items) {
+ DeclFromUser<D> user_decl(const_cast<D *>(item.first));
+ DeclFromParser<D> parser_decl(user_decl.Import(dest_ctx, importer));
+ if (parser_decl.IsInvalid())
+ return false;
+ destination_map.insert(
+ std::pair<const D *, O>(parser_decl.decl, item.second));
+ }
+
+ return true;
+}
+
+/// Given a CXXRecordDecl, will calculate and populate \ref base_offsets
+/// with the integral offsets of any of its (possibly virtual) base classes.
+///
+/// \param[in] record_layout ASTRecordLayout of \ref record.
+/// \param[in] record The record that we're calculating the base layouts of.
+/// \param[out] base_offsets Map of base-class decl to integral offset which
+/// this function will fill in.
+///
+/// \returns On success, will return 'true' and the offsets in \ref base_offsets
+/// are usable.
+template <bool IsVirtual>
+bool ExtractBaseOffsets(const ASTRecordLayout &record_layout,
+ DeclFromUser<const CXXRecordDecl> &record,
+ llvm::DenseMap<const clang::CXXRecordDecl *,
+ clang::CharUnits> &base_offsets) {
+ for (CXXRecordDecl::base_class_const_iterator
+ bi = (IsVirtual ? record->vbases_begin() : record->bases_begin()),
+ be = (IsVirtual ? record->vbases_end() : record->bases_end());
+ bi != be; ++bi) {
+ if (!IsVirtual && bi->isVirtual())
+ continue;
+
+ const clang::Type *origin_base_type = bi->getType().getTypePtr();
+ const clang::RecordType *origin_base_record_type =
+ origin_base_type->getAs<RecordType>();
+
+ if (!origin_base_record_type)
+ return false;
+
+ DeclFromUser<RecordDecl> origin_base_record(
+ origin_base_record_type->getDecl());
+
+ if (origin_base_record.IsInvalid())
+ return false;
+
+ DeclFromUser<CXXRecordDecl> origin_base_cxx_record(
+ DynCast<CXXRecordDecl>(origin_base_record));
+
+ if (origin_base_cxx_record.IsInvalid())
+ return false;
+
+ CharUnits base_offset;
+
+ if (IsVirtual)
+ base_offset =
+ record_layout.getVBaseClassOffset(origin_base_cxx_record.decl);
+ else
+ base_offset =
+ record_layout.getBaseClassOffset(origin_base_cxx_record.decl);
+
+ base_offsets.insert(std::pair<const CXXRecordDecl *, CharUnits>(
+ origin_base_cxx_record.decl, base_offset));
+ }
+
+ return true;
+}
+
+bool ClangASTImporter::importRecordLayoutFromOrigin(
+ const RecordDecl *record, uint64_t &size, uint64_t &alignment,
+ llvm::DenseMap<const clang::FieldDecl *, uint64_t> &field_offsets,
+ llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
+ &base_offsets,
+ llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
+ &vbase_offsets) {
+
+ Log *log = GetLog(LLDBLog::Expressions);
+
+ clang::ASTContext &dest_ctx = record->getASTContext();
+ LLDB_LOG(log,
+ "LayoutRecordType on (ASTContext*){0} '{1}' for (RecordDecl*)"
+ "{2} [name = '{3}']",
+ &dest_ctx,
+ TypeSystemClang::GetASTContext(&dest_ctx)->getDisplayName(), record,
+ record->getName());
+
+ DeclFromParser<const RecordDecl> parser_record(record);
+ DeclFromUser<const RecordDecl> origin_record(parser_record.GetOrigin(*this));
+
+ if (origin_record.IsInvalid())
+ return false;
+
+ std::remove_reference_t<decltype(field_offsets)> origin_field_offsets;
+ std::remove_reference_t<decltype(base_offsets)> origin_base_offsets;
+ std::remove_reference_t<decltype(vbase_offsets)> origin_virtual_base_offsets;
+
+ TypeSystemClang::GetCompleteDecl(
+ &origin_record->getASTContext(),
+ const_cast<RecordDecl *>(origin_record.decl));
+
+ clang::RecordDecl *definition = origin_record.decl->getDefinition();
+ if (!definition || !definition->isCompleteDefinition())
+ return false;
+
+ const ASTRecordLayout &record_layout(
+ origin_record->getASTContext().getASTRecordLayout(origin_record.decl));
+
+ int field_idx = 0, field_count = record_layout.getFieldCount();
+
+ for (RecordDecl::field_iterator fi = origin_record->field_begin(),
+ fe = origin_record->field_end();
+ fi != fe; ++fi) {
+ if (field_idx >= field_count)
+ return false; // Layout didn't go well. Bail out.
+
+ uint64_t field_offset = record_layout.getFieldOffset(field_idx);
+
+ origin_field_offsets.insert(
+ std::pair<const FieldDecl *, uint64_t>(*fi, field_offset));
+
+ field_idx++;
+ }
+
+ DeclFromUser<const CXXRecordDecl> origin_cxx_record(
+ DynCast<const CXXRecordDecl>(origin_record));
+
+ if (origin_cxx_record.IsValid()) {
+ if (!ExtractBaseOffsets<false>(record_layout, origin_cxx_record,
+ origin_base_offsets) ||
+ !ExtractBaseOffsets<true>(record_layout, origin_cxx_record,
+ origin_virtual_base_offsets))
+ return false;
+ }
+
+ if (!ImportOffsetMap(&dest_ctx, field_offsets, origin_field_offsets, *this) ||
+ !ImportOffsetMap(&dest_ctx, base_offsets, origin_base_offsets, *this) ||
+ !ImportOffsetMap(&dest_ctx, vbase_offsets, origin_virtual_base_offsets,
+ *this))
+ return false;
+
+ size = record_layout.getSize().getQuantity() * dest_ctx.getCharWidth();
+ alignment =
+ record_layout.getAlignment().getQuantity() * dest_ctx.getCharWidth();
+
+ if (log) {
+ LLDB_LOG(log, "LRT returned:");
+ LLDB_LOG(log, "LRT Original = (RecordDecl*){0}",
+ static_cast<const void *>(origin_record.decl));
+ LLDB_LOG(log, "LRT Size = {0}", size);
+ LLDB_LOG(log, "LRT Alignment = {0}", alignment);
+ LLDB_LOG(log, "LRT Fields:");
+ for (RecordDecl::field_iterator fi = record->field_begin(),
+ fe = record->field_end();
+ fi != fe; ++fi) {
+ LLDB_LOG(log,
+ "LRT (FieldDecl*){0}, Name = '{1}', Type = '{2}', Offset = "
+ "{3} bits",
+ *fi, fi->getName(), fi->getType().getAsString(),
+ field_offsets[*fi]);
+ }
+ DeclFromParser<const CXXRecordDecl> parser_cxx_record =
+ DynCast<const CXXRecordDecl>(parser_record);
+ if (parser_cxx_record.IsValid()) {
+ LLDB_LOG(log, "LRT Bases:");
+ for (CXXRecordDecl::base_class_const_iterator
+ bi = parser_cxx_record->bases_begin(),
+ be = parser_cxx_record->bases_end();
+ bi != be; ++bi) {
+ bool is_virtual = bi->isVirtual();
+
+ QualType base_type = bi->getType();
+ const RecordType *base_record_type = base_type->getAs<RecordType>();
+ DeclFromParser<RecordDecl> base_record(base_record_type->getDecl());
+ DeclFromParser<CXXRecordDecl> base_cxx_record =
+ DynCast<CXXRecordDecl>(base_record);
+
+ LLDB_LOG(log,
+ "LRT {0}(CXXRecordDecl*){1}, Name = '{2}', Offset = "
+ "{3} chars",
+ (is_virtual ? "Virtual " : ""), base_cxx_record.decl,
+ base_cxx_record.decl->getName(),
+ (is_virtual
+ ? vbase_offsets[base_cxx_record.decl].getQuantity()
+ : base_offsets[base_cxx_record.decl].getQuantity()));
+ }
+ } else {
+ LLDB_LOG(log, "LRD Not a CXXRecord, so no bases");
+ }
+ }
+
+ return true;
+}
+
bool ClangASTImporter::LayoutRecordType(
const clang::RecordDecl *record_decl, uint64_t &bit_size,
uint64_t &alignment,
@@ -527,7 +760,6 @@ bool ClangASTImporter::LayoutRecordType(
&vbase_offsets) {
RecordDeclToLayoutMap::iterator pos =
m_record_decl_to_layout_map.find(record_decl);
- bool success = false;
base_offsets.clear();
vbase_offsets.clear();
if (pos != m_record_decl_to_layout_map.end()) {
@@ -537,13 +769,23 @@ bool ClangASTImporter::LayoutRecordType(
base_offsets.swap(pos->second.base_offsets);
vbase_offsets.swap(pos->second.vbase_offsets);
m_record_decl_to_layout_map.erase(pos);
- success = true;
- } else {
- bit_size = 0;
- alignment = 0;
- field_offsets.clear();
+ return true;
}
- return success;
+
+ // It's possible that we calculated the layout in a different
+ // ClangASTImporter instance. Try to import such layout if
+ // our decl has an origin.
+ if (auto origin = GetDeclOrigin(record_decl); origin.Valid())
+ if (importRecordLayoutFromOrigin(record_decl, bit_size, alignment,
+ field_offsets, base_offsets,
+ vbase_offsets))
+ return true;
+
+ bit_size = 0;
+ alignment = 0;
+ field_offsets.clear();
+
+ return false;
}
void ClangASTImporter::SetRecordLayout(clang::RecordDecl *decl,
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h
index e565a96b217f..bc962e544d2f 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.h
@@ -14,6 +14,7 @@
#include <set>
#include <vector>
+#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTImporter.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
@@ -127,6 +128,27 @@ public:
llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
&vbase_offsets);
+ /// If \ref record has a valid origin, this function copies that
+ /// origin's layout into this ClangASTImporter instance.
+ ///
+ /// \param[in] record The decl whose layout we're calculating.
+  /// \param[out] size Size of \ref record in bits.
+  /// \param[out] alignment Alignment of \ref record in bits.
+ /// \param[out] field_offsets Offsets of fields of \ref record.
+ /// \param[out] base_offsets Offsets of base classes of \ref record.
+ /// \param[out] vbase_offsets Offsets of virtual base classes of \ref record.
+ ///
+ /// \returns Returns 'false' if no valid origin was found for \ref record or
+ /// this function failed to import the layout from the origin. Otherwise,
+ /// returns 'true' and the offsets/size/alignment are valid for use.
+ bool importRecordLayoutFromOrigin(
+ const clang::RecordDecl *record, uint64_t &size, uint64_t &alignment,
+ llvm::DenseMap<const clang::FieldDecl *, uint64_t> &field_offsets,
+ llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
+ &base_offsets,
+ llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
+ &vbase_offsets);
+
/// Returns true iff the given type was copied from another TypeSystemClang
/// and the original type in this other TypeSystemClang might contain
/// additional information (e.g., the definition of a 'class' type) that could
@@ -456,6 +478,58 @@ public:
RecordDeclToLayoutMap m_record_decl_to_layout_map;
};
+template <class D> class TaggedASTDecl {
+public:
+ TaggedASTDecl() : decl(nullptr) {}
+ TaggedASTDecl(D *_decl) : decl(_decl) {}
+ bool IsValid() const { return (decl != nullptr); }
+ bool IsInvalid() const { return !IsValid(); }
+ D *operator->() const { return decl; }
+ D *decl;
+};
+
+template <class D2, template <class D> class TD, class D1>
+TD<D2> DynCast(TD<D1> source) {
+ return TD<D2>(llvm::dyn_cast<D2>(source.decl));
+}
+
+template <class D = clang::Decl> class DeclFromParser;
+template <class D = clang::Decl> class DeclFromUser;
+
+template <class D> class DeclFromParser : public TaggedASTDecl<D> {
+public:
+ DeclFromParser() : TaggedASTDecl<D>() {}
+ DeclFromParser(D *_decl) : TaggedASTDecl<D>(_decl) {}
+
+ DeclFromUser<D> GetOrigin(ClangASTImporter &importer);
+};
+
+template <class D> class DeclFromUser : public TaggedASTDecl<D> {
+public:
+ DeclFromUser() : TaggedASTDecl<D>() {}
+ DeclFromUser(D *_decl) : TaggedASTDecl<D>(_decl) {}
+
+ DeclFromParser<D> Import(clang::ASTContext *dest_ctx,
+ ClangASTImporter &importer);
+};
+
+template <class D>
+DeclFromUser<D> DeclFromParser<D>::GetOrigin(ClangASTImporter &importer) {
+ ClangASTImporter::DeclOrigin origin = importer.GetDeclOrigin(this->decl);
+ if (!origin.Valid())
+ return DeclFromUser<D>();
+ return DeclFromUser<D>(llvm::dyn_cast<D>(origin.decl));
+}
+
+template <class D>
+DeclFromParser<D> DeclFromUser<D>::Import(clang::ASTContext *dest_ctx,
+ ClangASTImporter &importer) {
+ DeclFromParser<> parser_generic_decl(importer.CopyDecl(dest_ctx, this->decl));
+ if (parser_generic_decl.IsInvalid())
+ return DeclFromParser<D>();
+ return DeclFromParser<D>(llvm::dyn_cast<D>(parser_generic_decl.decl));
+}
+
} // namespace lldb_private
#endif // LLDB_SOURCE_PLUGINS_EXPRESSIONPARSER_CLANG_CLANGASTIMPORTER_H
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
index 79dd306f7627..a95a9e9f01e3 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
@@ -21,7 +21,6 @@
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/RecordLayout.h"
#include "clang/Basic/SourceManager.h"
#include "Plugins/ExpressionParser/Clang/ClangUtil.h"
@@ -705,56 +704,6 @@ void ClangASTSource::FillNamespaceMap(
}
}
-template <class D> class TaggedASTDecl {
-public:
- TaggedASTDecl() : decl(nullptr) {}
- TaggedASTDecl(D *_decl) : decl(_decl) {}
- bool IsValid() const { return (decl != nullptr); }
- bool IsInvalid() const { return !IsValid(); }
- D *operator->() const { return decl; }
- D *decl;
-};
-
-template <class D2, template <class D> class TD, class D1>
-TD<D2> DynCast(TD<D1> source) {
- return TD<D2>(dyn_cast<D2>(source.decl));
-}
-
-template <class D = Decl> class DeclFromParser;
-template <class D = Decl> class DeclFromUser;
-
-template <class D> class DeclFromParser : public TaggedASTDecl<D> {
-public:
- DeclFromParser() : TaggedASTDecl<D>() {}
- DeclFromParser(D *_decl) : TaggedASTDecl<D>(_decl) {}
-
- DeclFromUser<D> GetOrigin(ClangASTSource &source);
-};
-
-template <class D> class DeclFromUser : public TaggedASTDecl<D> {
-public:
- DeclFromUser() : TaggedASTDecl<D>() {}
- DeclFromUser(D *_decl) : TaggedASTDecl<D>(_decl) {}
-
- DeclFromParser<D> Import(ClangASTSource &source);
-};
-
-template <class D>
-DeclFromUser<D> DeclFromParser<D>::GetOrigin(ClangASTSource &source) {
- ClangASTImporter::DeclOrigin origin = source.GetDeclOrigin(this->decl);
- if (!origin.Valid())
- return DeclFromUser<D>();
- return DeclFromUser<D>(dyn_cast<D>(origin.decl));
-}
-
-template <class D>
-DeclFromParser<D> DeclFromUser<D>::Import(ClangASTSource &source) {
- DeclFromParser<> parser_generic_decl(source.CopyDecl(this->decl));
- if (parser_generic_decl.IsInvalid())
- return DeclFromParser<D>();
- return DeclFromParser<D>(dyn_cast<D>(parser_generic_decl.decl));
-}
-
bool ClangASTSource::FindObjCMethodDeclsWithOrigin(
NameSearchContext &context, ObjCInterfaceDecl *original_interface_decl,
const char *log_info) {
@@ -1188,8 +1137,8 @@ void ClangASTSource::FindObjCMethodDecls(NameSearchContext &context) {
} while (false);
}
-static bool FindObjCPropertyAndIvarDeclsWithOrigin(
- NameSearchContext &context, ClangASTSource &source,
+bool ClangASTSource::FindObjCPropertyAndIvarDeclsWithOrigin(
+ NameSearchContext &context,
DeclFromUser<const ObjCInterfaceDecl> &origin_iface_decl) {
Log *log = GetLog(LLDBLog::Expressions);
@@ -1209,7 +1158,7 @@ static bool FindObjCPropertyAndIvarDeclsWithOrigin(
if (origin_property_decl.IsValid()) {
DeclFromParser<ObjCPropertyDecl> parser_property_decl(
- origin_property_decl.Import(source));
+ origin_property_decl.Import(m_ast_context, *m_ast_importer_sp));
if (parser_property_decl.IsValid()) {
LLDB_LOG(log, " CAS::FOPD found\n{0}",
ClangUtil::DumpDecl(parser_property_decl.decl));
@@ -1224,7 +1173,7 @@ static bool FindObjCPropertyAndIvarDeclsWithOrigin(
if (origin_ivar_decl.IsValid()) {
DeclFromParser<ObjCIvarDecl> parser_ivar_decl(
- origin_ivar_decl.Import(source));
+ origin_ivar_decl.Import(m_ast_context, *m_ast_importer_sp));
if (parser_ivar_decl.IsValid()) {
LLDB_LOG(log, " CAS::FOPD found\n{0}",
ClangUtil::DumpDecl(parser_ivar_decl.decl));
@@ -1243,7 +1192,7 @@ void ClangASTSource::FindObjCPropertyAndIvarDecls(NameSearchContext &context) {
DeclFromParser<const ObjCInterfaceDecl> parser_iface_decl(
cast<ObjCInterfaceDecl>(context.m_decl_context));
DeclFromUser<const ObjCInterfaceDecl> origin_iface_decl(
- parser_iface_decl.GetOrigin(*this));
+ parser_iface_decl.GetOrigin(*m_ast_importer_sp));
ConstString class_name(parser_iface_decl->getNameAsString().c_str());
@@ -1253,7 +1202,7 @@ void ClangASTSource::FindObjCPropertyAndIvarDecls(NameSearchContext &context) {
m_ast_context, m_clang_ast_context->getDisplayName(),
parser_iface_decl->getName(), context.m_decl_name.getAsString());
- if (FindObjCPropertyAndIvarDeclsWithOrigin(context, *this, origin_iface_decl))
+ if (FindObjCPropertyAndIvarDeclsWithOrigin(context, origin_iface_decl))
return;
LLDB_LOG(log,
@@ -1286,7 +1235,7 @@ void ClangASTSource::FindObjCPropertyAndIvarDecls(NameSearchContext &context) {
"(ObjCInterfaceDecl*){0}/(ASTContext*){1}...",
complete_iface_decl.decl, &complete_iface_decl->getASTContext());
- FindObjCPropertyAndIvarDeclsWithOrigin(context, *this, complete_iface_decl);
+ FindObjCPropertyAndIvarDeclsWithOrigin(context, complete_iface_decl);
return;
} while (false);
@@ -1320,7 +1269,7 @@ void ClangASTSource::FindObjCPropertyAndIvarDecls(NameSearchContext &context) {
interface_decl_from_modules.decl,
&interface_decl_from_modules->getASTContext());
- if (FindObjCPropertyAndIvarDeclsWithOrigin(context, *this,
+ if (FindObjCPropertyAndIvarDeclsWithOrigin(context,
interface_decl_from_modules))
return;
} while (false);
@@ -1364,7 +1313,7 @@ void ClangASTSource::FindObjCPropertyAndIvarDecls(NameSearchContext &context) {
interface_decl_from_runtime.decl,
&interface_decl_from_runtime->getASTContext());
- if (FindObjCPropertyAndIvarDeclsWithOrigin(context, *this,
+ if (FindObjCPropertyAndIvarDeclsWithOrigin(context,
interface_decl_from_runtime))
return;
} while (false);
@@ -1395,205 +1344,16 @@ void ClangASTSource::LookupInNamespace(NameSearchContext &context) {
}
}
-typedef llvm::DenseMap<const FieldDecl *, uint64_t> FieldOffsetMap;
-typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetMap;
-
-template <class D, class O>
-static bool ImportOffsetMap(llvm::DenseMap<const D *, O> &destination_map,
- llvm::DenseMap<const D *, O> &source_map,
- ClangASTSource &source) {
- // When importing fields into a new record, clang has a hard requirement that
- // fields be imported in field offset order. Since they are stored in a
- // DenseMap with a pointer as the key type, this means we cannot simply
- // iterate over the map, as the order will be non-deterministic. Instead we
- // have to sort by the offset and then insert in sorted order.
- typedef llvm::DenseMap<const D *, O> MapType;
- typedef typename MapType::value_type PairType;
- std::vector<PairType> sorted_items;
- sorted_items.reserve(source_map.size());
- sorted_items.assign(source_map.begin(), source_map.end());
- llvm::sort(sorted_items, llvm::less_second());
-
- for (const auto &item : sorted_items) {
- DeclFromUser<D> user_decl(const_cast<D *>(item.first));
- DeclFromParser<D> parser_decl(user_decl.Import(source));
- if (parser_decl.IsInvalid())
- return false;
- destination_map.insert(
- std::pair<const D *, O>(parser_decl.decl, item.second));
- }
-
- return true;
-}
-
-template <bool IsVirtual>
-bool ExtractBaseOffsets(const ASTRecordLayout &record_layout,
- DeclFromUser<const CXXRecordDecl> &record,
- BaseOffsetMap &base_offsets) {
- for (CXXRecordDecl::base_class_const_iterator
- bi = (IsVirtual ? record->vbases_begin() : record->bases_begin()),
- be = (IsVirtual ? record->vbases_end() : record->bases_end());
- bi != be; ++bi) {
- if (!IsVirtual && bi->isVirtual())
- continue;
-
- const clang::Type *origin_base_type = bi->getType().getTypePtr();
- const clang::RecordType *origin_base_record_type =
- origin_base_type->getAs<RecordType>();
-
- if (!origin_base_record_type)
- return false;
-
- DeclFromUser<RecordDecl> origin_base_record(
- origin_base_record_type->getDecl());
-
- if (origin_base_record.IsInvalid())
- return false;
-
- DeclFromUser<CXXRecordDecl> origin_base_cxx_record(
- DynCast<CXXRecordDecl>(origin_base_record));
-
- if (origin_base_cxx_record.IsInvalid())
- return false;
-
- CharUnits base_offset;
-
- if (IsVirtual)
- base_offset =
- record_layout.getVBaseClassOffset(origin_base_cxx_record.decl);
- else
- base_offset =
- record_layout.getBaseClassOffset(origin_base_cxx_record.decl);
-
- base_offsets.insert(std::pair<const CXXRecordDecl *, CharUnits>(
- origin_base_cxx_record.decl, base_offset));
- }
-
- return true;
-}
-
-bool ClangASTSource::layoutRecordType(const RecordDecl *record, uint64_t &size,
- uint64_t &alignment,
- FieldOffsetMap &field_offsets,
- BaseOffsetMap &base_offsets,
- BaseOffsetMap &virtual_base_offsets) {
-
- Log *log = GetLog(LLDBLog::Expressions);
-
- LLDB_LOG(log,
- "LayoutRecordType on (ASTContext*){0} '{1}' for (RecordDecl*)"
- "{2} [name = '{3}']",
- m_ast_context, m_clang_ast_context->getDisplayName(), record,
- record->getName());
-
- DeclFromParser<const RecordDecl> parser_record(record);
- DeclFromUser<const RecordDecl> origin_record(
- parser_record.GetOrigin(*this));
-
- if (origin_record.IsInvalid())
- return false;
-
- FieldOffsetMap origin_field_offsets;
- BaseOffsetMap origin_base_offsets;
- BaseOffsetMap origin_virtual_base_offsets;
-
- TypeSystemClang::GetCompleteDecl(
- &origin_record->getASTContext(),
- const_cast<RecordDecl *>(origin_record.decl));
-
- clang::RecordDecl *definition = origin_record.decl->getDefinition();
- if (!definition || !definition->isCompleteDefinition())
- return false;
-
- const ASTRecordLayout &record_layout(
- origin_record->getASTContext().getASTRecordLayout(origin_record.decl));
-
- int field_idx = 0, field_count = record_layout.getFieldCount();
-
- for (RecordDecl::field_iterator fi = origin_record->field_begin(),
- fe = origin_record->field_end();
- fi != fe; ++fi) {
- if (field_idx >= field_count)
- return false; // Layout didn't go well. Bail out.
-
- uint64_t field_offset = record_layout.getFieldOffset(field_idx);
-
- origin_field_offsets.insert(
- std::pair<const FieldDecl *, uint64_t>(*fi, field_offset));
-
- field_idx++;
- }
-
- lldbassert(&record->getASTContext() == m_ast_context);
-
- DeclFromUser<const CXXRecordDecl> origin_cxx_record(
- DynCast<const CXXRecordDecl>(origin_record));
-
- if (origin_cxx_record.IsValid()) {
- if (!ExtractBaseOffsets<false>(record_layout, origin_cxx_record,
- origin_base_offsets) ||
- !ExtractBaseOffsets<true>(record_layout, origin_cxx_record,
- origin_virtual_base_offsets))
- return false;
- }
-
- if (!ImportOffsetMap(field_offsets, origin_field_offsets, *this) ||
- !ImportOffsetMap(base_offsets, origin_base_offsets, *this) ||
- !ImportOffsetMap(virtual_base_offsets, origin_virtual_base_offsets,
- *this))
- return false;
-
- size = record_layout.getSize().getQuantity() * m_ast_context->getCharWidth();
- alignment = record_layout.getAlignment().getQuantity() *
- m_ast_context->getCharWidth();
-
- if (log) {
- LLDB_LOG(log, "LRT returned:");
- LLDB_LOG(log, "LRT Original = (RecordDecl*){0}",
- static_cast<const void *>(origin_record.decl));
- LLDB_LOG(log, "LRT Size = {0}", size);
- LLDB_LOG(log, "LRT Alignment = {0}", alignment);
- LLDB_LOG(log, "LRT Fields:");
- for (RecordDecl::field_iterator fi = record->field_begin(),
- fe = record->field_end();
- fi != fe; ++fi) {
- LLDB_LOG(log,
- "LRT (FieldDecl*){0}, Name = '{1}', Type = '{2}', Offset = "
- "{3} bits",
- *fi, fi->getName(), fi->getType().getAsString(),
- field_offsets[*fi]);
- }
- DeclFromParser<const CXXRecordDecl> parser_cxx_record =
- DynCast<const CXXRecordDecl>(parser_record);
- if (parser_cxx_record.IsValid()) {
- LLDB_LOG(log, "LRT Bases:");
- for (CXXRecordDecl::base_class_const_iterator
- bi = parser_cxx_record->bases_begin(),
- be = parser_cxx_record->bases_end();
- bi != be; ++bi) {
- bool is_virtual = bi->isVirtual();
-
- QualType base_type = bi->getType();
- const RecordType *base_record_type = base_type->getAs<RecordType>();
- DeclFromParser<RecordDecl> base_record(base_record_type->getDecl());
- DeclFromParser<CXXRecordDecl> base_cxx_record =
- DynCast<CXXRecordDecl>(base_record);
-
- LLDB_LOG(log,
- "LRT {0}(CXXRecordDecl*){1}, Name = '{2}', Offset = "
- "{3} chars",
- (is_virtual ? "Virtual " : ""), base_cxx_record.decl,
- base_cxx_record.decl->getName(),
- (is_virtual
- ? virtual_base_offsets[base_cxx_record.decl].getQuantity()
- : base_offsets[base_cxx_record.decl].getQuantity()));
- }
- } else {
- LLDB_LOG(log, "LRD Not a CXXRecord, so no bases");
- }
- }
-
- return true;
+bool ClangASTSource::layoutRecordType(
+ const RecordDecl *record, uint64_t &size, uint64_t &alignment,
+ llvm::DenseMap<const clang::FieldDecl *, uint64_t> &field_offsets,
+ llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
+ &base_offsets,
+ llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
+ &virtual_base_offsets) {
+ return m_ast_importer_sp->importRecordLayoutFromOrigin(
+ record, size, alignment, field_offsets, base_offsets,
+ virtual_base_offsets);
}
void ClangASTSource::CompleteNamespaceMap(
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h
index f3fec3f944a1..f34e4661a81c 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h
@@ -352,6 +352,11 @@ public:
/// ExternalASTSource.
TypeSystemClang *GetTypeSystem() const { return m_clang_ast_context; }
+private:
+ bool FindObjCPropertyAndIvarDeclsWithOrigin(
+ NameSearchContext &context,
+ DeclFromUser<const clang::ObjCInterfaceDecl> &origin_iface_decl);
+
protected:
bool FindObjCMethodDeclsWithOrigin(
NameSearchContext &context,
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index a1ad3f569ec7..ce52f3595247 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -1417,7 +1417,7 @@ bool ScriptInterpreterPythonImpl::GenerateScriptAliasFunction(
sstr.Printf("def %s (debugger, args, exe_ctx, result, internal_dict):",
auto_generated_function_name.c_str());
- if (!GenerateFunction(sstr.GetData(), user_input, /*is_callback=*/true)
+ if (!GenerateFunction(sstr.GetData(), user_input, /*is_callback=*/false)
.Success())
return false;
diff --git a/lldb/source/Symbol/Symtab.cpp b/lldb/source/Symbol/Symtab.cpp
index 564a3a94cfa2..b7837892d7e2 100644
--- a/lldb/source/Symbol/Symtab.cpp
+++ b/lldb/source/Symbol/Symtab.cpp
@@ -124,12 +124,8 @@ void Symtab::Dump(Stream *s, Target *target, SortOrder sort_order,
DumpSymbolHeader(s);
std::multimap<llvm::StringRef, const Symbol *> name_map;
- for (const_iterator pos = m_symbols.begin(), end = m_symbols.end();
- pos != end; ++pos) {
- const char *name = pos->GetName().AsCString();
- if (name && name[0])
- name_map.insert(std::make_pair(name, &(*pos)));
- }
+ for (const Symbol &symbol : m_symbols)
+ name_map.emplace(llvm::StringRef(symbol.GetName()), &symbol);
for (const auto &name_to_symbol : name_map) {
const Symbol *symbol = name_to_symbol.second;
@@ -138,6 +134,21 @@ void Symtab::Dump(Stream *s, Target *target, SortOrder sort_order,
}
} break;
+ case eSortOrderBySize: {
+ s->PutCString(" (sorted by size):\n");
+ DumpSymbolHeader(s);
+
+ std::multimap<size_t, const Symbol *, std::greater<size_t>> size_map;
+ for (const Symbol &symbol : m_symbols)
+ size_map.emplace(symbol.GetByteSize(), &symbol);
+
+ for (const auto &size_to_symbol : size_map) {
+ const Symbol *symbol = size_to_symbol.second;
+ s->Indent();
+ symbol->Dump(s, target, symbol - &m_symbols[0], name_preference);
+ }
+ } break;
+
case eSortOrderByAddress:
s->PutCString(" (sorted by address):\n");
DumpSymbolHeader(s);
diff --git a/lldb/source/Symbol/Variable.cpp b/lldb/source/Symbol/Variable.cpp
index 2bb2ff7db4b7..a33c3433d9e2 100644
--- a/lldb/source/Symbol/Variable.cpp
+++ b/lldb/source/Symbol/Variable.cpp
@@ -509,15 +509,17 @@ static void PrivateAutoCompleteMembers(
CompilerType member_compiler_type = compiler_type.GetFieldAtIndex(
i, member_name, nullptr, nullptr, nullptr);
- if (partial_member_name.empty() ||
- llvm::StringRef(member_name).starts_with(partial_member_name)) {
+ if (partial_member_name.empty()) {
+ request.AddCompletion((prefix_path + member_name).str());
+ } else if (llvm::StringRef(member_name)
+ .starts_with(partial_member_name)) {
if (member_name == partial_member_name) {
PrivateAutoComplete(
frame, partial_path,
prefix_path + member_name, // Anything that has been resolved
// already will be in here
member_compiler_type.GetCanonicalType(), request);
- } else {
+ } else if (partial_path.empty()) {
request.AddCompletion((prefix_path + member_name).str());
}
}
diff --git a/lldb/test/API/commands/command/script/TestCommandScript.py b/lldb/test/API/commands/command/script/TestCommandScript.py
index 850552032902..fdd5216a1c6c 100644
--- a/lldb/test/API/commands/command/script/TestCommandScript.py
+++ b/lldb/test/API/commands/command/script/TestCommandScript.py
@@ -216,3 +216,17 @@ class CmdPythonTestCase(TestBase):
# The result object will be replaced by an empty result object (in the
# "Started" state).
self.expect("script str(persistence.result_copy)", substrs=["Started"])
+
+ def test_interactive(self):
+ """
+ Test that we can add multiple lines interactively.
+ """
+ interp = self.dbg.GetCommandInterpreter()
+ cmd_file = self.getSourcePath("cmd_file.lldb")
+ result = lldb.SBCommandReturnObject()
+ interp.HandleCommand(f"command source {cmd_file}", result)
+ self.assertCommandReturn(result, "Sourcing the command should cause no errors.")
+ self.assertTrue(interp.UserCommandExists("my_cmd"), "Command defined.")
+ interp.HandleCommand("my_cmd", result)
+ self.assertCommandReturn(result, "Running the command succeeds")
+ self.assertIn("My Command Result", result.GetOutput(), "Command was correct")
diff --git a/lldb/test/API/commands/command/script/cmd_file.lldb b/lldb/test/API/commands/command/script/cmd_file.lldb
new file mode 100644
index 000000000000..1589a7cfe0b8
--- /dev/null
+++ b/lldb/test/API/commands/command/script/cmd_file.lldb
@@ -0,0 +1,4 @@
+command script add my_cmd
+result.PutCString("My Command Result")
+result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
+DONE
diff --git a/lldb/test/API/functionalities/completion/TestCompletion.py b/lldb/test/API/functionalities/completion/TestCompletion.py
index f71bc73928f0..0d6907e0c3d2 100644
--- a/lldb/test/API/functionalities/completion/TestCompletion.py
+++ b/lldb/test/API/functionalities/completion/TestCompletion.py
@@ -60,10 +60,12 @@ class CommandLineCompletionTestCase(TestBase):
def do_test_variable_completion(self, command):
self.complete_from_to(f"{command} fo", f"{command} fooo")
- self.complete_from_to(f"{command} fooo.", f"{command} fooo.")
+ self.complete_from_to(f"{command} fooo.", f"{command} fooo.t")
+ self.complete_from_to(f"{command} fooo.t.", f"{command} fooo.t.x")
self.complete_from_to(f"{command} fooo.dd", f"{command} fooo.dd")
- self.complete_from_to(f"{command} ptr_fooo->", f"{command} ptr_fooo->")
+ self.complete_from_to(f"{command} ptr_fooo->", f"{command} ptr_fooo->t")
+ self.complete_from_to(f"{command} ptr_fooo->t.", f"{command} ptr_fooo->t.x")
self.complete_from_to(f"{command} ptr_fooo->dd", f"{command} ptr_fooo->dd")
self.complete_from_to(f"{command} cont", f"{command} container")
diff --git a/lldb/test/API/functionalities/completion/main.cpp b/lldb/test/API/functionalities/completion/main.cpp
index 06ff5773e8a9..f925c1d5acf3 100644
--- a/lldb/test/API/functionalities/completion/main.cpp
+++ b/lldb/test/API/functionalities/completion/main.cpp
@@ -1,12 +1,17 @@
#include <iostream>
+class Baz {
+public:
+ int x;
+};
+
class Foo
{
public:
- int Bar(int x, int y)
- {
- return x + y;
- }
+ Baz t;
+ int temp;
+
+ int Bar(int x, int y) { return x + y; }
};
namespace { int Quux (void) { return 0; } }
diff --git a/lldb/test/API/lang/c/local_variables/TestLocalVariables.py b/lldb/test/API/lang/c/local_variables/TestLocalVariables.py
index cccb8cac013f..686636119314 100644
--- a/lldb/test/API/lang/c/local_variables/TestLocalVariables.py
+++ b/lldb/test/API/lang/c/local_variables/TestLocalVariables.py
@@ -19,7 +19,6 @@ class LocalVariablesTestCase(TestBase):
self.source = "main.c"
self.line = line_number(self.source, "// Set break point at this line.")
- @skipIfWindows
def test_c_local_variables(self):
"""Test local variable value."""
self.build()
diff --git a/lldb/test/API/lang/cpp/gmodules/alignment/Makefile b/lldb/test/API/lang/cpp/gmodules/alignment/Makefile
new file mode 100644
index 000000000000..a6c3e8ca84a3
--- /dev/null
+++ b/lldb/test/API/lang/cpp/gmodules/alignment/Makefile
@@ -0,0 +1,4 @@
+PCH_CXX_SOURCE = pch.h
+CXX_SOURCES = main.cpp
+
+include Makefile.rules
diff --git a/lldb/test/API/lang/cpp/gmodules/alignment/TestPchAlignment.py b/lldb/test/API/lang/cpp/gmodules/alignment/TestPchAlignment.py
new file mode 100644
index 000000000000..535dd13d0ada
--- /dev/null
+++ b/lldb/test/API/lang/cpp/gmodules/alignment/TestPchAlignment.py
@@ -0,0 +1,60 @@
+"""
+Tests that we correctly track AST layout info
+(specifically alignment) when moving AST nodes
+between ClangASTImporter instances (in this case,
+from pch to executable to expression AST).
+"""
+
+import lldb
+import os
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestPchAlignment(TestBase):
+ @add_test_categories(["gmodules"])
+    def test_frame_var(self):
+        self.build()
+        lldbutil.run_to_source_breakpoint(
+            self, "return data", lldb.SBFileSpec("main.cpp")
+        )
+
+        self.expect(
+            "frame variable data",
+            substrs=["row = 1", "col = 2", "row = 3", "col = 4", "stride = 5"],
+        )
+
+    @add_test_categories(["gmodules"])
+    def test_expr(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(
+ self, "return data", lldb.SBFileSpec("main.cpp")
+ )
+
+ self.expect_expr(
+ "data",
+ result_type="MatrixData",
+ result_children=[
+ ValueCheck(
+ name="section",
+ children=[
+ ValueCheck(
+ name="origin",
+ children=[
+ ValueCheck(name="row", value="1"),
+ ValueCheck(name="col", value="2"),
+ ],
+ ),
+ ValueCheck(
+ name="size",
+ children=[
+ ValueCheck(name="row", value="3"),
+ ValueCheck(name="col", value="4"),
+ ],
+ ),
+ ],
+ ),
+ ValueCheck(name="stride", value="5"),
+ ],
+ )
diff --git a/lldb/test/API/lang/cpp/gmodules/alignment/main.cpp b/lldb/test/API/lang/cpp/gmodules/alignment/main.cpp
new file mode 100644
index 000000000000..5481f3fad1ff
--- /dev/null
+++ b/lldb/test/API/lang/cpp/gmodules/alignment/main.cpp
@@ -0,0 +1,10 @@
+int main(int argc, const char *argv[]) {
+ struct MatrixData data = {0};
+ data.section.origin.row = 1;
+ data.section.origin.col = 2;
+ data.section.size.row = 3;
+ data.section.size.col = 4;
+ data.stride = 5;
+
+ return data.section.size.row;
+}
diff --git a/lldb/test/API/lang/cpp/gmodules/alignment/pch.h b/lldb/test/API/lang/cpp/gmodules/alignment/pch.h
new file mode 100644
index 000000000000..f0be272aa601
--- /dev/null
+++ b/lldb/test/API/lang/cpp/gmodules/alignment/pch.h
@@ -0,0 +1,21 @@
+#ifndef PCH_H_IN
+#define PCH_H_IN
+
+static const int kAlignment = 64;
+
+struct [[gnu::aligned(kAlignment)]] RowCol {
+ unsigned row;
+ unsigned col;
+};
+
+struct [[gnu::aligned(kAlignment)]] Submatrix {
+ struct RowCol origin;
+ struct RowCol size;
+};
+
+struct [[gnu::aligned(kAlignment)]] MatrixData {
+ struct Submatrix section;
+ unsigned stride;
+};
+
+#endif // PCH_H_IN
diff --git a/lldb/test/API/lit.cfg.py b/lldb/test/API/lit.cfg.py
index 12675edc0fd3..f9497b632fc5 100644
--- a/lldb/test/API/lit.cfg.py
+++ b/lldb/test/API/lit.cfg.py
@@ -309,3 +309,6 @@ if "FREEBSD_LEGACY_PLUGIN" in os.environ:
# Propagate XDG_CACHE_HOME
if "XDG_CACHE_HOME" in os.environ:
config.environment["XDG_CACHE_HOME"] = os.environ["XDG_CACHE_HOME"]
+
+if is_configured("use_vendor_packages"):
+ config.environment["LLDB_TEST_USE_VENDOR_PACKAGES"] = "1"
diff --git a/lldb/test/API/lit.site.cfg.py.in b/lldb/test/API/lit.site.cfg.py.in
index 053331dc4881..c2602acd2ef8 100644
--- a/lldb/test/API/lit.site.cfg.py.in
+++ b/lldb/test/API/lit.site.cfg.py.in
@@ -38,6 +38,7 @@ config.libcxx_include_target_dir = "@LIBCXX_GENERATED_INCLUDE_TARGET_DIR@"
# The API tests use their own module caches.
config.lldb_module_cache = os.path.join("@LLDB_TEST_MODULE_CACHE_LLDB@", "lldb-api")
config.clang_module_cache = os.path.join("@LLDB_TEST_MODULE_CACHE_CLANG@", "lldb-api")
+config.use_vendor_packages = @LLDB_TEST_USE_VENDOR_PACKAGES@
# Plugins
lldb_build_intel_pt = '@LLDB_BUILD_INTEL_PT@'
diff --git a/lldb/test/API/macosx/nslog/TestDarwinNSLogOutput.py b/lldb/test/API/macosx/nslog/TestDarwinNSLogOutput.py
index d7560156e057..15d9feb54389 100644
--- a/lldb/test/API/macosx/nslog/TestDarwinNSLogOutput.py
+++ b/lldb/test/API/macosx/nslog/TestDarwinNSLogOutput.py
@@ -56,8 +56,9 @@ class DarwinNSLogOutputTestCase(TestBase):
# So that the child gets torn down after the test.
import pexpect
- self.child = pexpect.spawnu(
- "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
+ self.child = pexpect.spawn(
+ "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe),
+ encoding="utf-8",
)
child = self.child
diff --git a/lldb/test/API/repl/clang/TestClangREPL.py b/lldb/test/API/repl/clang/TestClangREPL.py
index 0b67955a7833..c37557fb9473 100644
--- a/lldb/test/API/repl/clang/TestClangREPL.py
+++ b/lldb/test/API/repl/clang/TestClangREPL.py
@@ -1,7 +1,6 @@
-import lldb
from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbpexpect import PExpectTest
+from lldbsuite.test.lldbtest import *
class TestCase(PExpectTest):
@@ -17,13 +16,7 @@ class TestCase(PExpectTest):
self.current_repl_line_number += 1
self.child.expect_exact(str(self.current_repl_line_number) + ">")
- # PExpect uses many timeouts internally and doesn't play well
- # under ASAN on a loaded machine..
- @skipIfAsan
- @skipIf(oslist=["linux"], archs=["arm", "aarch64"]) # Randomly fails on buildbot
- @skipIfEditlineSupportMissing
- def test_basic_completion(self):
- """Test that we can complete a simple multiline expression"""
+ def start_repl(self):
self.build()
self.current_repl_line_number = 1
@@ -41,6 +34,14 @@ class TestCase(PExpectTest):
self.child.send("expression --repl -l c --\n")
self.child.expect_exact("1>")
+ # PExpect uses many timeouts internally and doesn't play well
+ # under ASAN on a loaded machine..
+ @skipIfAsan
+ @skipIf(oslist=["linux"], archs=["arm", "aarch64"]) # Randomly fails on buildbot
+ @skipIfEditlineSupportMissing
+ def test_basic_completion(self):
+ """Test that we can complete a simple multiline expression"""
+ self.start_repl()
# Try evaluating a simple expression.
self.expect_repl("3 + 3", substrs=["(int) $0 = 6"])
@@ -54,3 +55,16 @@ class TestCase(PExpectTest):
self.expect_repl("$persistent + 10", substrs=["(long) $2 = 17"])
self.quit()
+
+ # PExpect uses many timeouts internally and doesn't play well
+ # under ASAN on a loaded machine..
+ @skipIfAsan
+ @skipIf(oslist=["linux"], archs=["arm", "aarch64"]) # Randomly fails on buildbot
+ @skipIfEditlineSupportMissing
+ def test_completion_with_space_only_line(self):
+ """Test that we don't crash when completing lines with spaces only"""
+ self.start_repl()
+
+ self.child.send(" ")
+ self.child.send("\t")
+ self.expect_repl("3 + 3", substrs=["(int) $0 = 6"])
diff --git a/lldb/test/API/terminal/TestSTTYBeforeAndAfter.py b/lldb/test/API/terminal/TestSTTYBeforeAndAfter.py
index e9b5940ff1ad..31b960859fa2 100644
--- a/lldb/test/API/terminal/TestSTTYBeforeAndAfter.py
+++ b/lldb/test/API/terminal/TestSTTYBeforeAndAfter.py
@@ -37,7 +37,7 @@ class TestSTTYBeforeAndAfter(TestBase):
lldb_prompt = "(lldb) "
# So that the child gets torn down after the test.
- self.child = pexpect.spawnu("expect")
+ self.child = pexpect.spawn("expect", encoding="utf-8")
child = self.child
child.expect(expect_prompt)
diff --git a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
index 17cdad89aa6d..52c0bbfb33da 100644
--- a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
+++ b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
@@ -14,6 +14,51 @@ class TestDAP_setDataBreakpoints(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
@skipIfRemote
+ def test_duplicate_start_addresses(self):
+ """Test setDataBreakpoints with multiple watchpoints starting at the same addresses."""
+ program = self.getBuildArtifact("a.out")
+ self.build_and_launch(program)
+ source = "main.cpp"
+ first_loop_break_line = line_number(source, "// first loop breakpoint")
+ self.set_source_breakpoints(source, [first_loop_break_line])
+ self.continue_to_next_stop()
+ self.dap_server.get_stackFrame()
+ # Test setting write watchpoint using expressions: &x, arr+2
+ response_x = self.dap_server.request_dataBreakpointInfo(0, "&x")
+ response_arr_2 = self.dap_server.request_dataBreakpointInfo(0, "arr+2")
+ # Test response from dataBreakpointInfo request.
+ self.assertEquals(response_x["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_x["body"]["accessTypes"], self.accessTypes)
+ self.assertEquals(response_arr_2["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_arr_2["body"]["accessTypes"], self.accessTypes)
+ # The first one should be overwritten by the third one as they start at
+ # the same address. This is indicated by returning {verified: False} for
+ # the first one.
+ dataBreakpoints = [
+ {"dataId": response_x["body"]["dataId"], "accessType": "read"},
+ {"dataId": response_arr_2["body"]["dataId"], "accessType": "write"},
+ {"dataId": response_x["body"]["dataId"], "accessType": "write"},
+ ]
+ set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints)
+ self.assertEquals(
+ set_response["body"]["breakpoints"],
+ [{"verified": False}, {"verified": True}, {"verified": True}],
+ )
+
+ self.continue_to_next_stop()
+ x_val = self.dap_server.get_local_variable_value("x")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(x_val, "2")
+ self.assertEquals(i_val, "1")
+
+ self.continue_to_next_stop()
+ arr_2 = self.dap_server.get_local_variable_child("arr", "[2]")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(arr_2["value"], "42")
+ self.assertEquals(i_val, "2")
+
+ @skipIfWindows
+ @skipIfRemote
def test_expression(self):
"""Tests setting data breakpoints on expression."""
program = self.getBuildArtifact("a.out")
diff --git a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
index 04d741c1d472..0760d358d9c0 100644
--- a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
+++ b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
@@ -44,7 +44,7 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.dap_server.request_disconnect()
# Wait until the underlying lldb-dap process dies.
- self.dap_server.process.wait(timeout=10)
+ self.dap_server.process.wait(timeout=lldbdap_testcase.DAPTestCaseBase.timeoutval)
# Check the return code
self.assertEqual(self.dap_server.process.poll(), 0)
@@ -334,14 +334,14 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
# Get output from the console. This should contain both the
# "stopCommands" that were run after the first breakpoint was hit
self.continue_to_breakpoints(breakpoint_ids)
- output = self.get_console(timeout=1.0)
+ output = self.get_console(timeout=lldbdap_testcase.DAPTestCaseBase.timeoutval)
self.verify_commands("stopCommands", output, stopCommands)
# Continue again and hit the second breakpoint.
# Get output from the console. This should contain both the
# "stopCommands" that were run after the second breakpoint was hit
self.continue_to_breakpoints(breakpoint_ids)
- output = self.get_console(timeout=1.0)
+ output = self.get_console(timeout=lldbdap_testcase.DAPTestCaseBase.timeoutval)
self.verify_commands("stopCommands", output, stopCommands)
# Continue until the program exits
@@ -402,21 +402,21 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.verify_commands("launchCommands", output, launchCommands)
# Verify the "stopCommands" here
self.continue_to_next_stop()
- output = self.get_console(timeout=1.0)
+ output = self.get_console(timeout=lldbdap_testcase.DAPTestCaseBase.timeoutval)
self.verify_commands("stopCommands", output, stopCommands)
# Continue and hit the second breakpoint.
# Get output from the console. This should contain both the
# "stopCommands" that were run after the first breakpoint was hit
self.continue_to_next_stop()
- output = self.get_console(timeout=1.0)
+ output = self.get_console(timeout=lldbdap_testcase.DAPTestCaseBase.timeoutval)
self.verify_commands("stopCommands", output, stopCommands)
# Continue until the program exits
self.continue_to_exit()
# Get output from the console. This should contain both the
# "exitCommands" that were run after the second breakpoint was hit
- output = self.get_console(timeout=1.0)
+ output = self.get_console(timeout=lldbdap_testcase.DAPTestCaseBase.timeoutval)
self.verify_commands("exitCommands", output, exitCommands)
@skipIfWindows
diff --git a/lldb/test/CMakeLists.txt b/lldb/test/CMakeLists.txt
index 1aa8843b6a2e..950643a5b8cc 100644
--- a/lldb/test/CMakeLists.txt
+++ b/lldb/test/CMakeLists.txt
@@ -11,9 +11,14 @@ endif()
if(LLDB_ENFORCE_STRICT_TEST_REQUIREMENTS)
message(STATUS "Enforcing strict test requirements for LLDB")
- set(useful_python_modules
- psutil # Lit uses psutil to do per-test timeouts.
- )
+ # Lit uses psutil to do per-test timeouts.
+ set(useful_python_modules psutil)
+
+ if(NOT WIN32)
+ # We no longer vendor pexpect and it is not used on Windows.
+    list(APPEND useful_python_modules pexpect)
+ endif()
+
foreach(module ${useful_python_modules})
lldb_find_python_module(${module})
if (NOT PY_${module}_FOUND)
@@ -26,6 +31,21 @@ if(LLDB_ENFORCE_STRICT_TEST_REQUIREMENTS)
endforeach()
endif()
+# The "pexpect" package should come from the system environment, not from the
+# LLDB tree. However, we delay the deletion of it from the tree in case
+# users/buildbots don't have the package yet and need some time to install it.
+if (NOT LLDB_TEST_USE_VENDOR_PACKAGES)
+ unset(PY_pexpect_FOUND CACHE)
+ lldb_find_python_module(pexpect)
+ if (NOT PY_pexpect_FOUND)
+ message(FATAL_ERROR
+ "Python module 'pexpect' not found. Please install it via pip or via "
+ "your operating system's package manager. For a temporary workaround, "
+ "use a version from the LLDB tree with "
+ "`LLDB_TEST_USE_VENDOR_PACKAGES=ON`")
+ endif()
+endif()
+
if(LLDB_BUILT_STANDALONE)
# In order to run check-lldb-* we need the correct map_config directives in
# llvm-lit. Because this is a standalone build, LLVM doesn't know about LLDB,
@@ -240,7 +260,8 @@ llvm_canonicalize_cmake_booleans(
LLDB_HAS_LIBCXX
LLDB_TOOL_LLDB_SERVER_BUILD
LLDB_USE_SYSTEM_DEBUGSERVER
- LLDB_IS_64_BITS)
+ LLDB_IS_64_BITS
+ LLDB_TEST_USE_VENDOR_PACKAGES)
# Configure the individual test suites.
add_subdirectory(API)
diff --git a/lldb/test/Shell/Commands/command-thread-backtrace.test b/lldb/test/Shell/Commands/command-thread-backtrace.test
new file mode 100644
index 000000000000..dacef8d7fa6a
--- /dev/null
+++ b/lldb/test/Shell/Commands/command-thread-backtrace.test
@@ -0,0 +1,14 @@
+# RUN: %clang_host -g %S/Inputs/main.c -o %t
+
+# RUN: not %lldb %t -b -o 'b foo' -o 'r' -o 'thread backtrace --count -1' 2>&1 | FileCheck %s --check-prefix COUNT
+# COUNT: error: invalid integer value for option 'c': -1
+
+# RUN: not %lldb %t -b -o 'b foo' -o 'r' -o 'thread backtrace --extended nah' 2>&1 | FileCheck %s --check-prefix EXTENDED
+# EXTENDED: error: invalid boolean value for option 'e': nah
+
+# RUN: not %lldb %t -b -o 'b foo' -o 'r' -o 'thread backtrace --start -1' 2>&1 | FileCheck %s --check-prefix START
+# START: error: invalid integer value for option 's': -1
+
+# RUN: %lldb %t -b -o 'b foo' -o 'r' -o 'thread backtrace --count 0' | FileCheck %s
+# CHECK: frame #0:
+# CHECK: frame #1:
diff --git a/lldb/test/Shell/Driver/TestHelp.test b/lldb/test/Shell/Driver/TestHelp.test
index 0f73fdf0374f..2521b31a6188 100644
--- a/lldb/test/Shell/Driver/TestHelp.test
+++ b/lldb/test/Shell/Driver/TestHelp.test
@@ -37,8 +37,6 @@ CHECK: --arch
CHECK: -a
CHECK: --core
CHECK: -c
-CHECK: --debug
-CHECK: -d
CHECK: --editor
CHECK: -e
CHECK: --file
diff --git a/lldb/test/Shell/SymbolFile/Breakpad/symtab-sorted-by-size.test b/lldb/test/Shell/SymbolFile/Breakpad/symtab-sorted-by-size.test
new file mode 100644
index 000000000000..a9b6c0b1ef09
--- /dev/null
+++ b/lldb/test/Shell/SymbolFile/Breakpad/symtab-sorted-by-size.test
@@ -0,0 +1,11 @@
+# RUN: yaml2obj %S/Inputs/basic-elf.yaml -o %T/symtab.out
+# RUN: %lldb %T/symtab.out -o "target symbols add -s symtab.out %S/Inputs/symtab.syms" \
+# RUN: -s %s | FileCheck %s
+
+# CHECK: num_symbols = 4 (sorted by size):
+# CHECK: [ 0] 0 SX Code 0x0000000000400000 0x00000000000000b0 0x00000000 ___lldb_unnamed_symbol0
+# CHECK: [ 3] 0 X Code 0x00000000004000d0 0x0000000000000022 0x00000000 _start
+# CHECK: [ 1] 0 X Code 0x00000000004000b0 0x0000000000000010 0x00000000 f1
+# CHECK: [ 2] 0 X Code 0x00000000004000c0 0x0000000000000010 0x00000000 f2
+
+image dump symtab -s size symtab.out
diff --git a/lldb/tools/debugserver/source/RNBRemote.cpp b/lldb/tools/debugserver/source/RNBRemote.cpp
index 03d427d3fc59..f22d626c4af2 100644
--- a/lldb/tools/debugserver/source/RNBRemote.cpp
+++ b/lldb/tools/debugserver/source/RNBRemote.cpp
@@ -143,6 +143,39 @@ uint64_t decode_uint64(const char *p, int base, char **end = nullptr,
return addr;
}
+void append_hex_value(std::ostream &ostrm, const void *buf, size_t buf_size,
+ bool swap) {
+ int i;
+ const uint8_t *p = (const uint8_t *)buf;
+ if (swap) {
+ for (i = static_cast<int>(buf_size) - 1; i >= 0; i--)
+ ostrm << RAWHEX8(p[i]);
+ } else {
+ for (size_t i = 0; i < buf_size; i++)
+ ostrm << RAWHEX8(p[i]);
+ }
+}
+
+std::string cstring_to_asciihex_string(const char *str) {
+ std::string hex_str;
+ hex_str.reserve(strlen(str) * 2);
+ while (str && *str) {
+ uint8_t c = *str++;
+ char hexbuf[5];
+ snprintf(hexbuf, sizeof(hexbuf), "%02x", c);
+ hex_str += hexbuf;
+ }
+ return hex_str;
+}
+
+void append_hexified_string(std::ostream &ostrm, const std::string &string) {
+ size_t string_size = string.size();
+ const char *string_buf = string.c_str();
+ for (size_t i = 0; i < string_size; i++) {
+ ostrm << RAWHEX8(*(string_buf + i));
+ }
+}
+
extern void ASLLogCallback(void *baton, uint32_t flags, const char *format,
va_list args);
@@ -171,7 +204,8 @@ RNBRemote::RNBRemote()
m_extended_mode(false), m_noack_mode(false),
m_thread_suffix_supported(false), m_list_threads_in_stop_reply(false),
m_compression_minsize(384), m_enable_compression_next_send_packet(false),
- m_compression_mode(compression_types::none) {
+ m_compression_mode(compression_types::none),
+ m_enable_error_strings(false) {
DNBLogThreadedIf(LOG_RNB_REMOTE, "%s", __PRETTY_FUNCTION__);
CreatePacketTable();
}
@@ -365,6 +399,11 @@ void RNBRemote::CreatePacketTable() {
t.push_back(Packet(
query_symbol_lookup, &RNBRemote::HandlePacket_qSymbol, NULL, "qSymbol:",
"Notify that host debugger is ready to do symbol lookups"));
+ t.push_back(Packet(enable_error_strings,
+ &RNBRemote::HandlePacket_QEnableErrorStrings, NULL,
+ "QEnableErrorStrings",
+ "Tell " DEBUGSERVER_PROGRAM_NAME
+ " it can append descriptive error messages in replies."));
t.push_back(Packet(json_query_thread_extended_info,
&RNBRemote::HandlePacket_jThreadExtendedInfo, NULL,
"jThreadExtendedInfo",
@@ -769,6 +808,15 @@ rnb_err_t RNBRemote::SendPacket(const std::string &s) {
return rnb_err;
}
+rnb_err_t RNBRemote::SendErrorPacket(std::string errcode,
+ const std::string &errmsg) {
+ if (m_enable_error_strings && !errmsg.empty()) {
+ errcode += ";";
+ errcode += cstring_to_asciihex_string(errmsg.c_str());
+ }
+ return SendPacket(errcode);
+}
+
/* Get a packet via gdb remote protocol.
Strip off the prefix/suffix, verify the checksum to make sure
a valid packet was received, send an ACK if they match. */
@@ -884,7 +932,7 @@ rnb_err_t RNBRemote::HandlePacket_ILLFORMED(const char *file, int line,
DNBLogThreadedIf(LOG_RNB_PACKETS, "%8u %s:%i ILLFORMED: '%s' (%s)",
(uint32_t)m_comm.Timer().ElapsedMicroSeconds(true), file,
line, __FUNCTION__, p);
- return SendPacket("E03");
+ return SendErrorPacket("E03");
}
rnb_err_t RNBRemote::GetPacket(std::string &packet_payload,
@@ -1565,13 +1613,8 @@ rnb_err_t RNBRemote::HandlePacket_H(const char *p) {
rnb_err_t RNBRemote::HandlePacket_qLaunchSuccess(const char *p) {
if (m_ctx.HasValidProcessID() || m_ctx.LaunchStatus().Status() == 0)
return SendPacket("OK");
- std::ostringstream ret_str;
std::string status_str;
- std::string error_quoted = binary_encode_string
- (m_ctx.LaunchStatusAsString(status_str));
- ret_str << "E" << error_quoted;
-
- return SendPacket(ret_str.str());
+ return SendErrorPacket("E89", m_ctx.LaunchStatusAsString(status_str));
}
rnb_err_t RNBRemote::HandlePacket_qShlibInfoAddr(const char *p) {
@@ -1584,7 +1627,7 @@ rnb_err_t RNBRemote::HandlePacket_qShlibInfoAddr(const char *p) {
return SendPacket(ostrm.str());
}
}
- return SendPacket("E44");
+ return SendErrorPacket("E44");
}
rnb_err_t RNBRemote::HandlePacket_qStepPacketSupported(const char *p) {
@@ -1758,7 +1801,7 @@ rnb_err_t RNBRemote::HandlePacket_qRcmd(const char *p) {
DNBLogSetLogCallback(FileLogCallback, log_file);
return SendPacket("OK");
}
- return SendPacket("E71");
+ return SendErrorPacket("E71");
} else if (variable == "logmask") {
char *end;
errno = 0;
@@ -1776,13 +1819,13 @@ rnb_err_t RNBRemote::HandlePacket_qRcmd(const char *p) {
DNBLogSetLogMask(logmask);
return SendPacket("OK");
}
- return SendPacket("E72");
+ return SendErrorPacket("E72");
}
- return SendPacket("E70");
+ return SendErrorPacket("E70");
}
- return SendPacket("E69");
+ return SendErrorPacket("E69");
}
- return SendPacket("E73");
+ return SendErrorPacket("E73");
}
rnb_err_t RNBRemote::HandlePacket_qC(const char *p) {
@@ -1974,7 +2017,7 @@ rnb_err_t RNBRemote::HandlePacket_qRegisterInfo(const char *p) {
return SendPacket(ostrm.str());
}
- return SendPacket("E45");
+ return SendErrorPacket("E45");
}
/* This expects a packet formatted like
@@ -2222,7 +2265,7 @@ rnb_err_t set_logging(const char *p) {
rnb_err_t RNBRemote::HandlePacket_QSetIgnoredExceptions(const char *p) {
// We can't set the ignored exceptions if we have a running process:
if (m_ctx.HasValidProcessID())
- return SendPacket("E35");
+ return SendErrorPacket("E35");
p += sizeof("QSetIgnoredExceptions:") - 1;
bool success = true;
@@ -2247,7 +2290,7 @@ rnb_err_t RNBRemote::HandlePacket_QSetIgnoredExceptions(const char *p) {
if (success)
return SendPacket("OK");
else
- return SendPacket("E36");
+ return SendErrorPacket("E36");
}
rnb_err_t RNBRemote::HandlePacket_QThreadSuffixSupported(const char *p) {
@@ -2268,7 +2311,7 @@ rnb_err_t RNBRemote::HandlePacket_QSetLogging(const char *p) {
if (result == rnb_success)
return SendPacket("OK");
else
- return SendPacket("E35");
+ return SendErrorPacket("E35");
}
rnb_err_t RNBRemote::HandlePacket_QSetDisableASLR(const char *p) {
@@ -2282,7 +2325,7 @@ rnb_err_t RNBRemote::HandlePacket_QSetDisableASLR(const char *p) {
g_disable_aslr = 1;
break;
default:
- return SendPacket("E56");
+ return SendErrorPacket("E56");
}
return SendPacket("OK");
}
@@ -2322,9 +2365,9 @@ rnb_err_t RNBRemote::HandlePacket_QSetSTDIO(const char *p) {
}
if (success)
return SendPacket("OK");
- return SendPacket("E57");
+ return SendErrorPacket("E57");
}
- return SendPacket("E58");
+ return SendErrorPacket("E58");
}
rnb_err_t RNBRemote::HandlePacket_QSetWorkingDir(const char *p) {
@@ -2335,15 +2378,15 @@ rnb_err_t RNBRemote::HandlePacket_QSetWorkingDir(const char *p) {
struct stat working_dir_stat;
if (::stat(m_ctx.GetWorkingDirPath(), &working_dir_stat) == -1) {
m_ctx.GetWorkingDir().clear();
- return SendPacket("E61"); // Working directory doesn't exist...
+ return SendErrorPacket("E61"); // Working directory doesn't exist...
} else if ((working_dir_stat.st_mode & S_IFMT) == S_IFDIR) {
return SendPacket("OK");
} else {
m_ctx.GetWorkingDir().clear();
- return SendPacket("E62"); // Working directory isn't a directory...
+ return SendErrorPacket("E62"); // Working directory isn't a directory...
}
}
- return SendPacket("E59"); // Invalid path
+ return SendErrorPacket("E59"); // Invalid path
}
return SendPacket(
"E60"); // Already had a process, too late to set working dir
@@ -2368,7 +2411,7 @@ rnb_err_t RNBRemote::HandlePacket_QSyncThreadState(const char *p) {
if (DNBProcessSyncThreadState(m_ctx.ProcessID(), tid))
return SendPacket("OK");
else
- return SendPacket("E61");
+ return SendErrorPacket("E61");
}
rnb_err_t RNBRemote::HandlePacket_QSetDetachOnError(const char *p) {
@@ -2516,7 +2559,7 @@ rnb_err_t RNBRemote::HandlePacket_QLaunchArch(const char *p) {
p += sizeof("QLaunchArch:") - 1;
if (DNBSetArchitecture(p))
return SendPacket("OK");
- return SendPacket("E63");
+ return SendErrorPacket("E63");
}
rnb_err_t RNBRemote::HandlePacket_QSetProcessEvent(const char *p) {
@@ -2527,46 +2570,13 @@ rnb_err_t RNBRemote::HandlePacket_QSetProcessEvent(const char *p) {
if (DNBProcessSendEvent(Context().ProcessID(), p))
return SendPacket("OK");
else
- return SendPacket("E80");
+ return SendErrorPacket("E80");
} else {
Context().PushProcessEvent(p);
}
return SendPacket("OK");
}
-void append_hex_value(std::ostream &ostrm, const void *buf, size_t buf_size,
- bool swap) {
- int i;
- const uint8_t *p = (const uint8_t *)buf;
- if (swap) {
- for (i = static_cast<int>(buf_size) - 1; i >= 0; i--)
- ostrm << RAWHEX8(p[i]);
- } else {
- for (size_t i = 0; i < buf_size; i++)
- ostrm << RAWHEX8(p[i]);
- }
-}
-
-std::string cstring_to_asciihex_string(const char *str) {
- std::string hex_str;
- hex_str.reserve (strlen (str) * 2);
- while (str && *str) {
- uint8_t c = *str++;
- char hexbuf[5];
- snprintf (hexbuf, sizeof(hexbuf), "%02x", c);
- hex_str += hexbuf;
- }
- return hex_str;
-}
-
-void append_hexified_string(std::ostream &ostrm, const std::string &string) {
- size_t string_size = string.size();
- const char *string_buf = string.c_str();
- for (size_t i = 0; i < string_size; i++) {
- ostrm << RAWHEX8(*(string_buf + i));
- }
-}
-
void register_value_in_hex_fixed_width(std::ostream &ostrm, nub_process_t pid,
nub_thread_t tid,
const register_map_entry_t *reg,
@@ -2690,7 +2700,7 @@ static void ReadStackMemory(nub_process_t pid, nub_thread_t tid,
rnb_err_t RNBRemote::SendStopReplyPacketForThread(nub_thread_t tid) {
const nub_process_t pid = m_ctx.ProcessID();
if (pid == INVALID_NUB_PROCESS)
- return SendPacket("E50");
+ return SendErrorPacket("E50");
struct DNBThreadStopInfo tid_stop_info;
@@ -2944,7 +2954,7 @@ rnb_err_t RNBRemote::SendStopReplyPacketForThread(nub_thread_t tid) {
return SendPacket(ostrm.str());
}
- return SendPacket("E51");
+ return SendErrorPacket("E51");
}
/* '?'
@@ -2954,7 +2964,7 @@ rnb_err_t RNBRemote::SendStopReplyPacketForThread(nub_thread_t tid) {
rnb_err_t RNBRemote::HandlePacket_last_signal(const char *unused) {
if (!m_ctx.HasValidProcessID()) {
// Inferior is not yet specified/running
- return SendPacket("E02");
+ return SendErrorPacket("E02");
}
nub_process_t pid = m_ctx.ProcessID();
@@ -3092,7 +3102,7 @@ rnb_err_t RNBRemote::HandlePacket_M(const char *p) {
nub_size_t wrote =
DNBProcessMemoryWrite(m_ctx.ProcessID(), addr, length, buf);
if (wrote != length)
- return SendPacket("E09");
+ return SendErrorPacket("E09");
else
return SendPacket("OK");
}
@@ -3130,12 +3140,12 @@ rnb_err_t RNBRemote::HandlePacket_m(const char *p) {
std::string buf(length, '\0');
if (buf.empty()) {
- return SendPacket("E78");
+ return SendErrorPacket("E78");
}
nub_size_t bytes_read =
DNBProcessMemoryRead(m_ctx.ProcessID(), addr, buf.size(), &buf[0]);
if (bytes_read == 0) {
- return SendPacket("E08");
+ return SendErrorPacket("E08");
}
// "The reply may contain fewer bytes than requested if the server was able
@@ -3196,12 +3206,12 @@ rnb_err_t RNBRemote::HandlePacket_x(const char *p) {
std::vector<uint8_t> buf(length);
if (buf.capacity() != length) {
- return SendPacket("E79");
+ return SendErrorPacket("E79");
}
nub_size_t bytes_read =
DNBProcessMemoryRead(m_ctx.ProcessID(), addr, buf.size(), &buf[0]);
if (bytes_read == 0) {
- return SendPacket("E80");
+ return SendErrorPacket("E80");
}
std::vector<uint8_t> buf_quoted;
@@ -3272,7 +3282,7 @@ rnb_err_t RNBRemote::HandlePacket_X(const char *p) {
nub_size_t wrote =
DNBProcessMemoryWrite(m_ctx.ProcessID(), addr, data.size(), buf);
if (wrote != data.size())
- return SendPacket("E08");
+ return SendErrorPacket("E08");
return SendPacket("OK");
}
@@ -3285,7 +3295,7 @@ rnb_err_t RNBRemote::HandlePacket_X(const char *p) {
rnb_err_t RNBRemote::HandlePacket_g(const char *p) {
std::ostringstream ostrm;
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E11");
+ return SendErrorPacket("E11");
}
if (g_num_reg_entries == 0)
@@ -3311,7 +3321,7 @@ rnb_err_t RNBRemote::HandlePacket_g(const char *p) {
return SendPacket(ostrm.str());
}
}
- return SendPacket("E74");
+ return SendErrorPacket("E74");
}
/* 'G XXX...' -- write registers
@@ -3320,7 +3330,7 @@ rnb_err_t RNBRemote::HandlePacket_g(const char *p) {
rnb_err_t RNBRemote::HandlePacket_G(const char *p) {
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E11");
+ return SendErrorPacket("E11");
}
if (g_num_reg_entries == 0)
@@ -3351,15 +3361,15 @@ rnb_err_t RNBRemote::HandlePacket_G(const char *p) {
if (reg_ctx_size == reg_ctx.size())
return SendPacket("OK");
else
- return SendPacket("E55");
+ return SendErrorPacket("E55");
} else {
DNBLogError("RNBRemote::HandlePacket_G(%s): extracted %llu of %llu "
"bytes, size mismatch\n",
p, (uint64_t)bytes_extracted, (uint64_t)reg_ctx_size);
- return SendPacket("E64");
+ return SendErrorPacket("E64");
}
}
- return SendPacket("E65");
+ return SendErrorPacket("E65");
}
static bool RNBRemoteShouldCancelCallback(void *not_used) {
@@ -3422,7 +3432,7 @@ rnb_err_t RNBRemote::HandlePacket_AllocateMemory(const char *p) {
}
}
}
- return SendPacket("E53");
+ return SendErrorPacket("E53");
}
// FORMAT: _mXXXXXX
@@ -3445,7 +3455,7 @@ rnb_err_t RNBRemote::HandlePacket_DeallocateMemory(const char *p) {
if (DNBProcessMemoryDeallocate(m_ctx.ProcessID(), addr))
return SendPacket("OK");
}
- return SendPacket("E54");
+ return SendErrorPacket("E54");
}
// FORMAT: QSaveRegisterState;thread:TTTT; (when thread suffix is supported)
@@ -3483,7 +3493,7 @@ rnb_err_t RNBRemote::HandlePacket_SaveRegisterState(const char *p) {
snprintf(response, sizeof(response), "%u", save_id);
return SendPacket(response);
} else {
- return SendPacket("E75");
+ return SendErrorPacket("E75");
}
}
// FORMAT: QRestoreRegisterState:SAVEID;thread:TTTT; (when thread suffix is
@@ -3527,9 +3537,9 @@ rnb_err_t RNBRemote::HandlePacket_RestoreRegisterState(const char *p) {
if (DNBThreadRestoreRegisterState(pid, tid, save_id))
return SendPacket("OK");
else
- return SendPacket("E77");
+ return SendErrorPacket("E77");
}
- return SendPacket("E76");
+ return SendErrorPacket("E76");
}
static bool GetProcessNameFrom_vAttach(const char *&p,
@@ -3908,12 +3918,8 @@ rnb_err_t RNBRemote::HandlePacket_v(const char *p) {
if (attach_pid == INVALID_NUB_PROCESS_ARCH) {
DNBLogError("debugserver is x86_64 binary running in translation, attach "
"failed.");
- std::string return_message = "E96;";
- return_message +=
- cstring_to_asciihex_string("debugserver is x86_64 binary running in "
- "translation, attach failed.");
- SendPacket(return_message.c_str());
- return rnb_err;
+ return SendErrorPacket("E96", "debugserver is x86_64 binary running in "
+ "translation, attach failed.");
}
if (attach_pid != INVALID_NUB_PROCESS) {
@@ -3944,16 +3950,12 @@ rnb_err_t RNBRemote::HandlePacket_v(const char *p) {
// The order of these checks is important.
if (process_does_not_exist (pid_attaching_to)) {
DNBLogError("Tried to attach to pid that doesn't exist");
- std::string return_message = "E96;";
- return_message += cstring_to_asciihex_string("no such process.");
- return SendPacket(return_message);
+ return SendErrorPacket("E96", "no such process");
}
if (process_is_already_being_debugged (pid_attaching_to)) {
DNBLogError("Tried to attach to process already being debugged");
- std::string return_message = "E96;";
- return_message += cstring_to_asciihex_string("tried to attach to "
- "process already being debugged");
- return SendPacket(return_message);
+ return SendErrorPacket("E96", "tried to attach to "
+ "process already being debugged");
}
uid_t my_uid, process_uid;
if (attach_failed_due_to_uid_mismatch (pid_attaching_to,
@@ -3969,31 +3971,27 @@ rnb_err_t RNBRemote::HandlePacket_v(const char *p) {
process_username = pw->pw_name;
}
DNBLogError("Tried to attach to process with uid mismatch");
- std::string return_message = "E96;";
- std::string msg = "tried to attach to process as user '"
- + my_username + "' and process is running "
- "as user '" + process_username + "'";
- return_message += cstring_to_asciihex_string(msg.c_str());
- return SendPacket(return_message);
+ std::string msg = "tried to attach to process as user '" +
+ my_username +
+ "' and process is running "
+ "as user '" +
+ process_username + "'";
+ return SendErrorPacket("E96", msg);
}
if (!login_session_has_gui_access() && !developer_mode_enabled()) {
DNBLogError("Developer mode is not enabled and this is a "
"non-interactive session");
- std::string return_message = "E96;";
- return_message += cstring_to_asciihex_string("developer mode is "
- "not enabled on this machine "
- "and this is a non-interactive "
- "debug session.");
- return SendPacket(return_message);
+ return SendErrorPacket("E96", "developer mode is "
+ "not enabled on this machine "
+ "and this is a non-interactive "
+ "debug session.");
}
if (!login_session_has_gui_access()) {
DNBLogError("This is a non-interactive session");
- std::string return_message = "E96;";
- return_message += cstring_to_asciihex_string("this is a "
- "non-interactive debug session, "
- "cannot get permission to debug "
- "processes.");
- return SendPacket(return_message);
+ return SendErrorPacket("E96", "this is a "
+ "non-interactive debug session, "
+ "cannot get permission to debug "
+ "processes.");
}
}
@@ -4013,12 +4011,8 @@ rnb_err_t RNBRemote::HandlePacket_v(const char *p) {
error_explainer += err_str;
error_explainer += ")";
}
- std::string default_return_msg = "E96;";
- default_return_msg += cstring_to_asciihex_string
- (error_explainer.c_str());
- SendPacket (default_return_msg);
DNBLogError("Attach failed: \"%s\".", err_str);
- return rnb_err;
+ return SendErrorPacket("E96", error_explainer);
}
}
@@ -4037,7 +4031,7 @@ rnb_err_t RNBRemote::HandlePacket_T(const char *p) {
"No thread specified in T packet");
}
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E15");
+ return SendErrorPacket("E15");
}
errno = 0;
nub_thread_t tid = strtoul(p, NULL, 16);
@@ -4049,7 +4043,7 @@ rnb_err_t RNBRemote::HandlePacket_T(const char *p) {
nub_state_t state = DNBThreadGetState(m_ctx.ProcessID(), tid);
if (state == eStateInvalid || state == eStateExited ||
state == eStateCrashed) {
- return SendPacket("E16");
+ return SendErrorPacket("E16");
}
return SendPacket("OK");
@@ -4061,7 +4055,7 @@ rnb_err_t RNBRemote::HandlePacket_z(const char *p) {
"No thread specified in z packet");
if (!m_ctx.HasValidProcessID())
- return SendPacket("E15");
+ return SendErrorPacket("E15");
char packet_cmd = *p++;
char break_type = *p++;
@@ -4105,7 +4099,7 @@ rnb_err_t RNBRemote::HandlePacket_z(const char *p) {
return SendPacket("OK");
} else {
// We failed to set the software breakpoint
- return SendPacket("E09");
+ return SendErrorPacket("E09");
}
} break;
@@ -4126,7 +4120,7 @@ rnb_err_t RNBRemote::HandlePacket_z(const char *p) {
return SendPacket("OK");
} else {
// We failed to set the watchpoint
- return SendPacket("E09");
+ return SendErrorPacket("E09");
}
} break;
@@ -4141,7 +4135,7 @@ rnb_err_t RNBRemote::HandlePacket_z(const char *p) {
if (DNBBreakpointClear(pid, addr)) {
return SendPacket("OK");
} else {
- return SendPacket("E08");
+ return SendErrorPacket("E08");
}
break;
@@ -4151,7 +4145,7 @@ rnb_err_t RNBRemote::HandlePacket_z(const char *p) {
if (DNBWatchpointClear(pid, addr)) {
return SendPacket("OK");
} else {
- return SendPacket("E08");
+ return SendErrorPacket("E08");
}
break;
@@ -4193,7 +4187,7 @@ rnb_err_t RNBRemote::HandlePacket_p(const char *p) {
"No thread specified in p packet");
}
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E15");
+ return SendErrorPacket("E15");
}
nub_process_t pid = m_ctx.ProcessID();
errno = 0;
@@ -4245,7 +4239,7 @@ rnb_err_t RNBRemote::HandlePacket_P(const char *p) {
return HandlePacket_ILLFORMED(__FILE__, __LINE__, p, "Empty P packet");
}
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E28");
+ return SendErrorPacket("E28");
}
nub_process_t pid = m_ctx.ProcessID();
@@ -4262,15 +4256,15 @@ rnb_err_t RNBRemote::HandlePacket_P(const char *p) {
"Improperly formed P packet");
if (reg == UINT32_MAX)
- return SendPacket("E29");
+ return SendErrorPacket("E29");
if (equal_char != '=')
- return SendPacket("E30");
+ return SendErrorPacket("E30");
const register_map_entry_t *reg_entry;
if (reg >= g_num_reg_entries)
- return SendPacket("E47");
+ return SendErrorPacket("E47");
reg_entry = &g_reg_entries[reg];
@@ -4279,7 +4273,7 @@ rnb_err_t RNBRemote::HandlePacket_P(const char *p) {
DNBLogError(
"RNBRemote::HandlePacket_P(%s): unknown register number %u requested\n",
p, reg);
- return SendPacket("E48");
+ return SendErrorPacket("E48");
}
DNBRegisterValue reg_value;
@@ -4293,7 +4287,7 @@ rnb_err_t RNBRemote::HandlePacket_P(const char *p) {
if (!DNBThreadSetRegisterValueByID(pid, tid, reg_entry->nub_info.set,
reg_entry->nub_info.reg, &reg_value)) {
- return SendPacket("E32");
+ return SendErrorPacket("E32");
}
return SendPacket("OK");
}
@@ -4305,7 +4299,7 @@ rnb_err_t RNBRemote::HandlePacket_c(const char *p) {
const nub_process_t pid = m_ctx.ProcessID();
if (pid == INVALID_NUB_PROCESS)
- return SendPacket("E23");
+ return SendErrorPacket("E23");
DNBThreadResumeAction action = {INVALID_NUB_THREAD, eStateRunning, 0,
INVALID_NUB_ADDRESS};
@@ -4324,7 +4318,7 @@ rnb_err_t RNBRemote::HandlePacket_c(const char *p) {
thread_actions.SetDefaultThreadActionIfNeeded(eStateRunning, 0);
if (!DNBProcessResume(pid, thread_actions.GetFirst(),
thread_actions.GetSize()))
- return SendPacket("E25");
+ return SendErrorPacket("E25");
// Don't send an "OK" packet; response is the stopped/exited message.
return rnb_success;
}
@@ -4362,7 +4356,7 @@ rnb_err_t RNBRemote::HandlePacket_MemoryRegionInfo(const char *p) {
if (*p == '\0')
return SendPacket("OK");
if (*p++ != ':')
- return SendPacket("E67");
+ return SendErrorPacket("E67");
if (*p == '0' && (*(p + 1) == 'x' || *(p + 1) == 'X'))
p += 2;
@@ -4502,7 +4496,7 @@ rnb_err_t RNBRemote::HandlePacket_QEnableCompression(const char *p) {
return SendPacket("OK");
}
- return SendPacket("E88");
+ return SendErrorPacket("E88");
}
rnb_err_t RNBRemote::HandlePacket_qSpeedTest(const char *p) {
@@ -4521,7 +4515,7 @@ rnb_err_t RNBRemote::HandlePacket_qSpeedTest(const char *p) {
g_data[response_size + 5] = '\0';
return SendPacket(g_data);
} else {
- return SendPacket("E79");
+ return SendErrorPacket("E79");
}
}
@@ -4540,7 +4534,7 @@ rnb_err_t RNBRemote::HandlePacket_WatchpointSupportInfo(const char *p) {
if (*p == '\0')
return SendPacket("OK");
if (*p++ != ':')
- return SendPacket("E67");
+ return SendErrorPacket("E67");
errno = 0;
uint32_t num = DNBWatchpointGetNumSupportedHWP(m_ctx.ProcessID());
@@ -4558,7 +4552,7 @@ rnb_err_t RNBRemote::HandlePacket_C(const char *p) {
const nub_process_t pid = m_ctx.ProcessID();
if (pid == INVALID_NUB_PROCESS)
- return SendPacket("E36");
+ return SendErrorPacket("E36");
DNBThreadResumeAction action = {INVALID_NUB_THREAD, eStateRunning, 0,
INVALID_NUB_ADDRESS};
@@ -4584,10 +4578,10 @@ rnb_err_t RNBRemote::HandlePacket_C(const char *p) {
thread_actions.Append(action);
thread_actions.SetDefaultThreadActionIfNeeded(eStateRunning, action.signal);
if (!DNBProcessSignal(pid, process_signo))
- return SendPacket("E52");
+ return SendErrorPacket("E52");
if (!DNBProcessResume(pid, thread_actions.GetFirst(),
thread_actions.GetSize()))
- return SendPacket("E38");
+ return SendErrorPacket("E38");
/* Don't send an "OK" packet; response is the stopped/exited message. */
return rnb_success;
}
@@ -4602,10 +4596,10 @@ rnb_err_t RNBRemote::HandlePacket_D(const char *p) {
else {
DNBLog("error while detaching from pid %u due to D packet",
m_ctx.ProcessID());
- SendPacket("E");
+ SendErrorPacket("E01");
}
} else {
- SendPacket("E");
+ SendErrorPacket("E04");
}
return rnb_success;
}
@@ -4647,7 +4641,7 @@ rnb_err_t RNBRemote::HandlePacket_stop_process(const char *p) {
rnb_err_t RNBRemote::HandlePacket_s(const char *p) {
const nub_process_t pid = m_ctx.ProcessID();
if (pid == INVALID_NUB_PROCESS)
- return SendPacket("E32");
+ return SendErrorPacket("E32");
// Hardware supported stepping not supported on arm
nub_thread_t tid = GetContinueThread();
@@ -4655,7 +4649,7 @@ rnb_err_t RNBRemote::HandlePacket_s(const char *p) {
tid = GetCurrentThread();
if (tid == INVALID_NUB_THREAD)
- return SendPacket("E33");
+ return SendErrorPacket("E33");
DNBThreadResumeActions thread_actions;
thread_actions.AppendAction(tid, eStateStepping);
@@ -4664,7 +4658,7 @@ rnb_err_t RNBRemote::HandlePacket_s(const char *p) {
thread_actions.SetDefaultThreadActionIfNeeded(eStateStopped, 0);
if (!DNBProcessResume(pid, thread_actions.GetFirst(),
thread_actions.GetSize()))
- return SendPacket("E49");
+ return SendErrorPacket("E49");
// Don't send an "OK" packet; response is the stopped/exited message.
return rnb_success;
}
@@ -4675,7 +4669,7 @@ rnb_err_t RNBRemote::HandlePacket_s(const char *p) {
rnb_err_t RNBRemote::HandlePacket_S(const char *p) {
const nub_process_t pid = m_ctx.ProcessID();
if (pid == INVALID_NUB_PROCESS)
- return SendPacket("E36");
+ return SendErrorPacket("E36");
DNBThreadResumeAction action = {INVALID_NUB_THREAD, eStateStepping, 0,
INVALID_NUB_ADDRESS};
@@ -4699,11 +4693,11 @@ rnb_err_t RNBRemote::HandlePacket_S(const char *p) {
action.tid = GetContinueThread();
if (action.tid == 0 || action.tid == (nub_thread_t)-1)
- return SendPacket("E40");
+ return SendErrorPacket("E40");
nub_state_t tstate = DNBThreadGetState(pid, action.tid);
if (tstate == eStateInvalid || tstate == eStateExited)
- return SendPacket("E37");
+ return SendErrorPacket("E37");
DNBThreadResumeActions thread_actions;
thread_actions.Append(action);
@@ -4712,7 +4706,7 @@ rnb_err_t RNBRemote::HandlePacket_S(const char *p) {
thread_actions.SetDefaultThreadActionIfNeeded(eStateStopped, 0);
if (!DNBProcessResume(pid, thread_actions.GetFirst(),
thread_actions.GetSize()))
- return SendPacket("E39");
+ return SendErrorPacket("E39");
// Don't send an "OK" packet; response is the stopped/exited message.
return rnb_success;
@@ -5242,7 +5236,7 @@ rnb_err_t RNBRemote::HandlePacket_qXfer(const char *command) {
UpdateTargetXML();
if (g_target_xml.empty())
- return SendPacket("E83");
+ return SendErrorPacket("E83");
if (length > g_target_xml.size()) {
xml_out << 'l'; // No more data
@@ -5273,13 +5267,13 @@ rnb_err_t RNBRemote::HandlePacket_qXfer(const char *command) {
}
}
} else {
- SendPacket("E85");
+ SendErrorPacket("E85");
}
} else {
- SendPacket("E86");
+ SendErrorPacket("E86");
}
}
- return SendPacket("E82");
+ return SendErrorPacket("E82");
}
rnb_err_t RNBRemote::HandlePacket_qGDBServerVersion(const char *p) {
@@ -5298,7 +5292,7 @@ rnb_err_t RNBRemote::HandlePacket_qGDBServerVersion(const char *p) {
rnb_err_t RNBRemote::HandlePacket_jGetDyldProcessState(const char *p) {
const nub_process_t pid = m_ctx.ProcessID();
if (pid == INVALID_NUB_PROCESS)
- return SendPacket("E87");
+ return SendErrorPacket("E87");
JSONGenerator::ObjectSP dyld_state_sp = DNBGetDyldProcessState(pid);
if (dyld_state_sp) {
@@ -5308,7 +5302,7 @@ rnb_err_t RNBRemote::HandlePacket_jGetDyldProcessState(const char *p) {
if (strm.str().size() > 0)
return SendPacket(strm.str());
}
- return SendPacket("E88");
+ return SendErrorPacket("E88");
}
// A helper function that retrieves a single integer value from
@@ -5648,7 +5642,7 @@ rnb_err_t RNBRemote::HandlePacket_jThreadsInfo(const char *p) {
return SendPacket(strm.str());
}
}
- return SendPacket("E85");
+ return SendErrorPacket("E85");
}
rnb_err_t RNBRemote::HandlePacket_jThreadExtendedInfo(const char *p) {
@@ -5656,7 +5650,7 @@ rnb_err_t RNBRemote::HandlePacket_jThreadExtendedInfo(const char *p) {
std::ostringstream json;
// If we haven't run the process yet, return an error.
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E81");
+ return SendErrorPacket("E81");
}
pid = m_ctx.ProcessID();
@@ -5925,7 +5919,7 @@ RNBRemote::HandlePacket_jGetLoadedDynamicLibrariesInfos(const char *p) {
nub_process_t pid;
// If we haven't run the process yet, return an error.
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E83");
+ return SendErrorPacket("E83");
}
pid = m_ctx.ProcessID();
@@ -5960,7 +5954,7 @@ RNBRemote::HandlePacket_jGetLoadedDynamicLibrariesInfos(const char *p) {
if (json_str.str().size() > 0) {
return SendPacket(json_str.str());
} else {
- SendPacket("E84");
+ SendErrorPacket("E84");
}
}
}
@@ -5976,7 +5970,7 @@ rnb_err_t RNBRemote::HandlePacket_jGetSharedCacheInfo(const char *p) {
nub_process_t pid;
// If we haven't run the process yet, return an error.
if (!m_ctx.HasValidProcessID()) {
- return SendPacket("E85");
+ return SendErrorPacket("E85");
}
pid = m_ctx.ProcessID();
@@ -5993,7 +5987,7 @@ rnb_err_t RNBRemote::HandlePacket_jGetSharedCacheInfo(const char *p) {
if (json_str.str().size() > 0) {
return SendPacket(json_str.str());
} else {
- SendPacket("E86");
+ SendErrorPacket("E86");
}
}
}
@@ -6195,6 +6189,11 @@ rnb_err_t RNBRemote::HandlePacket_qSymbol(const char *command) {
}
}
+rnb_err_t RNBRemote::HandlePacket_QEnableErrorStrings(const char *p) {
+ m_enable_error_strings = true;
+ return SendPacket("OK");
+}
+
static std::pair<cpu_type_t, cpu_subtype_t>
GetCPUTypesFromHost(nub_process_t pid) {
cpu_type_t cputype = DNBProcessGetCPUType(pid);
diff --git a/lldb/tools/debugserver/source/RNBRemote.h b/lldb/tools/debugserver/source/RNBRemote.h
index dad390ae0b63..a95bece79b46 100644
--- a/lldb/tools/debugserver/source/RNBRemote.h
+++ b/lldb/tools/debugserver/source/RNBRemote.h
@@ -33,6 +33,7 @@ enum class compression_types { zlib_deflate, lz4, lzma, lzfse, none };
class RNBRemote {
public:
+ // clang-format off
enum PacketEnum {
invalid_packet = 0,
ack, // '+'
@@ -137,8 +138,10 @@ public:
set_detach_on_error, // 'QSetDetachOnError:'
query_transfer, // 'qXfer:'
json_query_dyld_process_state, // 'jGetDyldProcessState'
+ enable_error_strings, // 'QEnableErrorStrings'
unknown_type
};
+ // clang-format on
typedef rnb_err_t (RNBRemote::*HandlePacketCallback)(const char *p);
@@ -196,6 +199,7 @@ public:
rnb_err_t HandlePacket_qGDBServerVersion(const char *p);
rnb_err_t HandlePacket_qProcessInfo(const char *p);
rnb_err_t HandlePacket_qSymbol(const char *p);
+ rnb_err_t HandlePacket_QEnableErrorStrings(const char *p);
rnb_err_t HandlePacket_QStartNoAckMode(const char *p);
rnb_err_t HandlePacket_QThreadSuffixSupported(const char *p);
rnb_err_t HandlePacket_QSetLogging(const char *p);
@@ -356,6 +360,8 @@ protected:
rnb_err_t GetPacket(std::string &packet_data, RNBRemote::Packet &packet_info,
bool wait);
rnb_err_t SendPacket(const std::string &);
+ rnb_err_t SendErrorPacket(std::string errcode,
+ const std::string &errmsg = "");
std::string CompressString(const std::string &);
void CreatePacketTable();
@@ -405,6 +411,9 @@ protected:
bool m_enable_compression_next_send_packet;
compression_types m_compression_mode;
+
+ bool m_enable_error_strings; // Whether we can append asciihex error strings
+ // after Exx error replies
};
/* We translate the /usr/include/mach/exception_types.h exception types
diff --git a/lldb/tools/driver/Driver.cpp b/lldb/tools/driver/Driver.cpp
index c63ff0ff597e..9286abb27e13 100644
--- a/lldb/tools/driver/Driver.cpp
+++ b/lldb/tools/driver/Driver.cpp
@@ -188,7 +188,6 @@ SBError Driver::ProcessArgs(const opt::InputArgList &args, bool &exiting) {
if (args.hasArg(OPT_no_use_colors)) {
m_debugger.SetUseColor(false);
WithColor::setAutoDetectFunction(disable_color);
- m_option_data.m_debug_mode = true;
}
if (args.hasArg(OPT_version)) {
@@ -455,16 +454,7 @@ int Driver::MainLoop() {
// Process lldbinit files before handling any options from the command line.
SBCommandReturnObject result;
sb_interpreter.SourceInitFileInGlobalDirectory(result);
- if (m_option_data.m_debug_mode) {
- result.PutError(m_debugger.GetErrorFile());
- result.PutOutput(m_debugger.GetOutputFile());
- }
-
sb_interpreter.SourceInitFileInHomeDirectory(result, m_option_data.m_repl);
- if (m_option_data.m_debug_mode) {
- result.PutError(m_debugger.GetErrorFile());
- result.PutOutput(m_debugger.GetOutputFile());
- }
// Source the local .lldbinit file if it exists and we're allowed to source.
// Here we want to always print the return object because it contains the
@@ -536,11 +526,6 @@ int Driver::MainLoop() {
"or -s) are ignored in REPL mode.\n";
}
- if (m_option_data.m_debug_mode) {
- result.PutError(m_debugger.GetErrorFile());
- result.PutOutput(m_debugger.GetOutputFile());
- }
-
const bool handle_events = true;
const bool spawn_thread = false;
diff --git a/lldb/tools/driver/Driver.h b/lldb/tools/driver/Driver.h
index d5779b3c2c91..83e0d8a41cfd 100644
--- a/lldb/tools/driver/Driver.h
+++ b/lldb/tools/driver/Driver.h
@@ -75,7 +75,6 @@ public:
std::vector<InitialCmdEntry> m_after_file_commands;
std::vector<InitialCmdEntry> m_after_crash_commands;
- bool m_debug_mode = false;
bool m_source_quietly = false;
bool m_print_version = false;
bool m_print_python_path = false;
diff --git a/lldb/tools/lldb-dap/Watchpoint.cpp b/lldb/tools/lldb-dap/Watchpoint.cpp
index 2f176e0da84f..217655094491 100644
--- a/lldb/tools/lldb-dap/Watchpoint.cpp
+++ b/lldb/tools/lldb-dap/Watchpoint.cpp
@@ -16,17 +16,11 @@ Watchpoint::Watchpoint(const llvm::json::Object &obj) : BreakpointBase(obj) {
llvm::StringRef dataId = GetString(obj, "dataId");
std::string accessType = GetString(obj, "accessType").str();
auto [addr_str, size_str] = dataId.split('/');
- lldb::addr_t addr;
- size_t size;
llvm::to_integer(addr_str, addr, 16);
llvm::to_integer(size_str, size);
- lldb::SBWatchpointOptions options;
options.SetWatchpointTypeRead(accessType != "write");
if (accessType != "read")
options.SetWatchpointTypeWrite(lldb::eWatchpointWriteTypeOnModify);
- wp = g_dap.target.WatchpointCreateByAddress(addr, size, options, error);
- SetCondition();
- SetHitCondition();
}
void Watchpoint::SetCondition() { wp.SetCondition(condition.c_str()); }
@@ -38,11 +32,20 @@ void Watchpoint::SetHitCondition() {
}
void Watchpoint::CreateJsonObject(llvm::json::Object &object) {
- if (error.Success()) {
- object.try_emplace("verified", true);
- } else {
+ if (!error.IsValid() || error.Fail()) {
object.try_emplace("verified", false);
- EmplaceSafeString(object, "message", error.GetCString());
+ if (error.Fail())
+ EmplaceSafeString(object, "message", error.GetCString());
+ } else {
+ object.try_emplace("verified", true);
}
}
+
+void Watchpoint::SetWatchpoint() {
+ wp = g_dap.target.WatchpointCreateByAddress(addr, size, options, error);
+ if (!condition.empty())
+ SetCondition();
+ if (!hitCondition.empty())
+ SetHitCondition();
+}
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/Watchpoint.h b/lldb/tools/lldb-dap/Watchpoint.h
index 026b07d67241..4d2e58ed7533 100644
--- a/lldb/tools/lldb-dap/Watchpoint.h
+++ b/lldb/tools/lldb-dap/Watchpoint.h
@@ -17,6 +17,9 @@
namespace lldb_dap {
struct Watchpoint : public BreakpointBase {
+ lldb::addr_t addr;
+ size_t size;
+ lldb::SBWatchpointOptions options;
// The LLDB breakpoint associated wit this watchpoint.
lldb::SBWatchpoint wp;
lldb::SBError error;
@@ -28,6 +31,8 @@ struct Watchpoint : public BreakpointBase {
void SetCondition() override;
void SetHitCondition() override;
void CreateJsonObject(llvm::json::Object &object) override;
+
+ void SetWatchpoint();
};
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/lldb-dap.cpp b/lldb/tools/lldb-dap/lldb-dap.cpp
index c6a275bcf814..55f8c920e600 100644
--- a/lldb/tools/lldb-dap/lldb-dap.cpp
+++ b/lldb/tools/lldb-dap/lldb-dap.cpp
@@ -2880,15 +2880,29 @@ void request_setDataBreakpoints(const llvm::json::Object &request) {
const auto *breakpoints = arguments->getArray("breakpoints");
llvm::json::Array response_breakpoints;
g_dap.target.DeleteAllWatchpoints();
+ std::vector<Watchpoint> watchpoints;
if (breakpoints) {
for (const auto &bp : *breakpoints) {
const auto *bp_obj = bp.getAsObject();
if (bp_obj) {
Watchpoint wp(*bp_obj);
- AppendBreakpoint(&wp, response_breakpoints);
+ watchpoints.push_back(wp);
}
}
}
+ // If two watchpoints start at the same address, the latter overwrite the
+ // former. So, we only enable those at first-seen addresses when iterating
+ // backward.
+ std::set<lldb::addr_t> addresses;
+ for (auto iter = watchpoints.rbegin(); iter != watchpoints.rend(); ++iter) {
+ if (addresses.count(iter->addr) == 0) {
+ iter->SetWatchpoint();
+ addresses.insert(iter->addr);
+ }
+ }
+ for (auto wp : watchpoints)
+ AppendBreakpoint(&wp, response_breakpoints);
+
llvm::json::Object body;
body.try_emplace("breakpoints", std::move(response_breakpoints));
response.try_emplace("body", std::move(body));
diff --git a/lldb/unittests/Core/ProgressReportTest.cpp b/lldb/unittests/Core/ProgressReportTest.cpp
index 559f3ef1ae46..98cbc475ce28 100644
--- a/lldb/unittests/Core/ProgressReportTest.cpp
+++ b/lldb/unittests/Core/ProgressReportTest.cpp
@@ -16,8 +16,8 @@
#include "lldb/Host/HostInfo.h"
#include "lldb/Utility/Listener.h"
#include "gtest/gtest.h"
+#include <memory>
#include <mutex>
-#include <thread>
using namespace lldb;
using namespace lldb_private;
@@ -126,3 +126,94 @@ TEST_F(ProgressReportTest, TestReportCreation) {
ASSERT_FALSE(data->IsFinite());
ASSERT_EQ(data->GetMessage(), "Progress report 1: Starting report 1");
}
+
+TEST_F(ProgressReportTest, TestProgressManager) {
+ std::chrono::milliseconds timeout(100);
+
+ // Set up the debugger, make sure that was done properly.
+ ArchSpec arch("x86_64-apple-macosx-");
+ Platform::SetHostPlatform(PlatformRemoteMacOSX::CreateInstance(true, &arch));
+
+ DebuggerSP debugger_sp = Debugger::CreateInstance();
+ ASSERT_TRUE(debugger_sp);
+
+ // Get the debugger's broadcaster.
+ Broadcaster &broadcaster = debugger_sp->GetBroadcaster();
+
+ // Create a listener, make sure it can receive events and that it's
+ // listening to the correct broadcast bit.
+ ListenerSP listener_sp = Listener::MakeListener("progress-category-listener");
+
+ listener_sp->StartListeningForEvents(&broadcaster,
+ Debugger::eBroadcastBitProgressCategory);
+ EXPECT_TRUE(broadcaster.EventTypeHasListeners(
+ Debugger::eBroadcastBitProgressCategory));
+
+ EventSP event_sp;
+ const ProgressEventData *data;
+
+ // Create three progress events with the same category then try to pop 2
+ // events from the queue in a row before the progress reports are destroyed.
+ // Since only 1 event should've been broadcast for this category, the second
+ // GetEvent() call should return false.
+ {
+ Progress progress1("Progress report 1", "Starting report 1");
+ Progress progress2("Progress report 1", "Starting report 2");
+ Progress progress3("Progress report 1", "Starting report 3");
+ EXPECT_TRUE(listener_sp->GetEvent(event_sp, timeout));
+ EXPECT_FALSE(listener_sp->GetEvent(event_sp, timeout));
+ }
+
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+
+ ASSERT_EQ(data->GetDetails(), "");
+ ASSERT_FALSE(data->IsFinite());
+ ASSERT_TRUE(data->GetCompleted());
+ ASSERT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ ASSERT_EQ(data->GetMessage(), "Progress report 1");
+
+ // Pop another event from the queue, this should be the event for the final
+ // report for this category.
+ EXPECT_TRUE(listener_sp->GetEvent(event_sp, timeout));
+
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ ASSERT_EQ(data->GetDetails(), "");
+ ASSERT_FALSE(data->IsFinite());
+ ASSERT_TRUE(data->GetCompleted());
+ ASSERT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ ASSERT_EQ(data->GetMessage(), "Progress report 1");
+
+ // Create two progress reports of the same category that overlap with each
+ // other. Here we want to ensure that the ID broadcasted for the initial and
+ // final reports for this category are the same.
+ std::unique_ptr<Progress> overlap_progress1 =
+ std::make_unique<Progress>("Overlapping report 1", "Starting report 1");
+ std::unique_ptr<Progress> overlap_progress2 =
+ std::make_unique<Progress>("Overlapping report 1", "Starting report 2");
+ overlap_progress1.reset();
+
+ EXPECT_TRUE(listener_sp->GetEvent(event_sp, timeout));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ // Get the ID used in the first report for this category.
+ uint64_t expected_progress_id = data->GetID();
+
+ ASSERT_EQ(data->GetDetails(), "");
+ ASSERT_FALSE(data->IsFinite());
+ ASSERT_TRUE(data->GetCompleted());
+ ASSERT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ ASSERT_EQ(data->GetMessage(), "Overlapping report 1");
+
+ overlap_progress2.reset();
+
+ EXPECT_TRUE(listener_sp->GetEvent(event_sp, timeout));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+
+ ASSERT_EQ(data->GetDetails(), "");
+ ASSERT_FALSE(data->IsFinite());
+ ASSERT_TRUE(data->GetCompleted());
+ ASSERT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ ASSERT_EQ(data->GetMessage(), "Overlapping report 1");
+ // The progress ID for the final report should be the same as that for the
+ // initial report.
+ ASSERT_EQ(data->GetID(), expected_progress_id);
+}
diff --git a/lldb/use_lldb_suite_root.py b/lldb/use_lldb_suite_root.py
index fd42f63a3c7f..b8f8acf5dd94 100644
--- a/lldb/use_lldb_suite_root.py
+++ b/lldb/use_lldb_suite_root.py
@@ -21,5 +21,7 @@ def add_lldbsuite_packages_dir(lldb_root):
lldb_root = os.path.dirname(inspect.getfile(inspect.currentframe()))
-add_third_party_module_dirs(lldb_root)
+# Use environment variables to avoid plumbing flags, lit configs, etc.
+if os.getenv("LLDB_TEST_USE_VENDOR_PACKAGES"):
+ add_third_party_module_dirs(lldb_root)
add_lldbsuite_packages_dir(lldb_root)
diff --git a/lldb/utils/lldb-dotest/CMakeLists.txt b/lldb/utils/lldb-dotest/CMakeLists.txt
index 09f41dbce421..2ba40f009cc9 100644
--- a/lldb/utils/lldb-dotest/CMakeLists.txt
+++ b/lldb/utils/lldb-dotest/CMakeLists.txt
@@ -10,6 +10,7 @@ set(LLDB_LIBS_DIR "${LLVM_LIBRARY_OUTPUT_INTDIR}")
llvm_canonicalize_cmake_booleans(
LLDB_BUILD_INTEL_PT
LLDB_HAS_LIBCXX
+ LLDB_TEST_USE_VENDOR_PACKAGES
)
if ("libcxx" IN_LIST LLVM_ENABLE_RUNTIMES)
diff --git a/lldb/utils/lldb-dotest/lldb-dotest.in b/lldb/utils/lldb-dotest/lldb-dotest.in
index 5cd49d253b99..9291f59b4198 100755
--- a/lldb/utils/lldb-dotest/lldb-dotest.in
+++ b/lldb/utils/lldb-dotest/lldb-dotest.in
@@ -1,4 +1,5 @@
#!@Python3_EXECUTABLE@
+import os
import subprocess
import sys
@@ -17,8 +18,12 @@ has_libcxx = @LLDB_HAS_LIBCXX@
libcxx_libs_dir = "@LIBCXX_LIBRARY_DIR@"
libcxx_include_dir = "@LIBCXX_GENERATED_INCLUDE_DIR@"
libcxx_include_target_dir = "@LIBCXX_GENERATED_INCLUDE_TARGET_DIR@"
+use_vendor_packages = @LLDB_TEST_USE_VENDOR_PACKAGES@
if __name__ == '__main__':
+ if use_vendor_packages:
+ os.putenv("LLDB_TEST_USE_VENDOR_PACKAGES", "1")
+
wrapper_args = sys.argv[1:]
dotest_args = []
# split on an empty string will produce [''] and if you
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index f5f7d3f3253f..651f17879fad 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -718,6 +718,8 @@ if(LLVM_INDIVIDUAL_TEST_COVERAGE)
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
+option(LLVM_PARALLEL_LIT "Enable multiple lit suites to run in parallel" OFF)
+
# On Win32 hosts, provide an option to specify the path to the GnuWin32 tools.
if( WIN32 AND NOT CYGWIN )
set(LLVM_LIT_TOOLS_DIR "" CACHE PATH "Path to GnuWin32 tools")
diff --git a/llvm/cmake/modules/AddLLVM.cmake b/llvm/cmake/modules/AddLLVM.cmake
index 3bc78b0dc935..0f1734a64ee6 100644
--- a/llvm/cmake/modules/AddLLVM.cmake
+++ b/llvm/cmake/modules/AddLLVM.cmake
@@ -1947,11 +1947,18 @@ function(add_lit_target target comment)
list(APPEND LIT_COMMAND --param ${param})
endforeach()
if (ARG_UNPARSED_ARGUMENTS)
- add_custom_target(${target}
- COMMAND ${LIT_COMMAND} ${ARG_UNPARSED_ARGUMENTS}
- COMMENT "${comment}"
- USES_TERMINAL
- )
+ if (LLVM_PARALLEL_LIT)
+ add_custom_target(${target}
+ COMMAND ${LIT_COMMAND} ${ARG_UNPARSED_ARGUMENTS}
+ COMMENT "${comment}"
+ )
+ else()
+ add_custom_target(${target}
+ COMMAND ${LIT_COMMAND} ${ARG_UNPARSED_ARGUMENTS}
+ COMMENT "${comment}"
+ USES_TERMINAL
+ )
+ endif()
else()
add_custom_target(${target}
COMMAND ${CMAKE_COMMAND} -E echo "${target} does nothing, no tools built.")
diff --git a/llvm/docs/CMake.rst b/llvm/docs/CMake.rst
index abef4f810314..35c47989a7ee 100644
--- a/llvm/docs/CMake.rst
+++ b/llvm/docs/CMake.rst
@@ -762,6 +762,12 @@ enabled sub-projects. Nearly all of these variable names begin with
**LLVM_PARALLEL_LINK_JOBS**:STRING
Define the maximum number of concurrent link jobs.
+**LLVM_PARALLEL_LIT**:BOOL
+ Defaults to ``OFF``. If set to ``OFF``, lit testsuites will be configured
+ with CMake's ``USES_TERMINAL`` flag to give direct access to the terminal. If
+ set to ``ON``, that flag will be removed, allowing Ninja to schedule multiple
+ lit testsuites in parallel.
+
**LLVM_RAM_PER_COMPILE_JOB**:STRING
Calculates the amount of Ninja compile jobs according to available resources.
Value has to be in MB, overwrites LLVM_PARALLEL_COMPILE_JOBS. Compile jobs
diff --git a/llvm/docs/CodeOfConduct.rst b/llvm/docs/CodeOfConduct.rst
index 0e986b25df3c..08fe7b3bcf65 100644
--- a/llvm/docs/CodeOfConduct.rst
+++ b/llvm/docs/CodeOfConduct.rst
@@ -135,6 +135,17 @@ events as part of each events' information. In person reports will still be
kept confidential exactly as above, but also feel free to (anonymously if
needed) email conduct@llvm.org.
+Bans
+====
+
+The code of conduct committee may decide to ban an individual from the
+community for violating the code of conduct. The goal of a ban is to protect
+community members from having to interact with people who are consistently not
+respecting the code of conduct. Please refer to the
+:doc:`Developer Policy<DeveloperPolicy>` section on Bans for how to handle
+interactions with former community members. If you need further guidance,
+please contact conduct@llvm.org.
+
Code of Conduct Committee
=========================
diff --git a/llvm/docs/CommandGuide/llvm-objcopy.rst b/llvm/docs/CommandGuide/llvm-objcopy.rst
index 755291676abf..9d0cb7ad1195 100644
--- a/llvm/docs/CommandGuide/llvm-objcopy.rst
+++ b/llvm/docs/CommandGuide/llvm-objcopy.rst
@@ -455,6 +455,15 @@ them.
Set the start address of the output to ``<addr>``. Overrides any previously
specified :option:`--change-start` or :option:`--adjust-start` options.
+.. option:: --set-symbol-visibility <symbol>=<visibility>
+
+ Change the visibility of a symbol to the specified value.
+
+.. option:: --set-symbols-visibility <filename>=<visibility>
+
+ Read a list of symbols from <filename> and change their visibility to the
+ specified value. Visibility values: default, internal, hidden, protected.
+
.. option:: --split-dwo <dwo-file>
Equivalent to running :program:`llvm-objcopy` with :option:`--extract-dwo` and
diff --git a/llvm/docs/CommandGuide/llvm-readobj.rst b/llvm/docs/CommandGuide/llvm-readobj.rst
index 09dabb28cfa7..ca7fb253f00a 100644
--- a/llvm/docs/CommandGuide/llvm-readobj.rst
+++ b/llvm/docs/CommandGuide/llvm-readobj.rst
@@ -61,6 +61,11 @@ file formats.
Dump decompressed section content when used with ``-x`` or ``-p``.
If the section(s) are not compressed, they are displayed as is.
+.. option:: --demangle, -C
+
+ Display demangled symbol names in the output. This option is only for ELF and
+ XCOFF file formats.
+
.. option:: --expand-relocs
When used with :option:`--relocs`, display each relocation in an expanded
@@ -94,6 +99,11 @@ file formats.
Display the needed libraries.
+.. option:: --no-demangle
+
+ Do not demangle symbol names in the output. This option is only for ELF and
+ XCOFF file formats. The option is enabled by default.
+
.. option:: --relocations, --relocs, -r
Display the relocation entries in the file.
@@ -175,10 +185,6 @@ The following options are implemented only for the ELF file format.
Requires :option:`--bb-addr-map` to have an effect.
-.. option:: --demangle, -C
-
- Display demangled symbol names in the output.
-
.. option:: --dependent-libraries
Display the dependent libraries section.
diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 26ff34376fb8..33b0152bd7b4 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -536,15 +536,15 @@ G_FMINIMUM
^^^^^^^^^^
NaN-propagating minimum that also treat -0.0 as less than 0.0. While
-FMINNUM_IEEE follow IEEE 754-2008 semantics, FMINIMUM follows IEEE 754-2018
-draft semantics.
+FMINNUM_IEEE follow IEEE 754-2008 semantics, FMINIMUM follows IEEE
+754-2019 semantics.
G_FMAXIMUM
^^^^^^^^^^
NaN-propagating maximum that also treat -0.0 as less than 0.0. While
-FMAXNUM_IEEE follow IEEE 754-2008 semantics, FMAXIMUM follows IEEE 754-2018
-draft semantics.
+FMAXNUM_IEEE follow IEEE 754-2008 semantics, FMAXIMUM follows IEEE
+754-2019 semantics.
G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FREM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 60e682ae328a..f56d4ed28f28 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15581,7 +15581,7 @@ Semantics:
If either operand is a NaN, returns NaN. Otherwise returns the lesser
of the two arguments. -0.0 is considered to be less than +0.0 for this
intrinsic. Note that these are the semantics specified in the draft of
-IEEE 754-2018.
+IEEE 754-2019.
.. _i_maximum:
@@ -15621,7 +15621,7 @@ Semantics:
If either operand is a NaN, returns NaN. Otherwise returns the greater
of the two arguments. -0.0 is considered to be less than +0.0 for this
intrinsic. Note that these are the semantics specified in the draft of
-IEEE 754-2018.
+IEEE 754-2019.
.. _int_copysign:
@@ -26000,7 +26000,7 @@ The third argument specifies the exception behavior as described above.
Semantics:
""""""""""
+This function follows semantics specified in IEEE 754-2019.
+This function follows semantics specified in the draft of IEEE 754-2019.
'``llvm.experimental.constrained.minimum``' Intrinsic
@@ -26032,7 +26032,7 @@ The third argument specifies the exception behavior as described above.
Semantics:
""""""""""
+This function follows semantics specified in IEEE 754-2019.
+This function follows semantics specified in the draft of IEEE 754-2019.
'``llvm.experimental.constrained.ceil``' Intrinsic
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index ed443596897a..8d293b021443 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -117,6 +117,7 @@ on support follow.
``V`` Supported
``Za128rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
``Za64rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
+ ``Zacas`` Supported (`See note <#riscv-zacas-note>`__)
``Zawrs`` Assembly Support
``Zba`` Supported
``Zbb`` Supported
@@ -236,6 +237,11 @@ Supported
``Za128rs``, ``Za64rs``, ``Zic64b``, ``Ziccamoa``, ``Ziccif``, ``Zicclsm``, ``Ziccrse``, ``Shcounterenvw``, ``Shgatpa``, ``Shtvala``, ``Shvsatpa``, ``Shvstvala``, ``Shvstvecd``, ``Ssccptr``, ``Sscounterenw``, ``Ssstateen``, ``Ssstrict``, ``Sstvala``, ``Sstvecd``, ``Ssu64xl``, ``Svade``, ``Svbare``
These extensions are defined as part of the `RISC-V Profiles specification <https://github.com/riscv/riscv-profiles/releases/tag/v1.0>`__. They do not introduce any new features themselves, but instead describe existing hardware features.
+ .. _riscv-zacas-note:
+
+``Zacas``
+``Zacas``
+ amocas.w will be used for i32 cmpxchg. amocas.d will be used for i64 cmpxchg on RV64. The compiler will not generate amocas.d on RV32 or amocas.q on RV64 due to ABI compatibility. These can only be used in the assembler.
+
Experimental Extensions
=======================
@@ -252,9 +258,6 @@ The primary goal of experimental support is to assist in the process of ratifica
``experimental-zabha``
LLVM implements the `v1.0-rc1 draft specification <https://github.com/riscv/riscv-zabha/tree/v1.0-rc1>`__.
-``experimental-zacas``
- LLVM implements the `1.0-rc1 draft specification <https://github.com/riscv/riscv-zacas/releases/tag/v1.0-rc1>`__.
-
``experimental-zalasr``
LLVM implements the `0.0.5 draft specification <https://github.com/mehnadnerd/riscv-zalasr>`__.
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 8a3a0ec66ed8..8ce6ee5cebb2 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -103,6 +103,7 @@ Changes to the RISC-V Backend
* Codegen support was added for the Zimop (May-Be-Operations) extension.
* The experimental Ssnpm, Smnpm, Smmpm, Sspm, and Supm 0.8.1 Pointer Masking extensions are supported.
* The experimental Ssqosid extension is supported.
+* Zacas is no longer experimental.
Changes to the WebAssembly Backend
----------------------------------
@@ -147,6 +148,10 @@ Changes to the LLVM tools
if it's not specified with the ``--format`` argument and cannot be inferred from
input files.
+* llvm-objcopy now supports ``--set-symbol-visibility`` and
+ ``--set-symbols-visibility`` options for ELF input to change the
+ visibility of symbols.
+
Changes to LLDB
---------------------------------
diff --git a/llvm/include/llvm/ADT/APFloat.h b/llvm/include/llvm/ADT/APFloat.h
index 8c247bbcec90..deb74cb2fdeb 100644
--- a/llvm/include/llvm/ADT/APFloat.h
+++ b/llvm/include/llvm/ADT/APFloat.h
@@ -1389,29 +1389,35 @@ inline APFloat neg(APFloat X) {
return X;
}
-/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
-/// both are not NaN. If either argument is a NaN, returns the other argument.
+/// Implements IEEE-754 2019 minimumNumber semantics. Returns the smaller of the
+/// 2 arguments if both are not NaN. If either argument is a NaN, returns the
+/// other argument. -0 is treated as ordered less than +0.
LLVM_READONLY
inline APFloat minnum(const APFloat &A, const APFloat &B) {
if (A.isNaN())
return B;
if (B.isNaN())
return A;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? A : B;
return B < A ? B : A;
}
-/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
-/// both are not NaN. If either argument is a NaN, returns the other argument.
+/// Implements IEEE-754 2019 maximumNumber semantics. Returns the larger of the
+/// 2 arguments if both are not NaN. If either argument is a NaN, returns the
+/// other argument. +0 is treated as ordered greater than -0.
LLVM_READONLY
inline APFloat maxnum(const APFloat &A, const APFloat &B) {
if (A.isNaN())
return B;
if (B.isNaN())
return A;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? B : A;
return A < B ? B : A;
}
-/// Implements IEEE 754-2018 minimum semantics. Returns the smaller of 2
+/// Implements IEEE 754-2019 minimum semantics. Returns the smaller of 2
/// arguments, propagating NaNs and treating -0 as less than +0.
LLVM_READONLY
inline APFloat minimum(const APFloat &A, const APFloat &B) {
@@ -1424,7 +1430,7 @@ inline APFloat minimum(const APFloat &A, const APFloat &B) {
return B < A ? B : A;
}
-/// Implements IEEE 754-2018 maximum semantics. Returns the larger of 2
+/// Implements IEEE 754-2019 maximum semantics. Returns the larger of 2
/// arguments, propagating NaNs and treating -0 as less than +0.
LLVM_READONLY
inline APFloat maximum(const APFloat &A, const APFloat &B) {
diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h
index c3dcd568216b..a28e19edb4c6 100644
--- a/llvm/include/llvm/BinaryFormat/DXContainer.h
+++ b/llvm/include/llvm/BinaryFormat/DXContainer.h
@@ -141,7 +141,7 @@ enum class PartType {
#include "DXContainerConstants.def"
};
-#define SHADER_FLAG(Num, Val, Str) Val = 1ull << Num,
+#define SHADER_FEATURE_FLAG(Num, Val, Str) Val = 1ull << Num,
enum class FeatureFlags : uint64_t {
#include "DXContainerConstants.def"
};
diff --git a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def
index 87dd0a5cb6ba..80ed86bc3a49 100644
--- a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def
+++ b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def
@@ -11,43 +11,43 @@ CONTAINER_PART(PSG1)
#undef CONTAINER_PART
#endif
-#ifdef SHADER_FLAG
-
-SHADER_FLAG(0, Doubles, "Double-precision floating point")
-SHADER_FLAG(1, ComputeShadersPlusRawAndStructuredBuffers, "Raw and Structured buffers")
-SHADER_FLAG(2, UAVsAtEveryStage, "UAVs at every shader stage")
-SHADER_FLAG(3, Max64UAVs, "64 UAV slots")
-SHADER_FLAG(4, MinimumPrecision, "Minimum-precision data types")
-SHADER_FLAG(5, DX11_1_DoubleExtensions, "Double-precision extensions for 11.1")
-SHADER_FLAG(6, DX11_1_ShaderExtensions, "Shader extensions for 11.1")
-SHADER_FLAG(7, LEVEL9ComparisonFiltering, "Comparison filtering for feature level 9")
-SHADER_FLAG(8, TiledResources, "Tiled resources")
-SHADER_FLAG(9, StencilRef, "PS Output Stencil Ref")
-SHADER_FLAG(10, InnerCoverage, "PS Inner Coverage")
-SHADER_FLAG(11, TypedUAVLoadAdditionalFormats, "Typed UAV Load Additional Formats")
-SHADER_FLAG(12, ROVs, "Raster Ordered UAVs")
-SHADER_FLAG(13, ViewportAndRTArrayIndexFromAnyShaderFeedingRasterizer, "SV_RenderTargetArrayIndex or SV_ViewportArrayIndex from any shader feeding rasterizer")
-SHADER_FLAG(14, WaveOps, "Wave level operations")
-SHADER_FLAG(15, Int64Ops, "64-Bit integer")
-SHADER_FLAG(16, ViewID, "View Instancing")
-SHADER_FLAG(17, Barycentrics, "Barycentrics")
-SHADER_FLAG(18, NativeLowPrecision, "Use native low precision")
-SHADER_FLAG(19, ShadingRate, "Shading Rate")
-SHADER_FLAG(20, Raytracing_Tier_1_1, "Raytracing tier 1.1 features")
-SHADER_FLAG(21, SamplerFeedback, "Sampler feedback")
-SHADER_FLAG(22, AtomicInt64OnTypedResource, "64-bit Atomics on Typed Resources")
-SHADER_FLAG(23, AtomicInt64OnGroupShared, "64-bit Atomics on Group Shared")
-SHADER_FLAG(24, DerivativesInMeshAndAmpShaders, "Derivatives in mesh and amplification shaders")
-SHADER_FLAG(25, ResourceDescriptorHeapIndexing, "Resource descriptor heap indexing")
-SHADER_FLAG(26, SamplerDescriptorHeapIndexing, "Sampler descriptor heap indexing")
-SHADER_FLAG(27, RESERVED, "<RESERVED>")
-SHADER_FLAG(28, AtomicInt64OnHeapResource, "64-bit Atomics on Heap Resources")
-SHADER_FLAG(29, AdvancedTextureOps, "Advanced Texture Ops")
-SHADER_FLAG(30, WriteableMSAATextures, "Writeable MSAA Textures")
-
-SHADER_FLAG(31, NextUnusedBit, "Next reserved shader flag bit (not a flag)")
-
-#undef SHADER_FLAG
+#ifdef SHADER_FEATURE_FLAG
+
+SHADER_FEATURE_FLAG(0, Doubles, "Double-precision floating point")
+SHADER_FEATURE_FLAG(1, ComputeShadersPlusRawAndStructuredBuffers, "Raw and Structured buffers")
+SHADER_FEATURE_FLAG(2, UAVsAtEveryStage, "UAVs at every shader stage")
+SHADER_FEATURE_FLAG(3, Max64UAVs, "64 UAV slots")
+SHADER_FEATURE_FLAG(4, MinimumPrecision, "Minimum-precision data types")
+SHADER_FEATURE_FLAG(5, DX11_1_DoubleExtensions, "Double-precision extensions for 11.1")
+SHADER_FEATURE_FLAG(6, DX11_1_ShaderExtensions, "Shader extensions for 11.1")
+SHADER_FEATURE_FLAG(7, LEVEL9ComparisonFiltering, "Comparison filtering for feature level 9")
+SHADER_FEATURE_FLAG(8, TiledResources, "Tiled resources")
+SHADER_FEATURE_FLAG(9, StencilRef, "PS Output Stencil Ref")
+SHADER_FEATURE_FLAG(10, InnerCoverage, "PS Inner Coverage")
+SHADER_FEATURE_FLAG(11, TypedUAVLoadAdditionalFormats, "Typed UAV Load Additional Formats")
+SHADER_FEATURE_FLAG(12, ROVs, "Raster Ordered UAVs")
+SHADER_FEATURE_FLAG(13, ViewportAndRTArrayIndexFromAnyShaderFeedingRasterizer, "SV_RenderTargetArrayIndex or SV_ViewportArrayIndex from any shader feeding rasterizer")
+SHADER_FEATURE_FLAG(14, WaveOps, "Wave level operations")
+SHADER_FEATURE_FLAG(15, Int64Ops, "64-Bit integer")
+SHADER_FEATURE_FLAG(16, ViewID, "View Instancing")
+SHADER_FEATURE_FLAG(17, Barycentrics, "Barycentrics")
+SHADER_FEATURE_FLAG(18, NativeLowPrecision, "Use native low precision")
+SHADER_FEATURE_FLAG(19, ShadingRate, "Shading Rate")
+SHADER_FEATURE_FLAG(20, Raytracing_Tier_1_1, "Raytracing tier 1.1 features")
+SHADER_FEATURE_FLAG(21, SamplerFeedback, "Sampler feedback")
+SHADER_FEATURE_FLAG(22, AtomicInt64OnTypedResource, "64-bit Atomics on Typed Resources")
+SHADER_FEATURE_FLAG(23, AtomicInt64OnGroupShared, "64-bit Atomics on Group Shared")
+SHADER_FEATURE_FLAG(24, DerivativesInMeshAndAmpShaders, "Derivatives in mesh and amplification shaders")
+SHADER_FEATURE_FLAG(25, ResourceDescriptorHeapIndexing, "Resource descriptor heap indexing")
+SHADER_FEATURE_FLAG(26, SamplerDescriptorHeapIndexing, "Sampler descriptor heap indexing")
+SHADER_FEATURE_FLAG(27, RESERVED, "<RESERVED>")
+SHADER_FEATURE_FLAG(28, AtomicInt64OnHeapResource, "64-bit Atomics on Heap Resources")
+SHADER_FEATURE_FLAG(29, AdvancedTextureOps, "Advanced Texture Ops")
+SHADER_FEATURE_FLAG(30, WriteableMSAATextures, "Writeable MSAA Textures")
+
+SHADER_FEATURE_FLAG(31, NextUnusedBit, "Next reserved shader flag bit (not a flag)")
+
+#undef SHADER_FEATURE_FLAG
#endif
#ifdef SEMANTIC_KIND
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 5454df02914a..bfac54a65c5b 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -612,6 +612,7 @@ private:
AAResults *AA = nullptr;
AssumptionCache *AC = nullptr;
const TargetLibraryInfo *LibInfo = nullptr;
+ const TargetLowering *TLI = nullptr;
FunctionLoweringInfo FuncInfo;
// True when either the Target Machine specifies no optimizations or the
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 8cb0bc9fd981..ad876c5db450 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -978,7 +978,7 @@ enum NodeType {
/// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
/// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
- /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
+ /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2019 semantics.
FMINIMUM,
FMAXIMUM,
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 2fc1ceafa927..25e6c525b672 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1488,9 +1488,6 @@ public:
SDValue Chain, SDValue Ptr, SDValue Stride,
SDValue Mask, SDValue EVL, EVT MemVT,
MachineMemOperand *MMO, bool IsExpanding = false);
- SDValue getIndexedStridedLoadVP(SDValue OrigLoad, const SDLoc &DL,
- SDValue Base, SDValue Offset,
- ISD::MemIndexedMode AM);
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val,
SDValue Ptr, SDValue Offset, SDValue Stride,
SDValue Mask, SDValue EVL, EVT MemVT,
@@ -1501,9 +1498,6 @@ public:
SDValue Ptr, SDValue Stride, SDValue Mask,
SDValue EVL, EVT SVT, MachineMemOperand *MMO,
bool IsCompressing = false);
- SDValue getIndexedStridedStoreVP(SDValue OrigStore, const SDLoc &DL,
- SDValue Base, SDValue Offset,
- ISD::MemIndexedMode AM);
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h b/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
index 3d0f836b0c75..29de6bd8685e 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
#define LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include <cstdint>
@@ -81,10 +82,8 @@ public:
// Returns true `Op0` and `Op1` can be proven to alias/not alias, in
// which case `IsAlias` is set to true/false.
- static bool computeAliasing(const SDNode *Op0,
- const std::optional<int64_t> NumBytes0,
- const SDNode *Op1,
- const std::optional<int64_t> NumBytes1,
+ static bool computeAliasing(const SDNode *Op0, const LocationSize NumBytes0,
+ const SDNode *Op1, const LocationSize NumBytes1,
const SelectionDAG &DAG, bool &IsAlias);
/// Parses tree in N for base, index, offset addresses.
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index f2e00aab8d5d..4c2815679efc 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -596,6 +596,42 @@ public:
/// avoided.
bool isJumpExpensive() const { return JumpIsExpensive; }
+ // Costs parameters used by
+ // SelectionDAGBuilder::shouldKeepJumpConditionsTogether.
+ // shouldKeepJumpConditionsTogether will use these parameter value to
+ // determine if two conditions in the form `br (and/or cond1, cond2)` should
+ // be split into two branches or left as one.
+ //
+ // BaseCost is the cost threshold (in latency). If the estimated latency of
+ // computing both `cond1` and `cond2` is below the cost of just computing
+ // `cond1` + BaseCost, the two conditions will be kept together. Otherwise
+ // they will be split.
+ //
+ // LikelyBias increases BaseCost if branch probability info indicates that it
+ // is likely that both `cond1` and `cond2` will be computed.
+ //
+ // UnlikelyBias decreases BaseCost if branch probability info indicates that
+ // it is unlikely that both `cond1` and `cond2` will be computed.
+ //
+ // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
+ // `shouldKeepJumpConditionsTogether` always returning false).
+ struct CondMergingParams {
+ int BaseCost;
+ int LikelyBias;
+ int UnlikelyBias;
+ };
+ // Return params for deciding if we should keep two branch conditions merged
+ // or split them into two separate branches.
+ // Arg0: The binary op joining the two conditions (and/or).
+ // Arg1: The first condition (cond1)
+ // Arg2: The second condition (cond2)
+ virtual CondMergingParams
+ getJumpConditionMergingParams(Instruction::BinaryOps, const Value *,
+ const Value *) const {
+ // -1 will always result in splitting.
+ return {-1, -1, -1};
+ }
+
/// Return true if selects are only cheaper than branches if the branch is
/// unlikely to be predicted right.
bool isPredictableSelectExpensive() const {
diff --git a/llvm/include/llvm/DebugInfo/DIContext.h b/llvm/include/llvm/DebugInfo/DIContext.h
index 288ddf77bdfd..b75dc8db5433 100644
--- a/llvm/include/llvm/DebugInfo/DIContext.h
+++ b/llvm/include/llvm/DebugInfo/DIContext.h
@@ -206,6 +206,7 @@ struct DIDumpOptions {
bool IsEH = false;
bool DumpNonSkeleton = false;
bool ShowAggregateErrors = false;
+ std::string JsonErrSummaryFile;
std::function<llvm::StringRef(uint64_t DwarfRegNum, bool IsEH)>
GetNameForDWARFReg;
diff --git a/llvm/include/llvm/Demangle/ItaniumDemangle.h b/llvm/include/llvm/Demangle/ItaniumDemangle.h
index 04bc58d8f63e..d33af157543f 100644
--- a/llvm/include/llvm/Demangle/ItaniumDemangle.h
+++ b/llvm/include/llvm/Demangle/ItaniumDemangle.h
@@ -5540,7 +5540,7 @@ Node *AbstractManglingParser<Alloc, Derived>::parseFloatingLiteral() {
return nullptr;
std::string_view Data(First, N);
for (char C : Data)
- if (!std::isxdigit(C))
+ if (!(C >= '0' && C <= '9') && !(C >= 'a' && C <= 'f'))
return nullptr;
First += N;
if (!consumeIf('E'))
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index 589a9066ac57..5bbaa8c208b8 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -1834,13 +1834,15 @@ public:
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsNowait If false, a barrier is emitted.
- /// \param DidIt Local variable used as a flag to indicate 'single' thread
+ /// \param CPVars copyprivate variables.
+ /// \param CPFuncs copy functions to use for each copyprivate variable.
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool IsNowait,
- llvm::Value *DidIt);
+ ArrayRef<llvm::Value *> CPVars = {},
+ ArrayRef<llvm::Function *> CPFuncs = {});
/// Generator for '#omp master'
///
diff --git a/llvm/include/llvm/IR/DIBuilder.h b/llvm/include/llvm/IR/DIBuilder.h
index edec161b3971..010dcbfdadca 100644
--- a/llvm/include/llvm/IR/DIBuilder.h
+++ b/llvm/include/llvm/IR/DIBuilder.h
@@ -262,6 +262,13 @@ namespace llvm {
std::optional<unsigned> DWARFAddressSpace = std::nullopt,
StringRef Name = "", DINodeArray Annotations = nullptr);
+ /// Create a __ptrauth qualifier.
+ DIDerivedType *createPtrAuthQualifiedType(DIType *FromTy, unsigned Key,
+ bool IsAddressDiscriminated,
+ unsigned ExtraDiscriminator,
+ bool IsaPointer,
+ bool authenticatesNullValues);
+
/// Create debugging information entry for a pointer to member.
/// \param PointeeTy Type pointed to by this pointer.
/// \param SizeInBits Size.
diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h
index 156f6eb49253..1a953c53c17a 100644
--- a/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -745,7 +745,7 @@ public:
unsigned getLine() const { return Line; }
uint64_t getSizeInBits() const { return SizeInBits; }
- uint32_t getAlignInBits() const { return SubclassData32; }
+ uint32_t getAlignInBits() const;
uint32_t getAlignInBytes() const { return getAlignInBits() / CHAR_BIT; }
uint64_t getOffsetInBits() const { return OffsetInBits; }
DIFlags getFlags() const { return Flags; }
@@ -972,6 +972,40 @@ public:
///
/// TODO: Split out members (inheritance, fields, methods, etc.).
class DIDerivedType : public DIType {
+public:
+ /// Pointer authentication (__ptrauth) metadata.
+ struct PtrAuthData {
+ union {
+ struct {
+ unsigned Key : 4;
+ unsigned IsAddressDiscriminated : 1;
+ unsigned ExtraDiscriminator : 16;
+ unsigned IsaPointer : 1;
+ unsigned AuthenticatesNullValues : 1;
+ } Data;
+ unsigned RawData;
+ } Payload;
+
+ PtrAuthData(unsigned FromRawData) { Payload.RawData = FromRawData; }
+ PtrAuthData(unsigned Key, bool IsDiscr, unsigned Discriminator,
+ bool IsaPointer, bool AuthenticatesNullValues) {
+ assert(Key < 16);
+ assert(Discriminator <= 0xffff);
+ Payload.Data.Key = Key;
+ Payload.Data.IsAddressDiscriminated = IsDiscr;
+ Payload.Data.ExtraDiscriminator = Discriminator;
+ Payload.Data.IsaPointer = IsaPointer;
+ Payload.Data.AuthenticatesNullValues = AuthenticatesNullValues;
+ }
+ bool operator==(struct PtrAuthData Other) const {
+ return Payload.RawData == Other.Payload.RawData;
+ }
+ bool operator!=(struct PtrAuthData Other) const {
+ return !(*this == Other);
+ }
+ };
+
+private:
friend class LLVMContextImpl;
friend class MDNode;
@@ -982,59 +1016,70 @@ class DIDerivedType : public DIType {
DIDerivedType(LLVMContext &C, StorageType Storage, unsigned Tag,
unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
uint64_t OffsetInBits,
- std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
+ std::optional<unsigned> DWARFAddressSpace,
+ std::optional<PtrAuthData> PtrAuthData, DIFlags Flags,
ArrayRef<Metadata *> Ops)
: DIType(C, DIDerivedTypeKind, Storage, Tag, Line, SizeInBits,
AlignInBits, OffsetInBits, Flags, Ops),
- DWARFAddressSpace(DWARFAddressSpace) {}
+ DWARFAddressSpace(DWARFAddressSpace) {
+ if (PtrAuthData)
+ SubclassData32 = PtrAuthData->Payload.RawData;
+ }
~DIDerivedType() = default;
static DIDerivedType *
getImpl(LLVMContext &Context, unsigned Tag, StringRef Name, DIFile *File,
unsigned Line, DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
uint32_t AlignInBits, uint64_t OffsetInBits,
- std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
+ std::optional<unsigned> DWARFAddressSpace,
+ std::optional<PtrAuthData> PtrAuthData, DIFlags Flags,
Metadata *ExtraData, DINodeArray Annotations, StorageType Storage,
bool ShouldCreate = true) {
return getImpl(Context, Tag, getCanonicalMDString(Context, Name), File,
Line, Scope, BaseType, SizeInBits, AlignInBits, OffsetInBits,
- DWARFAddressSpace, Flags, ExtraData, Annotations.get(),
- Storage, ShouldCreate);
+ DWARFAddressSpace, PtrAuthData, Flags, ExtraData,
+ Annotations.get(), Storage, ShouldCreate);
}
static DIDerivedType *
getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
unsigned Line, Metadata *Scope, Metadata *BaseType,
uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
- std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
+ std::optional<unsigned> DWARFAddressSpace,
+ std::optional<PtrAuthData> PtrAuthData, DIFlags Flags,
Metadata *ExtraData, Metadata *Annotations, StorageType Storage,
bool ShouldCreate = true);
TempDIDerivedType cloneImpl() const {
- return getTemporary(
- getContext(), getTag(), getName(), getFile(), getLine(), getScope(),
- getBaseType(), getSizeInBits(), getAlignInBits(), getOffsetInBits(),
- getDWARFAddressSpace(), getFlags(), getExtraData(), getAnnotations());
+ return getTemporary(getContext(), getTag(), getName(), getFile(), getLine(),
+ getScope(), getBaseType(), getSizeInBits(),
+ getAlignInBits(), getOffsetInBits(),
+ getDWARFAddressSpace(), getPtrAuthData(), getFlags(),
+ getExtraData(), getAnnotations());
}
public:
- DEFINE_MDNODE_GET(
- DIDerivedType,
- (unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
- Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
- uint32_t AlignInBits, uint64_t OffsetInBits,
- std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
- Metadata *ExtraData = nullptr, Metadata *Annotations = nullptr),
- (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits,
- OffsetInBits, DWARFAddressSpace, Flags, ExtraData, Annotations))
+ DEFINE_MDNODE_GET(DIDerivedType,
+ (unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits,
+ std::optional<unsigned> DWARFAddressSpace,
+ std::optional<PtrAuthData> PtrAuthData, DIFlags Flags,
+ Metadata *ExtraData = nullptr,
+ Metadata *Annotations = nullptr),
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, DWARFAddressSpace, PtrAuthData,
+ Flags, ExtraData, Annotations))
DEFINE_MDNODE_GET(DIDerivedType,
(unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
uint32_t AlignInBits, uint64_t OffsetInBits,
- std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
+ std::optional<unsigned> DWARFAddressSpace,
+ std::optional<PtrAuthData> PtrAuthData, DIFlags Flags,
Metadata *ExtraData = nullptr,
DINodeArray Annotations = nullptr),
(Tag, Name, File, Line, Scope, BaseType, SizeInBits,
- AlignInBits, OffsetInBits, DWARFAddressSpace, Flags,
- ExtraData, Annotations))
+ AlignInBits, OffsetInBits, DWARFAddressSpace, PtrAuthData,
+ Flags, ExtraData, Annotations))
TempDIDerivedType clone() const { return cloneImpl(); }
@@ -1048,6 +1093,39 @@ public:
return DWARFAddressSpace;
}
+ std::optional<PtrAuthData> getPtrAuthData() const;
+
+ /// \returns The PointerAuth key.
+ std::optional<unsigned> getPtrAuthKey() const {
+ if (auto PtrAuthData = getPtrAuthData())
+ return (unsigned)PtrAuthData->Payload.Data.Key;
+ return std::nullopt;
+ }
+ /// \returns The PointerAuth address discrimination bit.
+ std::optional<bool> isPtrAuthAddressDiscriminated() const {
+ if (auto PtrAuthData = getPtrAuthData())
+ return (bool)PtrAuthData->Payload.Data.IsAddressDiscriminated;
+ return std::nullopt;
+ }
+ /// \returns The PointerAuth extra discriminator.
+ std::optional<unsigned> getPtrAuthExtraDiscriminator() const {
+ if (auto PtrAuthData = getPtrAuthData())
+ return (unsigned)PtrAuthData->Payload.Data.ExtraDiscriminator;
+ return std::nullopt;
+ }
+ /// \returns The PointerAuth IsaPointer bit.
+ std::optional<bool> isPtrAuthIsaPointer() const {
+ if (auto PtrAuthData = getPtrAuthData())
+ return (bool)PtrAuthData->Payload.Data.IsaPointer;
+ return std::nullopt;
+ }
+ /// \returns The PointerAuth authenticates null values bit.
+ std::optional<bool> getPtrAuthAuthenticatesNullValues() const {
+ if (auto PtrAuthData = getPtrAuthData())
+ return (bool)PtrAuthData->Payload.Data.AuthenticatesNullValues;
+ return std::nullopt;
+ }
+
/// Get extra data associated with this derived type.
///
/// Class type for pointer-to-members, objective-c property node for ivars,
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 4ee51cd192ed..0e81d3b391a0 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -468,9 +468,7 @@ public:
static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
BasicBlock::iterator InsertBefore);
static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = nullptr);
- static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
- BasicBlock *InsertAtEnd);
+ BasicBlock *InsertAtEnd = nullptr);
static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
BasicBlock::iterator InsertBefore);
static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
@@ -1538,10 +1536,19 @@ public:
OperandBundleDef OB,
Instruction *InsertPt = nullptr);
+ /// Create a clone of \p CB with operand bundle \p OB added.
+ static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
+ OperandBundleDef OB,
+ BasicBlock::iterator InsertPt);
+
/// Create a clone of \p CB with operand bundle \p ID removed.
static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
Instruction *InsertPt = nullptr);
+ /// Create a clone of \p CB with operand bundle \p ID removed.
+ static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
+ BasicBlock::iterator InsertPt);
+
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call ||
I->getOpcode() == Instruction::Invoke ||
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 0f13d25eb30e..c2c0f74c315b 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -514,6 +514,7 @@ def llvm_v2048i1_ty : LLVMType<v2048i1>; //2048 x i1
def llvm_v1i8_ty : LLVMType<v1i8>; // 1 x i8
def llvm_v2i8_ty : LLVMType<v2i8>; // 2 x i8
+def llvm_v3i8_ty : LLVMType<v3i8>; // 3 x i8
def llvm_v4i8_ty : LLVMType<v4i8>; // 4 x i8
def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8
def llvm_v16i8_ty : LLVMType<v16i8>; // 16 x i8
@@ -535,6 +536,7 @@ def llvm_v1i32_ty : LLVMType<v1i32>; // 1 x i32
def llvm_v2i32_ty : LLVMType<v2i32>; // 2 x i32
def llvm_v3i32_ty : LLVMType<v3i32>; // 3 x i32
def llvm_v4i32_ty : LLVMType<v4i32>; // 4 x i32
+def llvm_v6i32_ty : LLVMType<v6i32>; // 6 x i32
def llvm_v8i32_ty : LLVMType<v8i32>; // 8 x i32
def llvm_v16i32_ty : LLVMType<v16i32>; // 16 x i32
def llvm_v32i32_ty : LLVMType<v32i32>; // 32 x i32
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 0f29653f1f5b..051e603c0819 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3196,4 +3196,11 @@ def int_amdgcn_fdiv_fast : DefaultAttrsIntrinsic<
[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
[IntrNoMem, IntrSpeculatable]
>;
+
+/// Emit an addrspacecast without null pointer checking.
+/// Should only be inserted by a pass based on analysis of an addrspacecast's src.
+def int_amdgcn_addrspacecast_nonnull : DefaultAttrsIntrinsic<
+ [llvm_anyptr_ty], [llvm_anyptr_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
}
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index c192d4b84417..b44d1c6d3d2f 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -24,4 +24,11 @@ def int_dx_dot :
Intrinsic<[LLVMVectorElementType<0>],
[llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>],
[IntrNoMem, IntrWillReturn, Commutative] >;
+
+def int_dx_frac : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+
+def int_dx_lerp :
+ Intrinsic<[LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>],
+ [llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>,LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>],
+ [IntrNoMem, IntrWillReturn] >;
}
diff --git a/llvm/include/llvm/IR/Module.h b/llvm/include/llvm/IR/Module.h
index e41a5940540b..bb2e667ef6f4 100644
--- a/llvm/include/llvm/IR/Module.h
+++ b/llvm/include/llvm/IR/Module.h
@@ -385,17 +385,14 @@ public:
/// @name Function Accessors
/// @{
- /// Look up the specified function in the module symbol table. Four
- /// possibilities:
- /// 1. If it does not exist, add a prototype for the function and return it.
- /// 2. Otherwise, if the existing function has the correct prototype, return
- /// the existing function.
- /// 3. Finally, the function exists but has the wrong prototype: return the
- /// function with a constantexpr cast to the right prototype.
+ /// Look up the specified function in the module symbol table. If it does not
+ /// exist, add a prototype for the function and return it. Otherwise, return
+ /// the existing function.
///
/// In all cases, the returned value is a FunctionCallee wrapper around the
- /// 'FunctionType *T' passed in, as well as a 'Value*' either of the Function or
- /// the bitcast to the function.
+ /// 'FunctionType *T' passed in, as well as the 'Value*' of the Function. The
+ /// function type of the function may differ from the function type stored in
+ /// FunctionCallee if it was previously created with a different type.
///
/// Note: For library calls getOrInsertLibFunc() should be used instead.
FunctionCallee getOrInsertFunction(StringRef Name, FunctionType *T,
@@ -403,12 +400,8 @@ public:
FunctionCallee getOrInsertFunction(StringRef Name, FunctionType *T);
- /// Look up the specified function in the module symbol table. If it does not
- /// exist, add a prototype for the function and return it. This function
- /// guarantees to return a constant of pointer to the specified function type
- /// or a ConstantExpr BitCast of that type if the named function has a
- /// different type. This version of the method takes a list of
- /// function arguments, which makes it easier for clients to use.
+ /// Same as above, but takes a list of function arguments, which makes it
+ /// easier for clients to use.
template <typename... ArgsTy>
FunctionCallee getOrInsertFunction(StringRef Name,
AttributeList AttributeList, Type *RetTy,
diff --git a/llvm/include/llvm/MC/MCExpr.h b/llvm/include/llvm/MC/MCExpr.h
index b31196093720..42d240254be6 100644
--- a/llvm/include/llvm/MC/MCExpr.h
+++ b/llvm/include/llvm/MC/MCExpr.h
@@ -307,6 +307,8 @@ public:
VK_PPC_AIX_TLSGDM, // symbol@m
VK_PPC_AIX_TLSIE, // symbol@ie
VK_PPC_AIX_TLSLE, // symbol@le
+ VK_PPC_AIX_TLSLD, // symbol@ld
+ VK_PPC_AIX_TLSML, // symbol@ml
VK_PPC_GOT_TLSLD, // symbol@got@tlsld
VK_PPC_GOT_TLSLD_LO, // symbol@got@tlsld@l
VK_PPC_GOT_TLSLD_HI, // symbol@got@tlsld@h
diff --git a/llvm/include/llvm/ObjCopy/ELF/ELFConfig.h b/llvm/include/llvm/ObjCopy/ELF/ELFConfig.h
index d77cb69b159d..eafed92516c7 100644
--- a/llvm/include/llvm/ObjCopy/ELF/ELFConfig.h
+++ b/llvm/include/llvm/ObjCopy/ELF/ELFConfig.h
@@ -9,6 +9,7 @@
#ifndef LLVM_OBJCOPY_ELF_ELFCONFIG_H
#define LLVM_OBJCOPY_ELF_ELFCONFIG_H
+#include "llvm/ObjCopy/CommonConfig.h"
#include "llvm/Object/ELFTypes.h"
namespace llvm {
@@ -18,6 +19,8 @@ namespace objcopy {
struct ELFConfig {
uint8_t NewSymbolVisibility = (uint8_t)ELF::STV_DEFAULT;
+ std::vector<std::pair<NameMatcher, uint8_t>> SymbolsToSetVisibility;
+
// ELF entry point address expression. The input parameter is an entry point
// address in the input ELF file. The entry address in the output file is
// calculated with EntryExpr(input_address), when either --set-start or
diff --git a/llvm/include/llvm/Object/DXContainer.h b/llvm/include/llvm/Object/DXContainer.h
index a7f18c799698..b6e3d321da24 100644
--- a/llvm/include/llvm/Object/DXContainer.h
+++ b/llvm/include/llvm/Object/DXContainer.h
@@ -276,7 +276,7 @@ private:
dxbc::Header Header;
SmallVector<uint32_t, 4> PartOffsets;
std::optional<DXILData> DXIL;
- std::optional<uint64_t> ShaderFlags;
+ std::optional<uint64_t> ShaderFeatureFlags;
std::optional<dxbc::ShaderHash> Hash;
std::optional<DirectX::PSVRuntimeInfo> PSVInfo;
DirectX::Signature InputSignature;
@@ -286,7 +286,7 @@ private:
Error parseHeader();
Error parsePartOffsets();
Error parseDXILHeader(StringRef Part);
- Error parseShaderFlags(StringRef Part);
+ Error parseShaderFeatureFlags(StringRef Part);
Error parseHash(StringRef Part);
Error parsePSVInfo(StringRef Part);
Error parseSignature(StringRef Part, DirectX::Signature &Array);
@@ -368,7 +368,9 @@ public:
const std::optional<DXILData> &getDXIL() const { return DXIL; }
- std::optional<uint64_t> getShaderFlags() const { return ShaderFlags; }
+ std::optional<uint64_t> getShaderFeatureFlags() const {
+ return ShaderFeatureFlags;
+ }
std::optional<dxbc::ShaderHash> getShaderHash() const { return Hash; }
diff --git a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h
index 66a6ac70bbea..497f82bbd0f3 100644
--- a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h
+++ b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h
@@ -56,10 +56,10 @@ struct DXILProgram {
std::optional<std::vector<llvm::yaml::Hex8>> DXIL;
};
-#define SHADER_FLAG(Num, Val, Str) bool Val = false;
-struct ShaderFlags {
- ShaderFlags() = default;
- ShaderFlags(uint64_t FlagData);
+#define SHADER_FEATURE_FLAG(Num, Val, Str) bool Val = false;
+struct ShaderFeatureFlags {
+ ShaderFeatureFlags() = default;
+ ShaderFeatureFlags(uint64_t FlagData);
uint64_t getEncodedFlags();
#include "llvm/BinaryFormat/DXContainerConstants.def"
};
@@ -151,7 +151,7 @@ struct Part {
std::string Name;
uint32_t Size;
std::optional<DXILProgram> Program;
- std::optional<ShaderFlags> Flags;
+ std::optional<ShaderFeatureFlags> Flags;
std::optional<ShaderHash> Hash;
std::optional<PSVInfo> Info;
std::optional<DXContainerYAML::Signature> Signature;
@@ -195,8 +195,8 @@ template <> struct MappingTraits<DXContainerYAML::DXILProgram> {
static void mapping(IO &IO, DXContainerYAML::DXILProgram &Program);
};
-template <> struct MappingTraits<DXContainerYAML::ShaderFlags> {
- static void mapping(IO &IO, DXContainerYAML::ShaderFlags &Flags);
+template <> struct MappingTraits<DXContainerYAML::ShaderFeatureFlags> {
+ static void mapping(IO &IO, DXContainerYAML::ShaderFeatureFlags &Flags);
};
template <> struct MappingTraits<DXContainerYAML::ShaderHash> {
diff --git a/llvm/include/llvm/Support/KnownBits.h b/llvm/include/llvm/Support/KnownBits.h
index fb034e0b9e3b..69c569b97cca 100644
--- a/llvm/include/llvm/Support/KnownBits.h
+++ b/llvm/include/llvm/Support/KnownBits.h
@@ -385,6 +385,9 @@ public:
/// Compute known bits for smin(LHS, RHS).
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS);
+ /// Compute known bits for absdiff(LHS, RHS).
+ static KnownBits absdiff(const KnownBits &LHS, const KnownBits &RHS);
+
/// Compute known bits for shl(LHS, RHS).
/// NOTE: RHS (shift amount) bitwidth doesn't need to be the same as LHS.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS,
diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
index 1b793b0eccf3..68dbe1ea3062 100644
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -321,8 +321,6 @@ class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
: FixedOrScalableQuantity(V) {}
public:
- constexpr TypeSize() : FixedOrScalableQuantity(0, false) {}
-
constexpr TypeSize(ScalarTy Quantity, bool Scalable)
: FixedOrScalableQuantity(Quantity, Scalable) {}
diff --git a/llvm/include/llvm/Support/VirtualFileSystem.h b/llvm/include/llvm/Support/VirtualFileSystem.h
index 1a5ea677db74..ef1fac92c2fa 100644
--- a/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -672,7 +672,6 @@ class RedirectingFileSystemParser;
/// ]
/// }
/// \endverbatim
-///
/// The roots may be absolute or relative. If relative they will be made
/// absolute against either current working directory or the directory where
/// the Overlay YAML file is located, depending on the 'root-relative'
@@ -704,7 +703,6 @@ class RedirectingFileSystemParser;
/// 'contents': [ <file or directory entries> ]
/// }
/// \endverbatim
-///
/// The default attributes for such virtual directories are:
/// \verbatim
/// MTime = now() when created
@@ -713,7 +711,6 @@ class RedirectingFileSystemParser;
/// Size = 0
/// UniqueID = unspecified unique value
/// \endverbatim
-///
/// When a path prefix matches such a directory, the next component in the path
/// is matched against the entries in the 'contents' array.
///
@@ -726,7 +723,6 @@ class RedirectingFileSystemParser;
/// 'external-contents': <path to external directory>
/// }
/// \endverbatim
-///
/// and inherit their attributes from the external directory. When a path
/// prefix matches such an entry, the unmatched components are appended to the
/// 'external-contents' path, and the resulting path is looked up in the
@@ -741,7 +737,6 @@ class RedirectingFileSystemParser;
/// 'external-contents': <path to external file>
/// }
/// \endverbatim
-///
/// Their attributes and file contents are determined by looking up the file at
/// their 'external-contents' path in the external file system.
///
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 19197f50d9df..d2036e478d18 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -815,7 +815,7 @@ def G_FMAXNUM_IEEE : GenericInstruction {
// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
-// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
+// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2019 semantics.
def G_FMINIMUM : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins type0:$src1, type0:$src2);
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 17757ca3e411..18db7a819540 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -952,6 +952,37 @@ def redundant_binop_in_equality : GICombineRule<
[{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+// Transform: (X == 0 & Y == 0) -> (X | Y) == 0
+def double_icmp_zero_and_combine: GICombineRule<
+ (defs root:$root),
+ (match (G_ICMP $d1, $p, $s1, 0),
+ (G_ICMP $d2, $p, $s2, 0),
+ (G_AND $root, $d1, $d2),
+ [{ return ${p}.getPredicate() == CmpInst::ICMP_EQ &&
+ !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
+ (MRI.getType(${s1}.getReg()) ==
+ MRI.getType(${s2}.getReg())); }]),
+ (apply (G_OR $ordst, $s1, $s2),
+ (G_ICMP $root, $p, $ordst, 0))
+>;
+
+// Transform: (X != 0 | Y != 0) -> (X | Y) != 0
+def double_icmp_zero_or_combine: GICombineRule<
+ (defs root:$root),
+ (match (G_ICMP $d1, $p, $s1, 0),
+ (G_ICMP $d2, $p, $s2, 0),
+ (G_OR $root, $d1, $d2),
+ [{ return ${p}.getPredicate() == CmpInst::ICMP_NE &&
+ !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
+ (MRI.getType(${s1}.getReg()) ==
+ MRI.getType(${s2}.getReg())); }]),
+ (apply (G_OR $ordst, $s1, $s2),
+ (G_ICMP $root, $p, $ordst, 0))
+>;
+
+def double_icmp_zero_and_or_combine : GICombineGroup<[double_icmp_zero_and_combine,
+ double_icmp_zero_or_combine]>;
+
def and_or_disjoint_mask : GICombineRule<
(defs root:$root, build_fn_matchinfo:$info),
(match (wip_match_opcode G_AND):$root,
@@ -1343,7 +1374,7 @@ def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
and_or_disjoint_mask, fma_combines, fold_binop_into_select,
sub_add_reg, select_to_minmax, redundant_binop_in_equality,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
- combine_concat_vector]>;
+ combine_concat_vector, double_icmp_zero_and_or_combine]>;
// A combine group used to for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
diff --git a/llvm/include/llvm/TargetParser/AArch64TargetParser.h b/llvm/include/llvm/TargetParser/AArch64TargetParser.h
index 93e9ed46642d..b539627604c3 100644
--- a/llvm/include/llvm/TargetParser/AArch64TargetParser.h
+++ b/llvm/include/llvm/TargetParser/AArch64TargetParser.h
@@ -658,8 +658,8 @@ inline constexpr CpuInfo CpuInfos[] = {
AArch64::AEK_SSBS}))},
{"neoverse-n2", ARMV9A,
(AArch64::ExtensionBitset(
- {AArch64::AEK_BF16, AArch64::AEK_DOTPROD,
- AArch64::AEK_FP16, AArch64::AEK_I8MM, AArch64::AEK_MTE,
+ {AArch64::AEK_BF16, AArch64::AEK_DOTPROD, AArch64::AEK_FP16,
+ AArch64::AEK_FP16FML, AArch64::AEK_I8MM, AArch64::AEK_MTE,
AArch64::AEK_SB, AArch64::AEK_SSBS, AArch64::AEK_SVE,
AArch64::AEK_SVE2, AArch64::AEK_SVE2BITPERM}))},
{"neoverse-512tvb", ARMV8_4A,
@@ -813,14 +813,16 @@ inline constexpr CpuInfo CpuInfos[] = {
AArch64::AEK_SSBS, AArch64::AEK_CSSC}))},
};
-// An alias for a CPU.
-struct CpuAlias {
- StringRef Alias;
+// Name alias.
+struct Alias {
+ StringRef AltName;
StringRef Name;
};
-inline constexpr CpuAlias CpuAliases[] = {{"cobalt-100", "neoverse-n2"},
- {"grace", "neoverse-v2"}};
+inline constexpr Alias CpuAliases[] = {{"cobalt-100", "neoverse-n2"},
+ {"grace", "neoverse-v2"}};
+
+inline constexpr Alias ExtAliases[] = {{"rdma", "rdm"}};
bool getExtensionFeatures(
const AArch64::ExtensionBitset &Extensions,
@@ -828,6 +830,7 @@ bool getExtensionFeatures(
StringRef getArchExtFeature(StringRef ArchExt);
StringRef resolveCPUAlias(StringRef CPU);
+StringRef resolveExtAlias(StringRef ArchExt);
// Information by Name
const ArchInfo *getArchForCpu(StringRef CPU);
diff --git a/llvm/include/llvm/TargetParser/ARMTargetParser.def b/llvm/include/llvm/TargetParser/ARMTargetParser.def
index 1797a1b238d3..f0ddaa1459e5 100644
--- a/llvm/include/llvm/TargetParser/ARMTargetParser.def
+++ b/llvm/include/llvm/TargetParser/ARMTargetParser.def
@@ -346,8 +346,8 @@ ARM_CPU_NAME("cortex-x1c", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
ARM_CPU_NAME("neoverse-n1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
(ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("neoverse-n2", ARMV9A, FK_NEON_FP_ARMV8, false,
- (ARM::AEK_BF16 | ARM::AEK_DOTPROD | ARM::AEK_I8MM | ARM::AEK_RAS |
- ARM::AEK_SB))
+ (ARM::AEK_BF16 | ARM::AEK_DOTPROD | ARM::AEK_FP16FML |
+ ARM::AEK_I8MM | ARM::AEK_RAS | ARM::AEK_SB ))
ARM_CPU_NAME("neoverse-v1", ARMV8_4A, FK_CRYPTO_NEON_FP_ARMV8, false,
(ARM::AEK_RAS | ARM::AEK_FP16 | ARM::AEK_BF16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
index e7da677c7d3e..553b4efe0e30 100644
--- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -25,6 +25,9 @@ namespace RISCV {
// We use 64 bits as the known part in the scalable vector types.
static constexpr unsigned RVVBitsPerBlock = 64;
+void getFeaturesForCPU(StringRef CPU,
+ SmallVectorImpl<std::string> &EnabledFeatures,
+ bool NeedPlus = false);
bool parseCPU(StringRef CPU, bool IsRV64);
bool parseTuneCPU(StringRef CPU, bool IsRV64);
StringRef getMArchFromMcpu(StringRef CPU);
diff --git a/llvm/include/llvm/TextAPI/Record.h b/llvm/include/llvm/TextAPI/Record.h
index 02af3098cc5a..3b30e6c8c267 100644
--- a/llvm/include/llvm/TextAPI/Record.h
+++ b/llvm/include/llvm/TextAPI/Record.h
@@ -156,6 +156,8 @@ public:
: ObjCContainerRecord(Name, RecordLinkage::Unknown),
ClassToExtend(ClassToExtend) {}
+ StringRef getSuperClassName() const { return ClassToExtend; }
+
private:
StringRef ClassToExtend;
};
diff --git a/llvm/include/llvm/TextAPI/RecordsSlice.h b/llvm/include/llvm/TextAPI/RecordsSlice.h
index 5b214d0bfff5..57b23e5ea29e 100644
--- a/llvm/include/llvm/TextAPI/RecordsSlice.h
+++ b/llvm/include/llvm/TextAPI/RecordsSlice.h
@@ -50,9 +50,9 @@ public:
/// Add non-ObjC global record.
///
/// \param Name The name of symbol.
- /// \param Flags The flags that describe attributes of the symbol.
- /// \param GV The kind of global.
/// \param Linkage The linkage of symbol.
+ /// \param GV The kind of global.
+ /// \param Flags The flags that describe attributes of the symbol.
/// \return The non-owning pointer to added record in slice.
GlobalRecord *addGlobal(StringRef Name, RecordLinkage Linkage,
GlobalRecord::Kind GV,
@@ -69,6 +69,7 @@ public:
/// Add ObjC IVar record.
///
+ /// \param Container Owning pointer for instance variable.
/// \param Name The name of ivar, not symbol.
/// \param Linkage The linkage of symbol.
/// \return The non-owning pointer to added record in slice.
@@ -93,7 +94,7 @@ public:
/// Find ObjC Category.
///
/// \param ClassToExtend The name of class, not full symbol name.
- /// \param Categories The name of category.
+ /// \param Category The name of category.
/// \return The non-owning pointer to record in slice.
ObjCCategoryRecord *findObjCCategory(StringRef ClassToExtend,
StringRef Category) const;
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index a91e2f690999..e91abaf0780a 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -5130,7 +5130,11 @@ bool LLParser::parseDIStringType(MDNode *&Result, bool IsDistinct) {
/// ::= !DIDerivedType(tag: DW_TAG_pointer_type, name: "int", file: !0,
/// line: 7, scope: !1, baseType: !2, size: 32,
/// align: 32, offset: 0, flags: 0, extraData: !3,
-/// dwarfAddressSpace: 3)
+/// dwarfAddressSpace: 3, ptrAuthKey: 1,
+/// ptrAuthIsAddressDiscriminated: true,
+/// ptrAuthExtraDiscriminator: 0x1234,
+/// ptrAuthIsaPointer: 1, ptrAuthAuthenticatesNullValues:1
+/// )
bool LLParser::parseDIDerivedType(MDNode *&Result, bool IsDistinct) {
#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED) \
REQUIRED(tag, DwarfTagField, ); \
@@ -5145,19 +5149,30 @@ bool LLParser::parseDIDerivedType(MDNode *&Result, bool IsDistinct) {
OPTIONAL(flags, DIFlagField, ); \
OPTIONAL(extraData, MDField, ); \
OPTIONAL(dwarfAddressSpace, MDUnsignedField, (UINT32_MAX, UINT32_MAX)); \
- OPTIONAL(annotations, MDField, );
+ OPTIONAL(annotations, MDField, ); \
+ OPTIONAL(ptrAuthKey, MDUnsignedField, (0, 7)); \
+ OPTIONAL(ptrAuthIsAddressDiscriminated, MDBoolField, ); \
+ OPTIONAL(ptrAuthExtraDiscriminator, MDUnsignedField, (0, 0xffff)); \
+ OPTIONAL(ptrAuthIsaPointer, MDBoolField, ); \
+ OPTIONAL(ptrAuthAuthenticatesNullValues, MDBoolField, );
PARSE_MD_FIELDS();
#undef VISIT_MD_FIELDS
std::optional<unsigned> DWARFAddressSpace;
if (dwarfAddressSpace.Val != UINT32_MAX)
DWARFAddressSpace = dwarfAddressSpace.Val;
+ std::optional<DIDerivedType::PtrAuthData> PtrAuthData;
+ if (ptrAuthKey.Val)
+ PtrAuthData = DIDerivedType::PtrAuthData(
+ (unsigned)ptrAuthKey.Val, ptrAuthIsAddressDiscriminated.Val,
+ (unsigned)ptrAuthExtraDiscriminator.Val, ptrAuthIsaPointer.Val,
+ ptrAuthAuthenticatesNullValues.Val);
Result = GET_OR_DISTINCT(DIDerivedType,
(Context, tag.Val, name.Val, file.Val, line.Val,
scope.Val, baseType.Val, size.Val, align.Val,
- offset.Val, DWARFAddressSpace, flags.Val,
- extraData.Val, annotations.Val));
+ offset.Val, DWARFAddressSpace, PtrAuthData,
+ flags.Val, extraData.Val, annotations.Val));
return false;
}
diff --git a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
index 770eb83af17f..bdc2db82dfbe 100644
--- a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
+++ b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -1556,7 +1556,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
break;
}
case bitc::METADATA_DERIVED_TYPE: {
- if (Record.size() < 12 || Record.size() > 14)
+ if (Record.size() < 12 || Record.size() > 15)
return error("Invalid record");
// DWARF address space is encoded as N->getDWARFAddressSpace() + 1. 0 means
@@ -1566,8 +1566,18 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
DWARFAddressSpace = Record[12] - 1;
Metadata *Annotations = nullptr;
- if (Record.size() > 13 && Record[13])
- Annotations = getMDOrNull(Record[13]);
+ std::optional<DIDerivedType::PtrAuthData> PtrAuthData;
+
+ // Only look for annotations/ptrauth if both are allocated.
+ // If not, we can't tell which was intended to be embedded, as both ptrauth
+ // and annotations have been expected at Record[13] at various times.
+ if (Record.size() > 14) {
+ if (Record[13])
+ Annotations = getMDOrNull(Record[13]);
+
+ if (Record[14])
+ PtrAuthData = DIDerivedType::PtrAuthData(Record[14]);
+ }
IsDistinct = Record[0];
DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[10]);
@@ -1577,7 +1587,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
getMDOrNull(Record[3]), Record[4],
getDITypeRefOrNull(Record[5]),
getDITypeRefOrNull(Record[6]), Record[7], Record[8],
- Record[9], DWARFAddressSpace, Flags,
+ Record[9], DWARFAddressSpace, PtrAuthData, Flags,
getDITypeRefOrNull(Record[11]), Annotations)),
NextMetadataNo);
NextMetadataNo++;
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 656f2a6ce870..85319dc69e94 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -1804,6 +1804,11 @@ void ModuleBitcodeWriter::writeDIDerivedType(const DIDerivedType *N,
Record.push_back(VE.getMetadataOrNullID(N->getAnnotations().get()));
+ if (auto PtrAuthData = N->getPtrAuthData())
+ Record.push_back(PtrAuthData->Payload.RawData);
+ else
+ Record.push_back(0);
+
Stream.EmitRecord(bitc::METADATA_DERIVED_TYPE, Record, Abbrev);
Record.clear();
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp b/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp
index 9e1727a0b8d1..55cdc3c92864 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AccelTable.cpp
@@ -38,9 +38,8 @@ void AccelTableBase::computeBucketCount() {
for (const auto &E : Entries)
Uniques.push_back(E.second.HashValue);
- auto Counts = llvm::dwarf::getDebugNamesBucketAndHashCount(Uniques);
- BucketCount = Counts.first;
- UniqueHashCount = Counts.second;
+ std::tie(BucketCount, UniqueHashCount) =
+ llvm::dwarf::getDebugNamesBucketAndHashCount(Uniques);
}
void AccelTableBase::finalize(AsmPrinter *Asm, StringRef Prefix) {
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index d462859e4894..ae0226934804 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -803,6 +803,20 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIDerivedType *DTy) {
if (DTy->getDWARFAddressSpace())
addUInt(Buffer, dwarf::DW_AT_address_class, dwarf::DW_FORM_data4,
*DTy->getDWARFAddressSpace());
+ if (auto Key = DTy->getPtrAuthKey())
+ addUInt(Buffer, dwarf::DW_AT_LLVM_ptrauth_key, dwarf::DW_FORM_data1, *Key);
+ if (auto AddrDisc = DTy->isPtrAuthAddressDiscriminated())
+ if (AddrDisc.value())
+ addFlag(Buffer, dwarf::DW_AT_LLVM_ptrauth_address_discriminated);
+ if (auto Disc = DTy->getPtrAuthExtraDiscriminator())
+ addUInt(Buffer, dwarf::DW_AT_LLVM_ptrauth_extra_discriminator,
+ dwarf::DW_FORM_data2, *Disc);
+ if (auto IsaPointer = DTy->isPtrAuthIsaPointer())
+ if (*IsaPointer)
+ addFlag(Buffer, dwarf::DW_AT_LLVM_ptrauth_isa_pointer);
+ if (auto AuthenticatesNullValues = DTy->getPtrAuthAuthenticatesNullValues())
+ if (*AuthenticatesNullValues)
+ addFlag(Buffer, dwarf::DW_AT_LLVM_ptrauth_authenticates_null_values);
}
void DwarfUnit::constructSubprogramArguments(DIE &Buffer, DITypeRefArray Args) {
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 38bb808dd5bd..7c986dbbc2c7 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -596,8 +596,6 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
const Value *CondVal = BrInst.getCondition();
MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
- const auto &TLI = *MF->getSubtarget().getTargetLowering();
-
// If this is a series of conditions that are or'd or and'd together, emit
// this as a sequence of branches instead of setcc's with and/or operations.
// As long as jumps are not expensive (exceptions for multi-use logic ops,
@@ -617,7 +615,7 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
// jle foo
using namespace PatternMatch;
const Instruction *CondI = dyn_cast<Instruction>(CondVal);
- if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
+ if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
!BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
Value *Vec;
@@ -1385,9 +1383,8 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
return true;
}
- auto &TLI = *MF->getSubtarget().getTargetLowering();
MachineMemOperand::Flags Flags =
- TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
+ TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
if (AA->pointsToConstantMemory(
MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
@@ -1434,8 +1431,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
return true;
}
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
+ MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
for (unsigned i = 0; i < Vals.size(); ++i) {
Register Addr;
@@ -1779,8 +1775,7 @@ void IRTranslator::getStackGuard(Register DstReg,
auto MIB =
MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
+ Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
if (!Global)
return;
@@ -2111,9 +2106,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
// does. Simplest intrinsic ever!
return true;
case Intrinsic::vastart: {
- auto &TLI = *MF->getSubtarget().getTargetLowering();
Value *Ptr = CI.getArgOperand(0);
- unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
+ unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
Align Alignment = getKnownAlignment(Ptr, *DL);
MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
@@ -2189,14 +2183,13 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
case Intrinsic::fmuladd: {
const TargetMachine &TM = MF->getTarget();
- const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
Register Dst = getOrCreateVReg(CI);
Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
- TLI.isFMAFasterThanFMulAndFAdd(*MF,
- TLI.getValueType(*DL, CI.getType()))) {
+ TLI->isFMAFasterThanFMulAndFAdd(*MF,
+ TLI->getValueType(*DL, CI.getType()))) {
// TODO: Revisit this to see if we should move this part of the
// lowering to the combiner.
MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
@@ -2254,10 +2247,9 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
getStackGuard(getOrCreateVReg(CI), MIRBuilder);
return true;
case Intrinsic::stackprotector: {
- const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
Register GuardVal;
- if (TLI.useLoadStackGuardNode()) {
+ if (TLI->useLoadStackGuardNode()) {
GuardVal = MRI->createGenericVirtualRegister(PtrTy);
getStackGuard(GuardVal, MIRBuilder);
} else
@@ -2635,10 +2627,9 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
}
// Add a MachineMemOperand if it is a target mem intrinsic.
- const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
TargetLowering::IntrinsicInfo Info;
// TODO: Add a GlobalISel version of getTgtMemIntrinsic.
- if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
+ if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
Align Alignment = Info.align.value_or(
DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
LLT MemTy = Info.memVT.isSimple()
@@ -2818,10 +2809,9 @@ bool IRTranslator::translateLandingPad(const User &U,
// If there aren't registers to copy the values into (e.g., during SjLj
// exceptions), then don't bother.
- auto &TLI = *MF->getSubtarget().getTargetLowering();
const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
- if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
- TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
+ if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
+ TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
return true;
// If landingpad's return type is token type, we don't create DAG nodes
@@ -2852,7 +2842,7 @@ bool IRTranslator::translateLandingPad(const User &U,
assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
// Mark exception register as live in.
- Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
+ Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
if (!ExceptionReg)
return false;
@@ -2860,7 +2850,7 @@ bool IRTranslator::translateLandingPad(const User &U,
ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
- Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
+ Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
if (!SelectorReg)
return false;
@@ -2986,8 +2976,7 @@ bool IRTranslator::translateExtractElement(const User &U,
Register Res = getOrCreateVReg(U);
Register Val = getOrCreateVReg(*U.getOperand(0));
- const auto &TLI = *MF->getSubtarget().getTargetLowering();
- unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
+ unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
Register Idx;
if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
if (CI->getBitWidth() != PreferredVecIdxWidth) {
@@ -3039,8 +3028,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
MachineIRBuilder &MIRBuilder) {
const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
+ auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
auto Res = getOrCreateVRegs(I);
Register OldValRes = Res[0];
@@ -3061,8 +3049,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
bool IRTranslator::translateAtomicRMW(const User &U,
MachineIRBuilder &MIRBuilder) {
const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
+ auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
Register Res = getOrCreateVReg(I);
Register Addr = getOrCreateVReg(*I.getPointerOperand());
@@ -3302,8 +3289,7 @@ bool IRTranslator::translate(const Instruction &Inst) {
CurBuilder->setDebugLoc(Inst.getDebugLoc());
CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- if (TLI.fallBackToDAGISel(Inst))
+ if (TLI->fallBackToDAGISel(Inst))
return false;
switch (Inst.getOpcode()) {
@@ -3454,9 +3440,8 @@ bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
// Check if we need to generate stack-protector guard checks.
StackProtector &SP = getAnalysis<StackProtector>();
if (SP.shouldEmitSDCheck(BB)) {
- const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
bool FunctionBasedInstrumentation =
- TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
+ TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
}
// Handle stack protector.
@@ -3501,10 +3486,9 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
MachineBasicBlock *ParentBB) {
CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
// First create the loads to the guard/stack slot for the comparison.
- const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
- LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
+ LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
int FI = MFI.getStackProtectorIndex();
@@ -3522,13 +3506,13 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
.getReg(0);
- if (TLI.useStackGuardXorFP()) {
+ if (TLI->useStackGuardXorFP()) {
LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
return false;
}
// Retrieve guard check function, nullptr if instrumentation is inlined.
- if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
+ if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
// This path is currently untestable on GlobalISel, since the only platform
// that needs this seems to be Windows, and we fall back on that currently.
// The code still lives here in case that changes.
@@ -3563,13 +3547,13 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
// If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
// Otherwise, emit a volatile load to retrieve the stack guard value.
- if (TLI.useLoadStackGuardNode()) {
+ if (TLI->useLoadStackGuardNode()) {
Guard =
MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
getStackGuard(Guard, *CurBuilder);
} else {
// TODO: test using android subtarget when we support @llvm.thread.pointer.
- const Value *IRGuard = TLI.getSDagStackGuard(M);
+ const Value *IRGuard = TLI->getSDagStackGuard(M);
Register GuardPtr = getOrCreateVReg(*IRGuard);
Guard = CurBuilder
@@ -3593,13 +3577,12 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
MachineBasicBlock *FailureBB) {
CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
- const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
- const char *Name = TLI.getLibcallName(Libcall);
+ const char *Name = TLI->getLibcallName(Libcall);
CallLowering::CallLoweringInfo Info;
- Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+ Info.CallConv = TLI->getLibcallCallingConv(Libcall);
Info.Callee = MachineOperand::CreateES(Name);
Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
0};
@@ -3662,6 +3645,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
? EnableCSEInIRTranslator
: TPC->isGISelCSEEnabled();
+ TLI = MF->getSubtarget().getTargetLowering();
if (EnableCSE) {
EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
@@ -3696,12 +3680,8 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
- const auto &TLI = *MF->getSubtarget().getTargetLowering();
-
SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
- SL->init(TLI, TM, *DL);
-
-
+ SL->init(*TLI, TM, *DL);
assert(PendingPHIs.empty() && "stale PHIs");
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c4d49adc21c4..33ada3655dc7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -27835,7 +27835,7 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
bool IsAtomic;
SDValue BasePtr;
int64_t Offset;
- std::optional<int64_t> NumBytes;
+ LocationSize NumBytes;
MachineMemOperand *MMO;
};
@@ -27853,7 +27853,8 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
LSN->isAtomic(),
LSN->getBasePtr(),
Offset /*base offset*/,
- std::optional<int64_t>(Size),
+ Size != ~UINT64_C(0) ? LocationSize::precise(Size)
+ : LocationSize::beforeOrAfterPointer(),
LSN->getMemOperand()};
}
if (const auto *LN = cast<LifetimeSDNode>(N))
@@ -27861,13 +27862,15 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
/*isAtomic*/ false,
LN->getOperand(1),
(LN->hasOffset()) ? LN->getOffset() : 0,
- (LN->hasOffset()) ? std::optional<int64_t>(LN->getSize())
- : std::optional<int64_t>(),
+ (LN->hasOffset()) ? LocationSize::precise(LN->getSize())
+ : LocationSize::beforeOrAfterPointer(),
(MachineMemOperand *)nullptr};
// Default.
return {false /*isvolatile*/,
- /*isAtomic*/ false, SDValue(),
- (int64_t)0 /*offset*/, std::optional<int64_t>() /*size*/,
+ /*isAtomic*/ false,
+ SDValue(),
+ (int64_t)0 /*offset*/,
+ LocationSize::beforeOrAfterPointer() /*size*/,
(MachineMemOperand *)nullptr};
};
@@ -27922,18 +27925,20 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
int64_t SrcValOffset1 = MUC1.MMO->getOffset();
Align OrigAlignment0 = MUC0.MMO->getBaseAlign();
Align OrigAlignment1 = MUC1.MMO->getBaseAlign();
- auto &Size0 = MUC0.NumBytes;
- auto &Size1 = MUC1.NumBytes;
+ LocationSize Size0 = MUC0.NumBytes;
+ LocationSize Size1 = MUC1.NumBytes;
if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
- Size0.has_value() && Size1.has_value() && *Size0 == *Size1 &&
- OrigAlignment0 > *Size0 && SrcValOffset0 % *Size0 == 0 &&
- SrcValOffset1 % *Size1 == 0) {
+ Size0.hasValue() && Size1.hasValue() && Size0 == Size1 &&
+ OrigAlignment0 > Size0.getValue() &&
+ SrcValOffset0 % Size0.getValue() == 0 &&
+ SrcValOffset1 % Size1.getValue() == 0) {
int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0.value();
int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1.value();
// There is no overlap between these relatively aligned accesses of
// similar size. Return no alias.
- if ((OffAlign0 + *Size0) <= OffAlign1 || (OffAlign1 + *Size1) <= OffAlign0)
+ if ((OffAlign0 + (int64_t)Size0.getValue()) <= OffAlign1 ||
+ (OffAlign1 + (int64_t)Size1.getValue()) <= OffAlign0)
return false;
}
@@ -27946,12 +27951,12 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
UseAA = false;
#endif
- if (UseAA && AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() && Size0 &&
- Size1) {
+ if (UseAA && AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() &&
+ Size0.hasValue() && Size1.hasValue()) {
// Use alias analysis information.
int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
- int64_t Overlap0 = *Size0 + SrcValOffset0 - MinOffset;
- int64_t Overlap1 = *Size1 + SrcValOffset1 - MinOffset;
+ int64_t Overlap0 = Size0.getValue() + SrcValOffset0 - MinOffset;
+ int64_t Overlap1 = Size1.getValue() + SrcValOffset1 - MinOffset;
if (AA->isNoAlias(
MemoryLocation(MUC0.MMO->getValue(), Overlap0,
UseTBAA ? MUC0.MMO->getAAInfo() : AAMDNodes()),
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index e150f27240d7..5b1b7c7c6277 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3645,32 +3645,42 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
}
}
}
- } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
- // If this is a ZEXTLoad and we are looking at the loaded value.
- EVT VT = LD->getMemoryVT();
- unsigned MemBits = VT.getScalarSizeInBits();
- Known.Zero.setBitsFrom(MemBits);
- } else if (const MDNode *Ranges = LD->getRanges()) {
- EVT VT = LD->getValueType(0);
-
- // TODO: Handle for extending loads
- if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
+ } else if (Op.getResNo() == 0) {
+ KnownBits Known0(!LD->getMemoryVT().isScalableVT()
+ ? LD->getMemoryVT().getFixedSizeInBits()
+ : BitWidth);
+ EVT VT = Op.getValueType();
+ // Fill in any known bits from range information. There are 3 types being
+ // used. The results VT (same vector elt size as BitWidth), the loaded
+ // MemoryVT (which may or may not be vector) and the range VTs original
+      // type. The range metadata needs the full range (i.e
+      // MemoryVT().getSizeInBits()), which is truncated to the correct elt size
+      // if it is known. These are then extended to the original VT sizes below.
+ if (const MDNode *MD = LD->getRanges()) {
+ computeKnownBitsFromRangeMetadata(*MD, Known0);
if (VT.isVector()) {
// Handle truncation to the first demanded element.
// TODO: Figure out which demanded elements are covered
if (DemandedElts != 1 || !getDataLayout().isLittleEndian())
break;
+ Known0 = Known0.trunc(BitWidth);
+ }
+ }
- // Handle the case where a load has a vector type, but scalar memory
- // with an attached range.
- EVT MemVT = LD->getMemoryVT();
- KnownBits KnownFull(MemVT.getSizeInBits());
+ if (LD->getMemoryVT().isVector())
+ Known0 = Known0.trunc(LD->getMemoryVT().getScalarSizeInBits());
- computeKnownBitsFromRangeMetadata(*Ranges, KnownFull);
- Known = KnownFull.trunc(BitWidth);
- } else
- computeKnownBitsFromRangeMetadata(*Ranges, Known);
- }
+ // Extend the Known bits from memory to the size of the result.
+ if (ISD::isZEXTLoad(Op.getNode()))
+ Known = Known0.zext(BitWidth);
+ else if (ISD::isSEXTLoad(Op.getNode()))
+ Known = Known0.sext(BitWidth);
+ else if (ISD::isEXTLoad(Op.getNode()))
+ Known = Known0.anyext(BitWidth);
+ else
+ Known = Known0;
+ assert(Known.getBitWidth() == BitWidth);
+ return Known;
}
break;
}
@@ -9106,26 +9116,6 @@ SDValue SelectionDAG::getExtStridedLoadVP(
Stride, Mask, EVL, MemVT, MMO, IsExpanding);
}
-SDValue SelectionDAG::getIndexedStridedLoadVP(SDValue OrigLoad, const SDLoc &DL,
- SDValue Base, SDValue Offset,
- ISD::MemIndexedMode AM) {
- auto *SLD = cast<VPStridedLoadSDNode>(OrigLoad);
- assert(SLD->getOffset().isUndef() &&
- "Strided load is already a indexed load!");
- // Don't propagate the invariant or dereferenceable flags.
- auto MMOFlags =
- SLD->getMemOperand()->getFlags() &
- ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
- MachineFunction &MF = getMachineFunction();
- MachineMemOperand *MMO = MF.getMachineMemOperand(
- SLD->getPointerInfo(), MMOFlags, SLD->getMemOperand()->getSize(),
- SLD->getOriginalAlign(), SLD->getAAInfo());
- return getStridedLoadVP(AM, SLD->getExtensionType(), OrigLoad.getValueType(),
- DL, SLD->getChain(), Base, Offset, SLD->getStride(),
- SLD->getMask(), SLD->getVectorLength(),
- SLD->getMemoryVT(), MMO, SLD->isExpandingLoad());
-}
-
SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL,
SDValue Val, SDValue Ptr,
SDValue Offset, SDValue Stride,
@@ -9211,38 +9201,6 @@ SDValue SelectionDAG::getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL,
return V;
}
-SDValue SelectionDAG::getIndexedStridedStoreVP(SDValue OrigStore,
- const SDLoc &DL, SDValue Base,
- SDValue Offset,
- ISD::MemIndexedMode AM) {
- auto *SST = cast<VPStridedStoreSDNode>(OrigStore);
- assert(SST->getOffset().isUndef() &&
- "Strided store is already an indexed store!");
- SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
- SDValue Ops[] = {
- SST->getChain(), SST->getValue(), Base, Offset, SST->getStride(),
- SST->getMask(), SST->getVectorLength()};
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
- ID.AddInteger(SST->getMemoryVT().getRawBits());
- ID.AddInteger(SST->getRawSubclassData());
- ID.AddInteger(SST->getPointerInfo().getAddrSpace());
- void *IP = nullptr;
- if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
- return SDValue(E, 0);
-
- auto *N = newSDNode<VPStridedStoreSDNode>(
- DL.getIROrder(), DL.getDebugLoc(), VTs, AM, SST->isTruncatingStore(),
- SST->isCompressingStore(), SST->getMemoryVT(), SST->getMemOperand());
- createOperands(N, Ops);
-
- CSEMap.InsertNode(N, IP);
- InsertNode(N);
- SDValue V(N, 0);
- NewSDValueDbgMsg(V, "Creating new node: ", this);
- return V;
-}
-
SDValue SelectionDAG::getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
ISD::MemIndexType IndexType) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
index 66825d845c19..9670c3ac8430 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
@@ -91,11 +91,10 @@ bool BaseIndexOffset::equalBaseIndex(const BaseIndexOffset &Other,
}
bool BaseIndexOffset::computeAliasing(const SDNode *Op0,
- const std::optional<int64_t> NumBytes0,
+ const LocationSize NumBytes0,
const SDNode *Op1,
- const std::optional<int64_t> NumBytes1,
+ const LocationSize NumBytes1,
const SelectionDAG &DAG, bool &IsAlias) {
-
BaseIndexOffset BasePtr0 = match(Op0, DAG);
if (!BasePtr0.getBase().getNode())
return false;
@@ -105,27 +104,26 @@ bool BaseIndexOffset::computeAliasing(const SDNode *Op0,
return false;
int64_t PtrDiff;
- if (NumBytes0 && NumBytes1 &&
- BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff)) {
+ if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff)) {
// If the size of memory access is unknown, do not use it to analysis.
// One example of unknown size memory access is to load/store scalable
// vector objects on the stack.
// BasePtr1 is PtrDiff away from BasePtr0. They alias if none of the
// following situations arise:
- if (PtrDiff >= 0 &&
- *NumBytes0 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
+ if (PtrDiff >= 0 && NumBytes0.hasValue() && !NumBytes0.isScalable()) {
// [----BasePtr0----]
// [---BasePtr1--]
// ========PtrDiff========>
- IsAlias = !(*NumBytes0 <= PtrDiff);
+ IsAlias = !(static_cast<int64_t>(NumBytes0.getValue().getFixedValue()) <=
+ PtrDiff);
return true;
}
- if (PtrDiff < 0 &&
- *NumBytes1 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
+ if (PtrDiff < 0 && NumBytes1.hasValue() && !NumBytes1.isScalable()) {
// [----BasePtr0----]
// [---BasePtr1--]
// =====(-PtrDiff)====>
- IsAlias = !((PtrDiff + *NumBytes1) <= 0);
+ IsAlias = !((PtrDiff + static_cast<int64_t>(
+ NumBytes1.getValue().getFixedValue())) <= 0);
return true;
}
return false;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ab2f42d2024c..4f6263cc492f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -26,6 +26,7 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
@@ -93,6 +94,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
@@ -2446,6 +2448,147 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
SL->SwitchCases.push_back(CB);
}
+// Collect dependencies on V recursively. This is used for the cost analysis in
+// `shouldKeepJumpConditionsTogether`.
+static bool
+collectInstructionDeps(SmallPtrSet<const Instruction *, 8> *Deps,
+ const Value *V,
+ SmallPtrSet<const Instruction *, 8> *Necessary = nullptr,
+ unsigned Depth = 0) {
+ // Return false if we have an incomplete count.
+ if (Depth >= SelectionDAG::MaxRecursionDepth)
+ return false;
+
+ auto *I = dyn_cast<Instruction>(V);
+ if (I == nullptr)
+ return true;
+
+ if (Necessary != nullptr) {
+ // This instruction is necessary for the other side of the condition so
+ // don't count it.
+ if (Necessary->contains(I))
+ return true;
+ }
+
+ // Already added this dep.
+ if (!Deps->insert(I).second)
+ return true;
+
+ for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
+ if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
+ Depth + 1))
+ return false;
+ return true;
+}
+
+bool SelectionDAGBuilder::shouldKeepJumpConditionsTogether(
+ const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
+ Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
+ TargetLoweringBase::CondMergingParams Params) const {
+ if (I.getNumSuccessors() != 2)
+ return false;
+
+ if (Params.BaseCost < 0)
+ return false;
+
+ // Baseline cost.
+ InstructionCost CostThresh = Params.BaseCost;
+
+ BranchProbabilityInfo *BPI = nullptr;
+ if (Params.LikelyBias || Params.UnlikelyBias)
+ BPI = FuncInfo.BPI;
+ if (BPI != nullptr) {
+ // See if we are either likely to get an early out or compute both lhs/rhs
+ // of the condition.
+ BasicBlock *IfFalse = I.getSuccessor(0);
+ BasicBlock *IfTrue = I.getSuccessor(1);
+
+ std::optional<bool> Likely;
+ if (BPI->isEdgeHot(I.getParent(), IfTrue))
+ Likely = true;
+ else if (BPI->isEdgeHot(I.getParent(), IfFalse))
+ Likely = false;
+
+ if (Likely) {
+ if (Opc == (*Likely ? Instruction::And : Instruction::Or))
+ // Its likely we will have to compute both lhs and rhs of condition
+ CostThresh += Params.LikelyBias;
+ else {
+ if (Params.UnlikelyBias < 0)
+ return false;
+        // It's likely we will get an early out.
+ CostThresh -= Params.UnlikelyBias;
+ }
+ }
+ }
+
+ if (CostThresh <= 0)
+ return false;
+
+ // Collect "all" instructions that lhs condition is dependent on.
+ SmallPtrSet<const Instruction *, 8> LhsDeps, RhsDeps;
+ collectInstructionDeps(&LhsDeps, Lhs);
+ // Collect "all" instructions that rhs condition is dependent on AND are
+ // dependencies of lhs. This gives us an estimate on which instructions we
+ // stand to save by splitting the condition.
+ if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
+ return false;
+  // Add the compare instruction itself unless it's a dependency on the LHS.
+ if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
+ if (!LhsDeps.contains(RhsI))
+ RhsDeps.insert(RhsI);
+
+ const auto &TLI = DAG.getTargetLoweringInfo();
+ const auto &TTI =
+ TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
+
+ InstructionCost CostOfIncluding = 0;
+  // See if this instruction will need to be computed independently of whether RHS
+ // is.
+ auto ShouldCountInsn = [&RhsDeps](const Instruction *Ins) {
+ for (const auto *U : Ins->users()) {
+ // If user is independent of RHS calculation we don't need to count it.
+ if (auto *UIns = dyn_cast<Instruction>(U))
+ if (!RhsDeps.contains(UIns))
+ return false;
+ }
+ return true;
+ };
+
+ // Prune instructions from RHS Deps that are dependencies of unrelated
+ // instructions. The value (SelectionDAG::MaxRecursionDepth) is fairly
+  // arbitrary and just meant to cap how much time we spend in the pruning
+  // loop. It's highly unlikely to come into effect.
+ const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
+ // Stop after a certain point. No incorrectness from including too many
+ // instructions.
+ for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
+ const Instruction *ToDrop = nullptr;
+ for (const auto *Ins : RhsDeps) {
+ if (!ShouldCountInsn(Ins)) {
+ ToDrop = Ins;
+ break;
+ }
+ }
+ if (ToDrop == nullptr)
+ break;
+ RhsDeps.erase(ToDrop);
+ }
+
+ for (const auto *Ins : RhsDeps) {
+ // Finally accumulate latency that we can only attribute to computing the
+ // RHS condition. Use latency because we are essentially trying to calculate
+ // the cost of the dependency chain.
+ // Possible TODO: We could try to estimate ILP and make this more precise.
+ CostOfIncluding +=
+ TTI.getInstructionCost(Ins, TargetTransformInfo::TCK_Latency);
+
+ if (CostOfIncluding > CostThresh)
+ return false;
+ }
+ return true;
+}
+
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
@@ -2660,8 +2803,13 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
Opcode = Instruction::Or;
- if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
- match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
+ if (Opcode &&
+ !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
+ match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
+ !shouldKeepJumpConditionsTogether(
+ FuncInfo, I, Opcode, BOp0, BOp1,
+ DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
+ Opcode, BOp0, BOp1))) {
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
getEdgeProbability(BrMBB, Succ0MBB),
getEdgeProbability(BrMBB, Succ1MBB),
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 47657313cb6a..2084de473b80 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -385,6 +385,11 @@ public:
N = NewN;
}
+ bool shouldKeepJumpConditionsTogether(
+ const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
+ Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
+ TargetLoweringBase::CondMergingParams Params) const;
+
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 654a61572708..6943ce261d9d 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -2418,6 +2418,15 @@ MCSection *TargetLoweringObjectFileXCOFF::getSectionForExternalReference(
SmallString<128> Name;
getNameWithPrefix(Name, GO, TM);
+ // AIX TLS local-dynamic does not need the external reference for the
+ // "_$TLSML" symbol.
+ if (GO->getThreadLocalMode() == GlobalVariable::LocalDynamicTLSModel &&
+ GO->hasName() && GO->getName() == "_$TLSML") {
+ return getContext().getXCOFFSection(
+ Name, SectionKind::getData(),
+ XCOFF::CsectProperties(XCOFF::XMC_TC, XCOFF::XTY_SD));
+ }
+
XCOFF::StorageMappingClass SMC =
isa<Function>(GO) ? XCOFF::XMC_DS : XCOFF::XMC_UA;
if (GO->isThreadLocal())
@@ -2675,13 +2684,17 @@ MCSection *TargetLoweringObjectFileXCOFF::getSectionForTOCEntry(
// the chance of needing -bbigtoc is decreased. Also, the toc-entry for
// EH info is never referenced directly using instructions so it can be
// allocated with TE storage-mapping class.
+ // The "_$TLSML" symbol for TLS local-dynamic mode requires XMC_TC, otherwise
+ // the AIX assembler will complain.
return getContext().getXCOFFSection(
cast<MCSymbolXCOFF>(Sym)->getSymbolTableName(), SectionKind::getData(),
- XCOFF::CsectProperties((TM.getCodeModel() == CodeModel::Large ||
- cast<MCSymbolXCOFF>(Sym)->isEHInfo())
- ? XCOFF::XMC_TE
- : XCOFF::XMC_TC,
- XCOFF::XTY_SD));
+ XCOFF::CsectProperties(
+ ((TM.getCodeModel() == CodeModel::Large &&
+ cast<MCSymbolXCOFF>(Sym)->getSymbolTableName() != "_$TLSML") ||
+ cast<MCSymbolXCOFF>(Sym)->isEHInfo())
+ ? XCOFF::XMC_TE
+ : XCOFF::XMC_TC,
+ XCOFF::XTY_SD));
}
MCSection *TargetLoweringObjectFileXCOFF::getSectionForLSDA(
diff --git a/llvm/lib/CodeGen/TypePromotion.cpp b/llvm/lib/CodeGen/TypePromotion.cpp
index 48ad8de77801..34aeb62a87a0 100644
--- a/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/llvm/lib/CodeGen/TypePromotion.cpp
@@ -136,6 +136,7 @@ public:
class TypePromotionImpl {
unsigned TypeSize = 0;
+ const TargetLowering *TLI = nullptr;
LLVMContext *Ctx = nullptr;
unsigned RegisterBitWidth = 0;
SmallPtrSet<Value *, 16> AllVisited;
@@ -272,64 +273,58 @@ bool TypePromotionImpl::isSink(Value *V) {
/// Return whether this instruction can safely wrap.
bool TypePromotionImpl::isSafeWrap(Instruction *I) {
- // We can support a potentially wrapping instruction (I) if:
+ // We can support a potentially wrapping Add/Sub instruction (I) if:
// - It is only used by an unsigned icmp.
// - The icmp uses a constant.
- // - The wrapping value (I) is decreasing, i.e would underflow - wrapping
- // around zero to become a larger number than before.
// - The wrapping instruction (I) also uses a constant.
//
- // We can then use the two constants to calculate whether the result would
- // wrap in respect to itself in the original bitwidth. If it doesn't wrap,
- // just underflows the range, the icmp would give the same result whether the
- // result has been truncated or not. We calculate this by:
- // - Zero extending both constants, if needed, to RegisterBitWidth.
- // - Take the absolute value of I's constant, adding this to the icmp const.
- // - Check that this value is not out of range for small type. If it is, it
- // means that it has underflowed enough to wrap around the icmp constant.
+ // This is a common pattern emitted to check if a value is within a range.
//
// For example:
//
- // %sub = sub i8 %a, 2
- // %cmp = icmp ule i8 %sub, 254
+ // %sub = sub i8 %a, C1
+ // %cmp = icmp ule i8 %sub, C2
+ //
+ // or
+ //
+ // %add = add i8 %a, C1
+ // %cmp = icmp ule i8 %add, C2.
//
- // If %a = 0, %sub = -2 == FE == 254
- // But if this is evalulated as a i32
- // %sub = -2 == FF FF FF FE == 4294967294
- // So the unsigned compares (i8 and i32) would not yield the same result.
+ // We will treat an add as though it were a subtract by -C1. To promote
+ // the Add/Sub we will zero extend the LHS and the subtracted amount. For Add,
+ // this means we need to negate the constant, zero extend to RegisterBitWidth,
+ // and negate in the larger type.
//
- // Another way to look at it is:
- // %a - 2 <= 254
- // %a + 2 <= 254 + 2
- // %a <= 256
- // And we can't represent 256 in the i8 format, so we don't support it.
+ // This will produce a value in the range [-zext(C1), zext(X)-zext(C1)] where
+ // C1 is the subtracted amount. This is either a small unsigned number or a
+ // large unsigned number in the promoted type.
//
- // Whereas:
+ // Now we need to correct the compare constant C2. Values >= C1 in the
+ // original add result range have been remapped to large values in the
+ // promoted range. If the compare constant fell into this range we need to
+ // remap it as well. We can do this as -(zext(-C2)).
//
- // %sub i8 %a, 1
+ // For example:
+ //
+ // %sub = sub i8 %a, 2
// %cmp = icmp ule i8 %sub, 254
//
- // If %a = 0, %sub = -1 == FF == 255
- // As i32:
- // %sub = -1 == FF FF FF FF == 4294967295
+ // becomes
//
- // In this case, the unsigned compare results would be the same and this
- // would also be true for ult, uge and ugt:
- // - (255 < 254) == (0xFFFFFFFF < 254) == false
- // - (255 <= 254) == (0xFFFFFFFF <= 254) == false
- // - (255 > 254) == (0xFFFFFFFF > 254) == true
- // - (255 >= 254) == (0xFFFFFFFF >= 254) == true
+ // %zext = zext %a to i32
+ // %sub = sub i32 %zext, 2
+ // %cmp = icmp ule i32 %sub, 4294967294
//
- // To demonstrate why we can't handle increasing values:
+ // Another example:
//
- // %add = add i8 %a, 2
- // %cmp = icmp ult i8 %add, 127
+ // %sub = sub i8 %a, 1
+ // %cmp = icmp ule i8 %sub, 254
//
- // If %a = 254, %add = 256 == (i8 1)
- // As i32:
- // %add = 256
+ // becomes
//
- // (1 < 127) != (256 < 127)
+ // %zext = zext %a to i32
+ // %sub = sub i32 %zext, 1
+ // %cmp = icmp ule i32 %sub, 254
unsigned Opc = I->getOpcode();
if (Opc != Instruction::Add && Opc != Instruction::Sub)
@@ -356,15 +351,23 @@ bool TypePromotionImpl::isSafeWrap(Instruction *I) {
APInt OverflowConst = cast<ConstantInt>(I->getOperand(1))->getValue();
if (Opc == Instruction::Sub)
OverflowConst = -OverflowConst;
- if (!OverflowConst.isNonPositive())
- return false;
+
+ // If the constant is positive, we will end up filling the promoted bits with
+ // all 1s. Make sure that results in a cheap add constant.
+ if (!OverflowConst.isNonPositive()) {
+ // We don't have the true promoted width, just use 64 so we can create an
+ // int64_t for the isLegalAddImmediate call.
+ if (OverflowConst.getBitWidth() >= 64)
+ return false;
+
+ APInt NewConst = -((-OverflowConst).zext(64));
+ if (!TLI->isLegalAddImmediate(NewConst.getSExtValue()))
+ return false;
+ }
SafeWrap.insert(I);
- // Using C1 = OverflowConst and C2 = ICmpConst, we can either prove that:
- // zext(x) + sext(C1) <u zext(C2) if C1 < 0 and C1 >s C2
- // zext(x) + sext(C1) <u sext(C2) if C1 < 0 and C1 <=s C2
- if (OverflowConst.sgt(ICmpConst)) {
+ if (OverflowConst.ugt(ICmpConst)) {
LLVM_DEBUG(dbgs() << "IR Promotion: Allowing safe overflow for sext "
<< "const of " << *I << "\n");
return true;
@@ -487,18 +490,24 @@ void IRPromoter::PromoteTree() {
continue;
if (auto *Const = dyn_cast<ConstantInt>(Op)) {
- // For subtract, we don't need to sext the constant. We only put it in
+ // For subtract, we only need to zext the constant. We only put it in
// SafeWrap because SafeWrap.size() is used elsewhere.
- // For cmp, we need to sign extend a constant appearing in either
- // operand. For add, we should only sign extend the RHS.
- Constant *NewConst =
- ConstantInt::get(Const->getContext(),
- (SafeWrap.contains(I) &&
- (I->getOpcode() == Instruction::ICmp || i == 1) &&
- I->getOpcode() != Instruction::Sub)
- ? Const->getValue().sext(PromotedWidth)
- : Const->getValue().zext(PromotedWidth));
- I->setOperand(i, NewConst);
+ // For Add and ICmp we need to find how far the constant is from the
+ // top of its original unsigned range and place it the same distance
+ // from the top of its new unsigned range. We can do this by negating
+ // the constant, zero extending it, then negating in the new type.
+ APInt NewConst;
+ if (SafeWrap.contains(I)) {
+ if (I->getOpcode() == Instruction::ICmp)
+ NewConst = -((-Const->getValue()).zext(PromotedWidth));
+ else if (I->getOpcode() == Instruction::Add && i == 1)
+ NewConst = -((-Const->getValue()).zext(PromotedWidth));
+ else
+ NewConst = Const->getValue().zext(PromotedWidth);
+ } else
+ NewConst = Const->getValue().zext(PromotedWidth);
+
+ I->setOperand(i, ConstantInt::get(Const->getContext(), NewConst));
} else if (isa<UndefValue>(Op))
I->setOperand(i, ConstantInt::get(ExtTy, 0));
}
@@ -917,7 +926,7 @@ bool TypePromotionImpl::run(Function &F, const TargetMachine *TM,
bool MadeChange = false;
const DataLayout &DL = F.getParent()->getDataLayout();
const TargetSubtargetInfo *SubtargetInfo = TM->getSubtargetImpl(F);
- const TargetLowering *TLI = SubtargetInfo->getTargetLowering();
+ TLI = SubtargetInfo->getTargetLowering();
RegisterBitWidth =
TTI.getRegisterBitWidth(TargetTransformInfo::RGK_Scalar).getFixedValue();
Ctx = &F.getParent()->getContext();
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
index 20ef59e7b442..520debe513d9 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
@@ -29,7 +29,9 @@
#include "llvm/Support/DJB.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -2026,12 +2028,37 @@ void OutputCategoryAggregator::EnumerateResults(
}
void DWARFVerifier::summarize() {
- if (ErrorCategory.GetNumCategories() && DumpOpts.ShowAggregateErrors) {
+ if (DumpOpts.ShowAggregateErrors && ErrorCategory.GetNumCategories()) {
error() << "Aggregated error counts:\n";
ErrorCategory.EnumerateResults([&](StringRef s, unsigned count) {
error() << s << " occurred " << count << " time(s).\n";
});
}
+ if (!DumpOpts.JsonErrSummaryFile.empty()) {
+ std::error_code EC;
+ raw_fd_ostream JsonStream(DumpOpts.JsonErrSummaryFile, EC,
+ sys::fs::OF_Text);
+ if (EC) {
+ error() << "unable to open json summary file '"
+ << DumpOpts.JsonErrSummaryFile
+ << "' for writing: " << EC.message() << '\n';
+ return;
+ }
+
+ llvm::json::Object Categories;
+ uint64_t ErrorCount = 0;
+ ErrorCategory.EnumerateResults([&](StringRef Category, unsigned Count) {
+ llvm::json::Object Val;
+ Val.try_emplace("count", Count);
+ Categories.try_emplace(Category, std::move(Val));
+ ErrorCount += Count;
+ });
+ llvm::json::Object RootNode;
+ RootNode.try_emplace("error-categories", std::move(Categories));
+ RootNode.try_emplace("error-count", ErrorCount);
+
+ JsonStream << llvm::json::Value(std::move(RootNode));
+ }
}
raw_ostream &DWARFVerifier::error() const { return WithColor::error(OS); }
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 09f59c81123e..d65ed8c11d86 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -4047,13 +4047,17 @@ OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSingle(
const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
- FinalizeCallbackTy FiniCB, bool IsNowait, llvm::Value *DidIt) {
+ FinalizeCallbackTy FiniCB, bool IsNowait, ArrayRef<llvm::Value *> CPVars,
+ ArrayRef<llvm::Function *> CPFuncs) {
if (!updateToLocation(Loc))
return Loc.IP;
- // If needed (i.e. not null), initialize `DidIt` with 0
- if (DidIt) {
+ // If needed, allocate and initialize `DidIt` with 0.
+ // DidIt: flag variable: 1=single thread; 0=not single thread.
+ llvm::Value *DidIt = nullptr;
+ if (!CPVars.empty()) {
+ DidIt = Builder.CreateAlloca(llvm::Type::getInt32Ty(Builder.getContext()));
Builder.CreateStore(Builder.getInt32(0), DidIt);
}
@@ -4070,17 +4074,36 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSingle(
Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
+ auto FiniCBWrapper = [&](InsertPointTy IP) {
+ FiniCB(IP);
+
+ // The thread that executes the single region must set `DidIt` to 1.
+ // This is used by __kmpc_copyprivate, to know if the caller is the
+ // single thread or not.
+ if (DidIt)
+ Builder.CreateStore(Builder.getInt32(1), DidIt);
+ };
+
// generates the following:
// if (__kmpc_single()) {
// .... single region ...
// __kmpc_end_single
// }
+ // __kmpc_copyprivate
// __kmpc_barrier
- EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
+ EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCBWrapper,
/*Conditional*/ true,
/*hasFinalize*/ true);
- if (!IsNowait)
+
+ if (DidIt) {
+ for (size_t I = 0, E = CPVars.size(); I < E; ++I)
+ // NOTE BufSize is currently unused, so just pass 0.
+ createCopyPrivate(LocationDescription(Builder.saveIP(), Loc.DL),
+ /*BufSize=*/ConstantInt::get(Int64, 0), CPVars[I],
+ CPFuncs[I], DidIt);
+ // NOTE __kmpc_copyprivate already inserts a barrier
+ } else if (!IsNowait)
createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
/* CheckCancelFlag */ false);
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 4e1e48b4ad4a..479622cd1bdc 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -2135,6 +2135,17 @@ static void writeDIDerivedType(raw_ostream &Out, const DIDerivedType *N,
Printer.printInt("dwarfAddressSpace", *DWARFAddressSpace,
/* ShouldSkipZero */ false);
Printer.printMetadata("annotations", N->getRawAnnotations());
+ if (auto Key = N->getPtrAuthKey())
+ Printer.printInt("ptrAuthKey", *Key);
+ if (auto AddrDisc = N->isPtrAuthAddressDiscriminated())
+ Printer.printBool("ptrAuthIsAddressDiscriminated", *AddrDisc);
+ if (auto Disc = N->getPtrAuthExtraDiscriminator())
+ Printer.printInt("ptrAuthExtraDiscriminator", *Disc);
+ if (auto IsaPointer = N->isPtrAuthIsaPointer())
+ Printer.printBool("ptrAuthIsaPointer", *IsaPointer);
+ if (auto AuthenticatesNullValues = N->getPtrAuthAuthenticatesNullValues())
+ Printer.printBool("ptrAuthAuthenticatesNullValues",
+ *AuthenticatesNullValues);
Out << ")";
}
diff --git a/llvm/lib/IR/DIBuilder.cpp b/llvm/lib/IR/DIBuilder.cpp
index 62efaba02534..2842cb15e78f 100644
--- a/llvm/lib/IR/DIBuilder.cpp
+++ b/llvm/lib/IR/DIBuilder.cpp
@@ -296,7 +296,20 @@ DIStringType *DIBuilder::createStringType(StringRef Name,
DIDerivedType *DIBuilder::createQualifiedType(unsigned Tag, DIType *FromTy) {
return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, FromTy, 0,
- 0, 0, std::nullopt, DINode::FlagZero);
+ 0, 0, std::nullopt, std::nullopt, DINode::FlagZero);
+}
+
+DIDerivedType *DIBuilder::createPtrAuthQualifiedType(
+ DIType *FromTy, unsigned Key, bool IsAddressDiscriminated,
+ unsigned ExtraDiscriminator, bool IsaPointer,
+ bool AuthenticatesNullValues) {
+ return DIDerivedType::get(
+ VMContext, dwarf::DW_TAG_LLVM_ptrauth_type, "", nullptr, 0, nullptr,
+ FromTy, 0, 0, 0, std::nullopt,
+ std::optional<DIDerivedType::PtrAuthData>({Key, IsAddressDiscriminated,
+ ExtraDiscriminator, IsaPointer,
+ AuthenticatesNullValues}),
+ DINode::FlagZero);
}
DIDerivedType *
@@ -307,8 +320,8 @@ DIBuilder::createPointerType(DIType *PointeeTy, uint64_t SizeInBits,
// FIXME: Why is there a name here?
return DIDerivedType::get(VMContext, dwarf::DW_TAG_pointer_type, Name,
nullptr, 0, nullptr, PointeeTy, SizeInBits,
- AlignInBits, 0, DWARFAddressSpace, DINode::FlagZero,
- nullptr, Annotations);
+ AlignInBits, 0, DWARFAddressSpace, std::nullopt,
+ DINode::FlagZero, nullptr, Annotations);
}
DIDerivedType *DIBuilder::createMemberPointerType(DIType *PointeeTy,
@@ -318,7 +331,8 @@ DIDerivedType *DIBuilder::createMemberPointerType(DIType *PointeeTy,
DINode::DIFlags Flags) {
return DIDerivedType::get(VMContext, dwarf::DW_TAG_ptr_to_member_type, "",
nullptr, 0, nullptr, PointeeTy, SizeInBits,
- AlignInBits, 0, std::nullopt, Flags, Base);
+ AlignInBits, 0, std::nullopt, std::nullopt, Flags,
+ Base);
}
DIDerivedType *
@@ -327,7 +341,7 @@ DIBuilder::createReferenceType(unsigned Tag, DIType *RTy, uint64_t SizeInBits,
std::optional<unsigned> DWARFAddressSpace) {
assert(RTy && "Unable to create reference type");
return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, RTy,
- SizeInBits, AlignInBits, 0, DWARFAddressSpace,
+ SizeInBits, AlignInBits, 0, DWARFAddressSpace, {},
DINode::FlagZero);
}
@@ -338,15 +352,16 @@ DIDerivedType *DIBuilder::createTypedef(DIType *Ty, StringRef Name,
DINodeArray Annotations) {
return DIDerivedType::get(VMContext, dwarf::DW_TAG_typedef, Name, File,
LineNo, getNonCompileUnitScope(Context), Ty, 0,
- AlignInBits, 0, std::nullopt, Flags, nullptr,
- Annotations);
+ AlignInBits, 0, std::nullopt, std::nullopt, Flags,
+ nullptr, Annotations);
}
DIDerivedType *DIBuilder::createFriend(DIType *Ty, DIType *FriendTy) {
assert(Ty && "Invalid type!");
assert(FriendTy && "Invalid friend type!");
return DIDerivedType::get(VMContext, dwarf::DW_TAG_friend, "", nullptr, 0, Ty,
- FriendTy, 0, 0, 0, std::nullopt, DINode::FlagZero);
+ FriendTy, 0, 0, 0, std::nullopt, std::nullopt,
+ DINode::FlagZero);
}
DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy,
@@ -358,7 +373,7 @@ DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy,
ConstantInt::get(IntegerType::get(VMContext, 32), VBPtrOffset));
return DIDerivedType::get(VMContext, dwarf::DW_TAG_inheritance, "", nullptr,
0, Ty, BaseTy, 0, 0, BaseOffset, std::nullopt,
- Flags, ExtraData);
+ std::nullopt, Flags, ExtraData);
}
DIDerivedType *DIBuilder::createMemberType(
@@ -368,7 +383,7 @@ DIDerivedType *DIBuilder::createMemberType(
return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
LineNumber, getNonCompileUnitScope(Scope), Ty,
SizeInBits, AlignInBits, OffsetInBits, std::nullopt,
- Flags, nullptr, Annotations);
+ std::nullopt, Flags, nullptr, Annotations);
}
static ConstantAsMetadata *getConstantOrNull(Constant *C) {
@@ -381,10 +396,10 @@ DIDerivedType *DIBuilder::createVariantMemberType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
Constant *Discriminant, DINode::DIFlags Flags, DIType *Ty) {
- return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
- LineNumber, getNonCompileUnitScope(Scope), Ty,
- SizeInBits, AlignInBits, OffsetInBits, std::nullopt,
- Flags, getConstantOrNull(Discriminant));
+ return DIDerivedType::get(
+ VMContext, dwarf::DW_TAG_member, Name, File, LineNumber,
+ getNonCompileUnitScope(Scope), Ty, SizeInBits, AlignInBits, OffsetInBits,
+ std::nullopt, std::nullopt, Flags, getConstantOrNull(Discriminant));
}
DIDerivedType *DIBuilder::createBitFieldMemberType(
@@ -395,7 +410,7 @@ DIDerivedType *DIBuilder::createBitFieldMemberType(
return DIDerivedType::get(
VMContext, dwarf::DW_TAG_member, Name, File, LineNumber,
getNonCompileUnitScope(Scope), Ty, SizeInBits, /*AlignInBits=*/0,
- OffsetInBits, std::nullopt, Flags,
+ OffsetInBits, std::nullopt, std::nullopt, Flags,
ConstantAsMetadata::get(ConstantInt::get(IntegerType::get(VMContext, 64),
StorageOffsetInBits)),
Annotations);
@@ -409,7 +424,8 @@ DIBuilder::createStaticMemberType(DIScope *Scope, StringRef Name, DIFile *File,
Flags |= DINode::FlagStaticMember;
return DIDerivedType::get(VMContext, Tag, Name, File, LineNumber,
getNonCompileUnitScope(Scope), Ty, 0, AlignInBits,
- 0, std::nullopt, Flags, getConstantOrNull(Val));
+ 0, std::nullopt, std::nullopt, Flags,
+ getConstantOrNull(Val));
}
DIDerivedType *
@@ -420,7 +436,7 @@ DIBuilder::createObjCIVar(StringRef Name, DIFile *File, unsigned LineNumber,
return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
LineNumber, getNonCompileUnitScope(File), Ty,
SizeInBits, AlignInBits, OffsetInBits, std::nullopt,
- Flags, PropertyNode);
+ std::nullopt, Flags, PropertyNode);
}
DIObjCProperty *
@@ -555,10 +571,10 @@ DIDerivedType *DIBuilder::createSetType(DIScope *Scope, StringRef Name,
DIFile *File, unsigned LineNo,
uint64_t SizeInBits,
uint32_t AlignInBits, DIType *Ty) {
- auto *R =
- DIDerivedType::get(VMContext, dwarf::DW_TAG_set_type, Name, File, LineNo,
- getNonCompileUnitScope(Scope), Ty, SizeInBits,
- AlignInBits, 0, std::nullopt, DINode::FlagZero);
+ auto *R = DIDerivedType::get(VMContext, dwarf::DW_TAG_set_type, Name, File,
+ LineNo, getNonCompileUnitScope(Scope), Ty,
+ SizeInBits, AlignInBits, 0, std::nullopt,
+ std::nullopt, DINode::FlagZero);
trackIfUnresolved(R);
return R;
}
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index e044ab3230c5..510cc31da6ca 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -99,8 +99,8 @@ static void findDbgIntrinsics(SmallVectorImpl<IntrinsicT *> &Result, Value *V,
SmallPtrSet<DPValue *, 4> EncounteredDPValues;
/// Append IntrinsicT users of MetadataAsValue(MD).
- auto AppendUsers = [&Ctx, &EncounteredIntrinsics, &Result,
- DPValues](Metadata *MD) {
+ auto AppendUsers = [&Ctx, &EncounteredIntrinsics, &EncounteredDPValues,
+ &Result, DPValues](Metadata *MD) {
if (auto *MDV = MetadataAsValue::getIfExists(Ctx, MD)) {
for (User *U : MDV->users())
if (IntrinsicT *DVI = dyn_cast<IntrinsicT>(U))
@@ -113,7 +113,8 @@ static void findDbgIntrinsics(SmallVectorImpl<IntrinsicT *> &Result, Value *V,
if (LocalAsMetadata *L = dyn_cast<LocalAsMetadata>(MD)) {
for (DPValue *DPV : L->getAllDPValueUsers()) {
if (Type == DPValue::LocationType::Any || DPV->getType() == Type)
- DPValues->push_back(DPV);
+ if (EncounteredDPValues.insert(DPV).second)
+ DPValues->push_back(DPV);
}
}
};
@@ -1335,9 +1336,9 @@ LLVMMetadataRef LLVMDIBuilderCreatePointerType(
LLVMDIBuilderRef Builder, LLVMMetadataRef PointeeTy,
uint64_t SizeInBits, uint32_t AlignInBits, unsigned AddressSpace,
const char *Name, size_t NameLen) {
- return wrap(unwrap(Builder)->createPointerType(unwrapDI<DIType>(PointeeTy),
- SizeInBits, AlignInBits,
- AddressSpace, {Name, NameLen}));
+ return wrap(unwrap(Builder)->createPointerType(
+ unwrapDI<DIType>(PointeeTy), SizeInBits, AlignInBits, AddressSpace,
+ {Name, NameLen}));
}
LLVMMetadataRef LLVMDIBuilderCreateStructType(
diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp
index 28f96653d815..36c13e79a649 100644
--- a/llvm/lib/IR/DebugInfoMetadata.cpp
+++ b/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -34,6 +34,10 @@ cl::opt<bool> EnableFSDiscriminator(
cl::desc("Enable adding flow sensitive discriminators"));
} // namespace llvm
+uint32_t DIType::getAlignInBits() const {
+ return (getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ? 0 : SubclassData32);
+}
+
const DIExpression::FragmentInfo DebugVariable::DefaultFragment = {
std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::min()};
@@ -731,26 +735,32 @@ Constant *DIDerivedType::getDiscriminantValue() const {
return nullptr;
}
-DIDerivedType *
-DIDerivedType::getImpl(LLVMContext &Context, unsigned Tag, MDString *Name,
- Metadata *File, unsigned Line, Metadata *Scope,
- Metadata *BaseType, uint64_t SizeInBits,
- uint32_t AlignInBits, uint64_t OffsetInBits,
- std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
- Metadata *ExtraData, Metadata *Annotations,
- StorageType Storage, bool ShouldCreate) {
+DIDerivedType *DIDerivedType::getImpl(
+ LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits,
+ std::optional<unsigned> DWARFAddressSpace,
+ std::optional<PtrAuthData> PtrAuthData, DIFlags Flags, Metadata *ExtraData,
+ Metadata *Annotations, StorageType Storage, bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
DEFINE_GETIMPL_LOOKUP(DIDerivedType,
(Tag, Name, File, Line, Scope, BaseType, SizeInBits,
- AlignInBits, OffsetInBits, DWARFAddressSpace, Flags,
- ExtraData, Annotations));
+ AlignInBits, OffsetInBits, DWARFAddressSpace,
+ PtrAuthData, Flags, ExtraData, Annotations));
Metadata *Ops[] = {File, Scope, Name, BaseType, ExtraData, Annotations};
DEFINE_GETIMPL_STORE(DIDerivedType,
(Tag, Line, SizeInBits, AlignInBits, OffsetInBits,
- DWARFAddressSpace, Flags),
+ DWARFAddressSpace, PtrAuthData, Flags),
Ops);
}
+std::optional<DIDerivedType::PtrAuthData>
+DIDerivedType::getPtrAuthData() const {
+ return getTag() == dwarf::DW_TAG_LLVM_ptrauth_type
+ ? std::optional<PtrAuthData>(PtrAuthData(SubclassData32))
+ : std::nullopt;
+}
+
DICompositeType *DICompositeType::getImpl(
LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index c54f8d7aca4a..ce221758ef79 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -46,11 +46,11 @@ Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
BasicBlock *InsertAtEnd)
- : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
- // append this instruction into the basic block
- assert(InsertAtEnd && "Basic block to append to may not be NULL!");
- insertInto(InsertAtEnd, InsertAtEnd->end());
+ // If requested, append this instruction into the basic block.
+ if (InsertAtEnd)
+ insertInto(InsertAtEnd, InsertAtEnd->end());
}
Instruction::~Instruction() {
@@ -73,7 +73,6 @@ Instruction::~Instruction() {
setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}
-
void Instruction::setParent(BasicBlock *P) {
Parent = P;
}
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 25778570ebf3..42cdcad78228 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -304,6 +304,20 @@ void LandingPadInst::addClause(Constant *Val) {
//===----------------------------------------------------------------------===//
CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
+ BasicBlock::iterator InsertPt) {
+ switch (CB->getOpcode()) {
+ case Instruction::Call:
+ return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
+ case Instruction::Invoke:
+ return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
+ case Instruction::CallBr:
+ return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
+ default:
+ llvm_unreachable("Unknown CallBase sub-class!");
+ }
+}
+
+CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
Instruction *InsertPt) {
switch (CB->getOpcode()) {
case Instruction::Call:
@@ -559,6 +573,18 @@ CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
OperandBundleDef OB,
+ BasicBlock::iterator InsertPt) {
+ if (CB->getOperandBundle(ID))
+ return CB;
+
+ SmallVector<OperandBundleDef, 1> Bundles;
+ CB->getOperandBundlesAsDefs(Bundles);
+ Bundles.push_back(OB);
+ return Create(CB, Bundles, InsertPt);
+}
+
+CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
+ OperandBundleDef OB,
Instruction *InsertPt) {
if (CB->getOperandBundle(ID))
return CB;
@@ -570,6 +596,23 @@ CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
}
CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
+ BasicBlock::iterator InsertPt) {
+ SmallVector<OperandBundleDef, 1> Bundles;
+ bool CreateNew = false;
+
+ for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
+ auto Bundle = CB->getOperandBundleAt(I);
+ if (Bundle.getTagID() == ID) {
+ CreateNew = true;
+ continue;
+ }
+ Bundles.emplace_back(Bundle);
+ }
+
+ return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
+}
+
+CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
Instruction *InsertPt) {
SmallVector<OperandBundleDef, 1> Bundles;
bool CreateNew = false;
@@ -717,6 +760,13 @@ void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
}
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+ BasicBlock::iterator InsertBefore)
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
+ init(Ty, Func, Name);
+}
+
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
Instruction *InsertBefore)
: CallBase(Ty->getReturnType(), Instruction::Call,
OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
@@ -881,6 +931,20 @@ InvokeInst::InvokeInst(const InvokeInst &II)
}
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
+ BasicBlock::iterator InsertPt) {
+ std::vector<Value *> Args(II->arg_begin(), II->arg_end());
+
+ auto *NewII = InvokeInst::Create(
+ II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
+ II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
+ NewII->setCallingConv(II->getCallingConv());
+ NewII->SubclassOptionalData = II->SubclassOptionalData;
+ NewII->setAttributes(II->getAttributes());
+ NewII->setDebugLoc(II->getDebugLoc());
+ return NewII;
+}
+
+InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
Instruction *InsertPt) {
std::vector<Value *> Args(II->arg_begin(), II->arg_end());
@@ -954,6 +1018,21 @@ CallBrInst::CallBrInst(const CallBrInst &CBI)
}
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
+ BasicBlock::iterator InsertPt) {
+ std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
+
+ auto *NewCBI = CallBrInst::Create(
+ CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
+ CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
+ NewCBI->setCallingConv(CBI->getCallingConv());
+ NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
+ NewCBI->setAttributes(CBI->getAttributes());
+ NewCBI->setDebugLoc(CBI->getDebugLoc());
+ NewCBI->NumIndirectDests = CBI->NumIndirectDests;
+ return NewCBI;
+}
+
+CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
Instruction *InsertPt) {
std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
@@ -1138,6 +1217,18 @@ CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumReservedValues,
const Twine &NameStr,
+ BasicBlock::iterator InsertBefore)
+ : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertBefore) {
+ if (UnwindDest)
+ ++NumReservedValues;
+ init(ParentPad, UnwindDest, NumReservedValues + 1);
+ setName(NameStr);
+}
+
+CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues,
+ const Twine &NameStr,
Instruction *InsertBefore)
: Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
InsertBefore) {
@@ -3224,6 +3315,14 @@ void BinaryOperator::AssertOK() {
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
const Twine &Name,
+ BasicBlock::iterator InsertBefore) {
+ assert(S1->getType() == S2->getType() &&
+ "Cannot create binary operator with two operands of differing type!");
+ return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name,
Instruction *InsertBefore) {
assert(S1->getType() == S2->getType() &&
"Cannot create binary operator with two operands of differing type!");
@@ -3246,14 +3345,6 @@ BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
}
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
- Instruction *InsertBefore) {
- Value *Zero = ConstantInt::get(Op->getType(), 0);
- return new BinaryOperator(Instruction::Sub,
- Zero, Op,
- Op->getType(), Name, InsertBefore);
-}
-
-BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd) {
Value *Zero = ConstantInt::get(Op->getType(), 0);
return new BinaryOperator(Instruction::Sub,
@@ -3286,6 +3377,13 @@ BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
}
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
+ BasicBlock::iterator InsertBefore) {
+ Constant *C = Constant::getAllOnesValue(Op->getType());
+ return new BinaryOperator(Instruction::Xor, Op, C,
+ Op->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
Instruction *InsertBefore) {
Constant *C = Constant::getAllOnesValue(Op->getType());
return new BinaryOperator(Instruction::Xor, Op, C,
@@ -3831,6 +3929,17 @@ CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
const Twine &Name,
+ BasicBlock::iterator InsertBefore) {
+ if (S->getType()->isPointerTy() && Ty->isIntegerTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
+ if (S->getType()->isIntegerTy() && Ty->isPointerTy())
+ return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
+ const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->isPointerTy() && Ty->isIntegerTy())
return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
@@ -4465,6 +4574,18 @@ CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
+ const Twine &Name, BasicBlock::iterator InsertBefore) {
+ if (Op == Instruction::ICmp) {
+ return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ }
+
+ return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+}
+
+CmpInst *
+CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
const Twine &Name, Instruction *InsertBefore) {
if (Op == Instruction::ICmp) {
if (InsertBefore)
diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h
index 2ee1080a1ffa..05e2b56587b2 100644
--- a/llvm/lib/IR/LLVMContextImpl.h
+++ b/llvm/lib/IR/LLVMContextImpl.h
@@ -539,6 +539,7 @@ template <> struct MDNodeKeyImpl<DIDerivedType> {
uint64_t OffsetInBits;
uint32_t AlignInBits;
std::optional<unsigned> DWARFAddressSpace;
+ std::optional<DIDerivedType::PtrAuthData> PtrAuthData;
unsigned Flags;
Metadata *ExtraData;
Metadata *Annotations;
@@ -546,18 +547,21 @@ template <> struct MDNodeKeyImpl<DIDerivedType> {
MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
uint32_t AlignInBits, uint64_t OffsetInBits,
- std::optional<unsigned> DWARFAddressSpace, unsigned Flags,
- Metadata *ExtraData, Metadata *Annotations)
+ std::optional<unsigned> DWARFAddressSpace,
+ std::optional<DIDerivedType::PtrAuthData> PtrAuthData,
+ unsigned Flags, Metadata *ExtraData, Metadata *Annotations)
: Tag(Tag), Name(Name), File(File), Line(Line), Scope(Scope),
BaseType(BaseType), SizeInBits(SizeInBits), OffsetInBits(OffsetInBits),
AlignInBits(AlignInBits), DWARFAddressSpace(DWARFAddressSpace),
- Flags(Flags), ExtraData(ExtraData), Annotations(Annotations) {}
+ PtrAuthData(PtrAuthData), Flags(Flags), ExtraData(ExtraData),
+ Annotations(Annotations) {}
MDNodeKeyImpl(const DIDerivedType *N)
: Tag(N->getTag()), Name(N->getRawName()), File(N->getRawFile()),
Line(N->getLine()), Scope(N->getRawScope()),
BaseType(N->getRawBaseType()), SizeInBits(N->getSizeInBits()),
OffsetInBits(N->getOffsetInBits()), AlignInBits(N->getAlignInBits()),
- DWARFAddressSpace(N->getDWARFAddressSpace()), Flags(N->getFlags()),
+ DWARFAddressSpace(N->getDWARFAddressSpace()),
+ PtrAuthData(N->getPtrAuthData()), Flags(N->getFlags()),
ExtraData(N->getRawExtraData()), Annotations(N->getRawAnnotations()) {}
bool isKeyOf(const DIDerivedType *RHS) const {
@@ -568,7 +572,8 @@ template <> struct MDNodeKeyImpl<DIDerivedType> {
AlignInBits == RHS->getAlignInBits() &&
OffsetInBits == RHS->getOffsetInBits() &&
DWARFAddressSpace == RHS->getDWARFAddressSpace() &&
- Flags == RHS->getFlags() && ExtraData == RHS->getRawExtraData() &&
+ PtrAuthData == RHS->getPtrAuthData() && Flags == RHS->getFlags() &&
+ ExtraData == RHS->getRawExtraData() &&
Annotations == RHS->getRawAnnotations();
}
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 3741e5deaa4c..1b02d11ff4e7 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -1223,6 +1223,7 @@ void Verifier::visitDIDerivedType(const DIDerivedType &N) {
N.getTag() == dwarf::DW_TAG_volatile_type ||
N.getTag() == dwarf::DW_TAG_restrict_type ||
N.getTag() == dwarf::DW_TAG_atomic_type ||
+ N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
N.getTag() == dwarf::DW_TAG_member ||
(N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
N.getTag() == dwarf::DW_TAG_inheritance ||
@@ -5002,7 +5003,9 @@ void Verifier::visitInstruction(Instruction &I) {
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
Check(GV->getParent() == &M, "Referencing global in another module!", &I,
&M, GV, GV->getParent());
- } else if (isa<Instruction>(I.getOperand(i))) {
+ } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
+ Check(OpInst->getFunction() == BB->getParent(),
+ "Referring to an instruction in another function!", &I);
verifyDominatesUse(I, i);
} else if (isa<InlineAsm>(I.getOperand(i))) {
Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 485fd1885ddb..28b2cbb0e8b0 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -338,6 +338,10 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
return "ie";
case VK_PPC_AIX_TLSLE:
return "le";
+ case VK_PPC_AIX_TLSLD:
+ return "ld";
+ case VK_PPC_AIX_TLSML:
+ return "ml";
case VK_PPC_GOT_TLSLD: return "got@tlsld";
case VK_PPC_GOT_TLSLD_LO: return "got@tlsld@l";
case VK_PPC_GOT_TLSLD_HI: return "got@tlsld@h";
diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp
index a1c32eee3286..76a3e501f459 100644
--- a/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -44,6 +44,7 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolMachO.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
@@ -1950,7 +1951,8 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
Lex();
}
- if (MAI.hasSubsectionsViaSymbols() && CFIStartProcLoc && Sym->isExternal())
+ if (MAI.hasSubsectionsViaSymbols() && CFIStartProcLoc &&
+ Sym->isExternal() && !cast<MCSymbolMachO>(Sym)->isAltEntry())
return Error(StartTokLoc, "non-private labels cannot appear between "
".cfi_startproc / .cfi_endproc pairs") &&
Error(*CFIStartProcLoc, "previous .cfi_startproc was here");
diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp
index 8809af2e5e0c..d46bbaf75765 100644
--- a/llvm/lib/MC/XCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/XCOFFObjectWriter.cpp
@@ -715,7 +715,8 @@ void XCOFFObjectWriter::recordRelocation(MCAssembler &Asm,
if (Type == XCOFF::RelocationType::R_POS ||
Type == XCOFF::RelocationType::R_TLS ||
Type == XCOFF::RelocationType::R_TLS_LE ||
- Type == XCOFF::RelocationType::R_TLS_IE)
+ Type == XCOFF::RelocationType::R_TLS_IE ||
+ Type == XCOFF::RelocationType::R_TLS_LD)
// The FixedValue should be symbol's virtual address in this object file
// plus any constant value that we might get.
FixedValue = getVirtualAddress(SymA, SymASec) + Target.getConstant();
diff --git a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
index 1b3a58298ec0..f52bcb74938d 100644
--- a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
+++ b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
@@ -300,6 +300,10 @@ static Error updateAndRemoveSymbols(const CommonConfig &Config,
Config.SymbolsToLocalize.matches(Sym.Name)))
Sym.Binding = STB_LOCAL;
+ for (auto &[Matcher, Visibility] : ELFConfig.SymbolsToSetVisibility)
+ if (Matcher.matches(Sym.Name))
+ Sym.Visibility = Visibility;
+
// Note: these two globalize flags have very similar names but different
// meanings:
//
diff --git a/llvm/lib/Object/DXContainer.cpp b/llvm/lib/Object/DXContainer.cpp
index 0401c20b98ec..935749afe338 100644
--- a/llvm/lib/Object/DXContainer.cpp
+++ b/llvm/lib/Object/DXContainer.cpp
@@ -72,13 +72,13 @@ Error DXContainer::parseDXILHeader(StringRef Part) {
return Error::success();
}
-Error DXContainer::parseShaderFlags(StringRef Part) {
- if (ShaderFlags)
+Error DXContainer::parseShaderFeatureFlags(StringRef Part) {
+ if (ShaderFeatureFlags)
return parseFailed("More than one SFI0 part is present in the file");
uint64_t FlagValue = 0;
if (Error Err = readInteger(Part, Part.begin(), FlagValue))
return Err;
- ShaderFlags = FlagValue;
+ ShaderFeatureFlags = FlagValue;
return Error::success();
}
@@ -168,7 +168,7 @@ Error DXContainer::parsePartOffsets() {
return Err;
break;
case dxbc::PartType::SFI0:
- if (Error Err = parseShaderFlags(PartData))
+ if (Error Err = parseShaderFeatureFlags(PartData))
return Err;
break;
case dxbc::PartType::HASH:
diff --git a/llvm/lib/ObjectYAML/DXContainerYAML.cpp b/llvm/lib/ObjectYAML/DXContainerYAML.cpp
index 1f03f2c7d399..7dc9822bdd22 100644
--- a/llvm/lib/ObjectYAML/DXContainerYAML.cpp
+++ b/llvm/lib/ObjectYAML/DXContainerYAML.cpp
@@ -23,15 +23,15 @@ namespace llvm {
static_assert((uint64_t)dxbc::FeatureFlags::NextUnusedBit <= 1ull << 63,
"Shader flag bits exceed enum size.");
-DXContainerYAML::ShaderFlags::ShaderFlags(uint64_t FlagData) {
-#define SHADER_FLAG(Num, Val, Str) \
+DXContainerYAML::ShaderFeatureFlags::ShaderFeatureFlags(uint64_t FlagData) {
+#define SHADER_FEATURE_FLAG(Num, Val, Str) \
Val = (FlagData & (uint64_t)dxbc::FeatureFlags::Val) > 0;
#include "llvm/BinaryFormat/DXContainerConstants.def"
}
-uint64_t DXContainerYAML::ShaderFlags::getEncodedFlags() {
+uint64_t DXContainerYAML::ShaderFeatureFlags::getEncodedFlags() {
uint64_t Flag = 0;
-#define SHADER_FLAG(Num, Val, Str) \
+#define SHADER_FEATURE_FLAG(Num, Val, Str) \
if (Val) \
Flag |= (uint64_t)dxbc::FeatureFlags::Val;
#include "llvm/BinaryFormat/DXContainerConstants.def"
@@ -103,9 +103,9 @@ void MappingTraits<DXContainerYAML::DXILProgram>::mapping(
IO.mapOptional("DXIL", Program.DXIL);
}
-void MappingTraits<DXContainerYAML::ShaderFlags>::mapping(
- IO &IO, DXContainerYAML::ShaderFlags &Flags) {
-#define SHADER_FLAG(Num, Val, Str) IO.mapRequired(#Val, Flags.Val);
+void MappingTraits<DXContainerYAML::ShaderFeatureFlags>::mapping(
+ IO &IO, DXContainerYAML::ShaderFeatureFlags &Flags) {
+#define SHADER_FEATURE_FLAG(Num, Val, Str) IO.mapRequired(#Val, Flags.Val);
#include "llvm/BinaryFormat/DXContainerConstants.def"
}
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 142bd50b3798..cbbbec0ccc8c 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -209,6 +209,15 @@ static cl::opt<bool> EnableLoopFlatten("enable-loop-flatten", cl::init(false),
cl::Hidden,
cl::desc("Enable the LoopFlatten Pass"));
+// Experimentally allow loop header duplication. This should allow for better
+// optimization at Oz, since loop-idiom recognition can then recognize things
+// like memcpy. If this ends up being useful for many targets, we should drop
+// this flag and make a code generation option that can be controlled
+// independent of the opt level and exposed through the frontend.
+static cl::opt<bool> EnableLoopHeaderDuplication(
+ "enable-loop-header-duplication", cl::init(false), cl::Hidden,
+ cl::desc("Enable loop header duplication at any optimization level"));
+
static cl::opt<bool>
EnableDFAJumpThreading("enable-dfa-jump-thread",
cl::desc("Enable DFA jump threading"),
@@ -630,8 +639,9 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
/*AllowSpeculation=*/false));
// Disable header duplication in loop rotation at -Oz.
- LPM1.addPass(
- LoopRotatePass(Level != OptimizationLevel::Oz, isLTOPreLink(Phase)));
+ LPM1.addPass(LoopRotatePass(EnableLoopHeaderDuplication ||
+ Level != OptimizationLevel::Oz,
+ isLTOPreLink(Phase)));
// TODO: Investigate promotion cap for O1.
LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true));
@@ -707,7 +717,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// Re-consider control flow based optimizations after redundancy elimination,
// redo DCE, etc.
- if (EnableDFAJumpThreading && Level.getSizeLevel() == 0)
+ if (EnableDFAJumpThreading)
FPM.addPass(DFAJumpThreadingPass());
FPM.addPass(JumpThreadingPass());
@@ -812,7 +822,8 @@ void PassBuilder::addPGOInstrPasses(ModulePassManager &MPM,
// Disable header duplication in loop rotation at -Oz.
MPM.addPass(createModuleToFunctionPassAdaptor(
createFunctionToLoopPassAdaptor(
- LoopRotatePass(Level != OptimizationLevel::Oz),
+ LoopRotatePass(EnableLoopHeaderDuplication ||
+ Level != OptimizationLevel::Oz),
/*UseMemorySSA=*/false,
/*UseBlockFrequencyInfo=*/false),
PTO.EagerlyInvalidateAnalyses));
@@ -1422,7 +1433,9 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
LoopPassManager LPM;
// First rotate loops that may have been un-rotated by prior passes.
// Disable header duplication at -Oz.
- LPM.addPass(LoopRotatePass(Level != OptimizationLevel::Oz, LTOPreLink));
+ LPM.addPass(LoopRotatePass(EnableLoopHeaderDuplication ||
+ Level != OptimizationLevel::Oz,
+ LTOPreLink));
// Some loops may have become dead by now. Try to delete them.
// FIXME: see discussion in https://reviews.llvm.org/D112851,
// this may need to be revisited once we run GVN before loop deletion
diff --git a/llvm/lib/Support/ErrorHandling.cpp b/llvm/lib/Support/ErrorHandling.cpp
index b8b3b7424ac6..d2d3dcc2f478 100644
--- a/llvm/lib/Support/ErrorHandling.cpp
+++ b/llvm/lib/Support/ErrorHandling.cpp
@@ -130,7 +130,8 @@ void llvm::install_bad_alloc_error_handler(fatal_error_handler_t handler,
#if LLVM_ENABLE_THREADS == 1
std::lock_guard<std::mutex> Lock(BadAllocErrorHandlerMutex);
#endif
- assert(!ErrorHandler && "Bad alloc error handler already registered!\n");
+ assert(!BadAllocErrorHandler &&
+ "Bad alloc error handler already registered!\n");
BadAllocErrorHandler = handler;
BadAllocErrorHandlerUserData = user_data;
}
diff --git a/llvm/lib/Support/KnownBits.cpp b/llvm/lib/Support/KnownBits.cpp
index 770e4051ca3f..c44a08cc1c2e 100644
--- a/llvm/lib/Support/KnownBits.cpp
+++ b/llvm/lib/Support/KnownBits.cpp
@@ -176,6 +176,22 @@ KnownBits KnownBits::smin(const KnownBits &LHS, const KnownBits &RHS) {
return Flip(umax(Flip(LHS), Flip(RHS)));
}
+KnownBits KnownBits::absdiff(const KnownBits &LHS, const KnownBits &RHS) {
+ // absdiff(LHS,RHS) = sub(umax(LHS,RHS), umin(LHS,RHS)).
+ KnownBits UMaxValue = umax(LHS, RHS);
+ KnownBits UMinValue = umin(LHS, RHS);
+ KnownBits MinMaxDiff = computeForAddSub(false, false, UMaxValue, UMinValue);
+
+ // find the common bits between sub(LHS,RHS) and sub(RHS,LHS).
+ KnownBits Diff0 = computeForAddSub(false, false, LHS, RHS);
+ KnownBits Diff1 = computeForAddSub(false, false, RHS, LHS);
+ KnownBits SubDiff = Diff0.intersectWith(Diff1);
+
+ KnownBits KnownAbsDiff = MinMaxDiff.unionWith(SubDiff);
+ assert(!KnownAbsDiff.hasConflict() && "Bad Output");
+ return KnownAbsDiff;
+}
+
static unsigned getMaxShiftAmount(const APInt &MaxValue, unsigned BitWidth) {
if (isPowerOf2_32(BitWidth))
return MaxValue.extractBitsAsZExtValue(Log2_32(BitWidth), 0);
diff --git a/llvm/lib/Support/Path.cpp b/llvm/lib/Support/Path.cpp
index c8de2c0625aa..acee228a0d04 100644
--- a/llvm/lib/Support/Path.cpp
+++ b/llvm/lib/Support/Path.cpp
@@ -850,7 +850,7 @@ createTemporaryFile(const Twine &Model, int &ResultFD,
"Model must be a simple filename.");
// Use P.begin() so that createUniqueEntity doesn't need to recreate Storage.
return createUniqueEntity(P.begin(), ResultFD, ResultPath, true, Type, Flags,
- owner_read | owner_write);
+ all_read | all_write);
}
static std::error_code
diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp
index d028302b8c4d..68f5c36e8faf 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -109,6 +109,7 @@ static const RISCVSupportedExtension SupportedExtensions[] = {
{"za128rs", {1, 0}},
{"za64rs", {1, 0}},
+ {"zacas", {1, 0}},
{"zawrs", {1, 0}},
{"zba", {1, 0}},
@@ -220,7 +221,6 @@ static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
{"zaamo", {0, 2}},
{"zabha", {1, 0}},
- {"zacas", {1, 0}},
{"zalasr", {0, 1}},
{"zalrsc", {0, 2}},
diff --git a/llvm/lib/Target/AArch64/AArch64.td b/llvm/lib/Target/AArch64/AArch64.td
index 169b00e5ebc9..b837066554f3 100644
--- a/llvm/lib/Target/AArch64/AArch64.td
+++ b/llvm/lib/Target/AArch64/AArch64.td
@@ -1514,7 +1514,7 @@ def ProcessorFeatures {
FeatureFPARMv8, FeatureFullFP16, FeatureNEON,
FeatureRCPC, FeatureSPE, FeatureSSBS,
FeaturePerfMon];
- list<SubtargetFeature> NeoverseN2 = [HasV9_0aOps, FeatureBF16, FeatureETE,
+ list<SubtargetFeature> NeoverseN2 = [HasV9_0aOps, FeatureBF16, FeatureETE, FeatureFP16FML,
FeatureMatMulInt8, FeatureMTE, FeatureSVE2,
FeatureSVE2BitPerm, FeatureTRBE,
FeaturePerfMon];
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 635beeed0df8..49bcab588e52 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -5179,7 +5179,8 @@ FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) {
SMEAttrs CallerAttrs(*FuncInfo.Fn);
- if (CallerAttrs.hasZAState() || CallerAttrs.hasStreamingInterfaceOrBody() ||
+ if (CallerAttrs.hasZAState() || CallerAttrs.hasZT0State() ||
+ CallerAttrs.hasStreamingInterfaceOrBody() ||
CallerAttrs.hasStreamingCompatibleInterface())
return nullptr;
return new AArch64FastISel(FuncInfo, LibInfo);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3b92e95d7c28..7f80e877cb24 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -271,11 +271,9 @@ static bool isMergePassthruOpcode(unsigned Opc) {
static bool isZeroingInactiveLanes(SDValue Op) {
switch (Op.getOpcode()) {
default:
- // We guarantee i1 splat_vectors to zero the other lanes by
- // implementing it with ptrue and possibly a punpklo for nxv1i1.
- if (ISD::isConstantSplatVectorAllOnes(Op.getNode()))
- return true;
return false;
+ // We guarantee i1 splat_vectors to zero the other lanes
+ case ISD::SPLAT_VECTOR:
case AArch64ISD::PTRUE:
case AArch64ISD::SETCC_MERGE_ZERO:
return true;
@@ -2549,7 +2547,6 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
MAKE_CASE(AArch64ISD::FSUB_PRED)
MAKE_CASE(AArch64ISD::RDSVL)
MAKE_CASE(AArch64ISD::BIC)
- MAKE_CASE(AArch64ISD::BIT)
MAKE_CASE(AArch64ISD::CBZ)
MAKE_CASE(AArch64ISD::CBNZ)
MAKE_CASE(AArch64ISD::TBZ)
@@ -7520,6 +7517,22 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
(AArch64::GPR32RegClass.contains(MO.getReg()) ||
AArch64::GPR64RegClass.contains(MO.getReg())))
MI.removeOperand(I);
+
+ // Add an implicit use of 'VG' for ADDXri/SUBXri, which are instructions that
+ // have nothing to do with VG, were it not that they are used to materialise a
+ // frame-address. If they contain a frame-index to a scalable vector, this
+ // will likely require an ADDVL instruction to materialise the address, thus
+ // reading VG.
+ const MachineFunction &MF = *MI.getMF();
+ if (MF.getInfo<AArch64FunctionInfo>()->hasStreamingModeChanges() &&
+ (MI.getOpcode() == AArch64::ADDXri ||
+ MI.getOpcode() == AArch64::SUBXri)) {
+ const MachineOperand &MO = MI.getOperand(1);
+ if (MO.isFI() && MF.getFrameInfo().getStackID(MO.getIndex()) ==
+ TargetStackID::ScalableVector)
+ MI.addOperand(MachineOperand::CreateReg(AArch64::VG, /*IsDef=*/false,
+ /*IsImplicit=*/true));
+ }
}
SDValue AArch64TargetLowering::changeStreamingMode(
@@ -25892,7 +25905,8 @@ bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
auto CallerAttrs = SMEAttrs(*Inst.getFunction());
auto CalleeAttrs = SMEAttrs(*Base);
if (CallerAttrs.requiresSMChange(CalleeAttrs) ||
- CallerAttrs.requiresLazySave(CalleeAttrs))
+ CallerAttrs.requiresLazySave(CalleeAttrs) ||
+ CallerAttrs.requiresPreservingZT0(CalleeAttrs))
return true;
}
return false;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index bec13484450d..c1fe76c07cba 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -285,9 +285,6 @@ enum NodeType : unsigned {
EORV_PRED,
ANDV_PRED,
- // Vector bitwise insertion
- BIT,
-
// Compare-and-branch
CBZ,
CBNZ,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 10ad5b1f8f25..7f8856db6c6e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -2809,6 +2809,7 @@ class AddSubImmShift<bit isSub, bit setFlags, RegisterClass dstRegtype,
let Inst{23-22} = imm{13-12}; // '00' => lsl #0, '01' => lsl #12
let Inst{21-10} = imm{11-0};
let DecoderMethod = "DecodeAddSubImmShift";
+ let hasPostISelHook = 1;
}
class BaseAddSubRegPseudo<RegisterClass regtype,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 39c96092f103..17e0e36ee682 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -9481,58 +9481,6 @@ unsigned llvm::getBLRCallOpcode(const MachineFunction &MF) {
return AArch64::BLR;
}
-bool AArch64InstrInfo::isReallyTriviallyReMaterializable(
- const MachineInstr &MI) const {
- const MachineFunction &MF = *MI.getMF();
- const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
-
- // If the function contains changes to streaming mode, then there
- // is a danger that rematerialised instructions end up between
- // instruction sequences (e.g. call sequences, or prolog/epilogue)
- // where the streaming-SVE mode is temporarily changed.
- if (AFI.hasStreamingModeChanges()) {
- // Avoid rematerializing rematerializable instructions that use/define
- // scalable values, such as 'pfalse' or 'ptrue', which result in different
- // results when the runtime vector length is different.
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- const MachineFrameInfo &MFI = MF.getFrameInfo();
- if (any_of(MI.operands(), [&MRI, &MFI](const MachineOperand &MO) {
- if (MO.isFI() &&
- MFI.getStackID(MO.getIndex()) == TargetStackID::ScalableVector)
- return true;
- if (!MO.isReg())
- return false;
-
- if (MO.getReg().isVirtual()) {
- const TargetRegisterClass *RC = MRI.getRegClass(MO.getReg());
- return AArch64::ZPRRegClass.hasSubClassEq(RC) ||
- AArch64::PPRRegClass.hasSubClassEq(RC);
- }
- return AArch64::ZPRRegClass.contains(MO.getReg()) ||
- AArch64::PPRRegClass.contains(MO.getReg());
- }))
- return false;
-
- // Avoid rematerializing instructions that return a value that is
- // different depending on vector length, even when it is not returned
- // in a scalable vector/predicate register.
- switch (MI.getOpcode()) {
- default:
- break;
- case AArch64::RDVLI_XI:
- case AArch64::ADDVL_XXI:
- case AArch64::ADDPL_XXI:
- case AArch64::CNTB_XPiI:
- case AArch64::CNTH_XPiI:
- case AArch64::CNTW_XPiI:
- case AArch64::CNTD_XPiI:
- return false;
- }
- }
-
- return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
-}
-
MachineBasicBlock::iterator
AArch64InstrInfo::probedStackAlloc(MachineBasicBlock::iterator MBBI,
Register TargetReg, bool FrameSetup) const {
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 63e0cb80d858..6c6689091ead 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -381,8 +381,6 @@ public:
int64_t &ByteSized,
int64_t &VGSized);
- bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
-
// Return true if address of the form BaseReg + Scale * ScaledReg + Offset can
// be used for a load/store of NumBytes. BaseReg is always present and
// implicit.
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index b01a8cd00025..52137c1f4065 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -730,7 +730,6 @@ def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;
-def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;
def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
@@ -5333,7 +5332,7 @@ defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
(and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
-defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
+defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit">;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
@@ -8216,8 +8215,10 @@ defm ST4 : SIMDLdSt4SingleAliases<"st4">;
//----------------------------------------------------------------------------
let Predicates = [HasAES] in {
+let isCommutable = 1 in {
def AESErr : AESTiedInst<0b0100, "aese", int_aarch64_crypto_aese>;
def AESDrr : AESTiedInst<0b0101, "aesd", int_aarch64_crypto_aesd>;
+}
def AESMCrr : AESInst< 0b0110, "aesmc", int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst< 0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index b919c116445c..531f21f9c043 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -443,6 +443,9 @@ AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
Reserved.set(SubReg);
}
+ // VG cannot be allocated
+ Reserved.set(AArch64::VG);
+
if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
SubReg.isValid(); ++SubReg)
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index acf067f2cc5a..2907ba74ff81 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -233,6 +233,8 @@ def MSRpstatePseudo :
(ins svcr_op:$pstatefield, timm0_1:$imm, GPR64:$rtpstate, timm0_1:$expected_pstate, variable_ops), []>,
Sched<[WriteSys]> {
let hasPostISelHook = 1;
+ let Uses = [VG];
+ let Defs = [VG];
}
def : Pat<(AArch64_smstart (i32 svcr_op:$pstate), (i64 GPR64:$rtpstate), (i64 timm0_1:$expected_pstate)),
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index 3dc3d31a34e8..26dbad713594 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -535,7 +535,8 @@ bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
}
SMEAttrs Attrs(F);
- if (Attrs.hasZAState() || Attrs.hasStreamingInterfaceOrBody() ||
+ if (Attrs.hasZAState() || Attrs.hasZT0State() ||
+ Attrs.hasStreamingInterfaceOrBody() ||
Attrs.hasStreamingCompatibleInterface())
return true;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index a2e805e8cb56..117c4004d41d 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -52,6 +52,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
const LLT v16s8 = LLT::fixed_vector(16, 8);
const LLT v8s8 = LLT::fixed_vector(8, 8);
const LLT v4s8 = LLT::fixed_vector(4, 8);
+ const LLT v2s8 = LLT::fixed_vector(2, 8);
const LLT v8s16 = LLT::fixed_vector(8, 16);
const LLT v4s16 = LLT::fixed_vector(4, 16);
const LLT v2s16 = LLT::fixed_vector(2, 16);
@@ -387,8 +388,14 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
.clampMaxNumElements(0, s32, 4)
.clampMaxNumElements(0, s64, 2)
.clampMaxNumElements(0, p0, 2)
+ // TODO: Use BITCAST for v2i8, v2i16 after G_TRUNC gets sorted out
+ .bitcastIf(typeInSet(0, {v4s8}),
+ [=](const LegalityQuery &Query) {
+ const LLT VecTy = Query.Types[0];
+ return std::pair(0, LLT::scalar(VecTy.getSizeInBits()));
+ })
.customIf(IsPtrVecPred)
- .scalarizeIf(typeIs(0, v2s16), 0);
+ .scalarizeIf(typeInSet(0, {v2s16, v2s8}), 0);
getActionDefinitionsBuilder(G_STORE)
.customIf([=](const LegalityQuery &Query) {
@@ -422,8 +429,14 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
.clampMaxNumElements(0, s64, 2)
.clampMaxNumElements(0, p0, 2)
.lowerIfMemSizeNotPow2()
+ // TODO: Use BITCAST for v2i8, v2i16 after G_TRUNC gets sorted out
+ .bitcastIf(typeInSet(0, {v4s8}),
+ [=](const LegalityQuery &Query) {
+ const LLT VecTy = Query.Types[0];
+ return std::pair(0, LLT::scalar(VecTy.getSizeInBits()));
+ })
.customIf(IsPtrVecPred)
- .scalarizeIf(typeIs(0, v2s16), 0);
+ .scalarizeIf(typeInSet(0, {v2s16, v2s8}), 0);
getActionDefinitionsBuilder(G_INDEXED_STORE)
// Idx 0 == Ptr, Idx 1 == Val
@@ -993,6 +1006,12 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
ABSActions
.legalFor({s32, s64});
ABSActions.legalFor(PackedVectorAllTypeList)
+ .widenScalarIf(
+ [=](const LegalityQuery &Query) { return Query.Types[0] == v4s8; },
+ [=](const LegalityQuery &Query) { return std::make_pair(0, v4s16); })
+ .widenScalarIf(
+ [=](const LegalityQuery &Query) { return Query.Types[0] == v2s16; },
+ [=](const LegalityQuery &Query) { return std::make_pair(0, v2s32); })
.clampNumElements(0, v8s8, v16s8)
.clampNumElements(0, v4s16, v8s16)
.clampNumElements(0, v2s32, v4s32)
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 44d9a8ac7cb6..33cb5f9734b8 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -223,6 +223,8 @@ def MSRpstatesvcrImm1
let Inst{8} = imm;
let Inst{7-5} = 0b011; // op2
let hasPostISelHook = 1;
+ let Uses = [VG];
+ let Defs = [VG];
}
def : InstAlias<"smstart", (MSRpstatesvcrImm1 0b011, 0b1)>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 789ec817d3d8..c8ca1832ec18 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -365,6 +365,7 @@ class sve_int_ptrue<bits<2> sz8_64, bits<3> opc, string asm, PPRRegOp pprty,
let ElementSize = pprty.ElementSize;
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
multiclass sve_int_ptrue<bits<3> opc, string asm, SDPatternOperator op> {
@@ -755,6 +756,7 @@ class sve_int_pfalse<bits<6> opc, string asm>
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
multiclass sve_int_pfalse<bits<6> opc, string asm> {
@@ -1090,6 +1092,7 @@ class sve_int_count<bits<3> opc, string asm>
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
multiclass sve_int_count<bits<3> opc, string asm, SDPatternOperator op> {
@@ -1982,6 +1985,7 @@ class sve_int_dup_mask_imm<string asm>
let DecoderMethod = "DecodeSVELogicalImmInstruction";
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
multiclass sve_int_dup_mask_imm<string asm> {
@@ -2862,6 +2866,7 @@ class sve_int_arith_vl<bit opc, string asm, bit streaming_sve = 0b0>
let Inst{4-0} = Rd;
let hasSideEffects = 0;
+ let Uses = [VG];
}
class sve_int_read_vl_a<bit op, bits<5> opc2, string asm, bit streaming_sve = 0b0>
@@ -2882,6 +2887,7 @@ class sve_int_read_vl_a<bit op, bits<5> opc2, string asm, bit streaming_sve = 0b
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
//===----------------------------------------------------------------------===//
@@ -4699,6 +4705,7 @@ class sve_int_dup_imm<bits<2> sz8_64, string asm,
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
multiclass sve_int_dup_imm<string asm> {
@@ -4741,6 +4748,7 @@ class sve_int_dup_fpimm<bits<2> sz8_64, Operand fpimmtype,
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
multiclass sve_int_dup_fpimm<string asm> {
@@ -5657,6 +5665,7 @@ class sve_int_index_ii<bits<2> sz8_64, string asm, ZPRRegOp zprty,
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
multiclass sve_int_index_ii<string asm> {
@@ -9308,6 +9317,7 @@ class sve2p1_ptrue_pn<string mnemonic, bits<2> sz, PNRP8to15RegOp pnrty, SDPatte
let hasSideEffects = 0;
let isReMaterializable = 1;
+ let Uses = [VG];
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td b/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td
index c5207228dc91..4be64629ddac 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td
@@ -66,6 +66,8 @@ def RetCC_SI_Gfx : CallingConv<[
def CC_SI_SHADER : CallingConv<[
+ CCIfType<[i1], CCPromoteToType<i32>>,
+
CCIfInReg<CCIfType<[f32, i32, f16, i16, v2i16, v2f16, bf16, v2bf16] , CCAssignToReg<[
SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 1c75c5a47c9d..0edbbf7cb0af 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -99,6 +99,7 @@ class AMDGPUCodeGenPrepareImpl
: public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
public:
const GCNSubtarget *ST = nullptr;
+ const AMDGPUTargetMachine *TM = nullptr;
const TargetLibraryInfo *TLInfo = nullptr;
AssumptionCache *AC = nullptr;
DominatorTree *DT = nullptr;
@@ -310,6 +311,7 @@ public:
bool visitICmpInst(ICmpInst &I);
bool visitSelectInst(SelectInst &I);
bool visitPHINode(PHINode &I);
+ bool visitAddrSpaceCastInst(AddrSpaceCastInst &I);
bool visitIntrinsicInst(IntrinsicInst &I);
bool visitBitreverseIntrinsicInst(IntrinsicInst &I);
@@ -2013,6 +2015,75 @@ bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
return true;
}
+/// \param V Value to check
+/// \param DL DataLayout
+/// \param TM TargetMachine (TODO: remove once DL contains nullptr values)
+/// \param AS Target Address Space
+/// \return true if \p V cannot be the null value of \p AS, false otherwise.
+static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL,
+ const AMDGPUTargetMachine &TM, unsigned AS) {
+ // Pointer cannot be null if it's a block address, GV or alloca.
+ // NOTE: We don't support extern_weak, but if we did, we'd need to check for
+ // it as the symbol could be null in such cases.
+ if (isa<BlockAddress>(V) || isa<GlobalValue>(V) || isa<AllocaInst>(V))
+ return true;
+
+ // Check nonnull arguments.
+ if (const auto *Arg = dyn_cast<Argument>(V); Arg && Arg->hasNonNullAttr())
+ return true;
+
+ // TODO: Calls that return nonnull?
+
+ // For all other things, use KnownBits.
+ // We either use 0 or all bits set to indicate null, so check whether the
+ // value can be zero or all ones.
+ //
+ // TODO: Use ValueTracking's isKnownNeverNull if it becomes aware that some
+ // address spaces have non-zero null values.
+ auto SrcPtrKB = computeKnownBits(V, DL).trunc(DL.getPointerSizeInBits(AS));
+ const auto NullVal = TM.getNullPointerValue(AS);
+ assert((NullVal == 0 || NullVal == -1) &&
+ "don't know how to check for this null value!");
+ return NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero();
+}
+
+bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
+ // Intrinsic doesn't support vectors, also it seems that it's often difficult
+ // to prove that a vector cannot have any nulls in it so it's unclear if it's
+ // worth supporting.
+ if (I.getType()->isVectorTy())
+ return false;
+
+ // Check if this can be lowered to a amdgcn.addrspacecast.nonnull.
+ // This is only worthwhile for casts from/to priv/local to flat.
+ const unsigned SrcAS = I.getSrcAddressSpace();
+ const unsigned DstAS = I.getDestAddressSpace();
+
+ bool CanLower = false;
+ if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
+ CanLower = (DstAS == AMDGPUAS::LOCAL_ADDRESS ||
+ DstAS == AMDGPUAS::PRIVATE_ADDRESS);
+ else if (DstAS == AMDGPUAS::FLAT_ADDRESS)
+ CanLower = (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
+ SrcAS == AMDGPUAS::PRIVATE_ADDRESS);
+ if (!CanLower)
+ return false;
+
+ SmallVector<const Value *, 4> WorkList;
+ getUnderlyingObjects(I.getOperand(0), WorkList);
+ if (!all_of(WorkList, [&](const Value *V) {
+ return isPtrKnownNeverNull(V, *DL, *TM, SrcAS);
+ }))
+ return false;
+
+ IRBuilder<> B(&I);
+ auto *Intrin = B.CreateIntrinsic(
+ I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)});
+ I.replaceAllUsesWith(Intrin);
+ I.eraseFromParent();
+ return true;
+}
+
bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case Intrinsic::bitreverse:
@@ -2196,6 +2267,7 @@ bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
return false;
const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
+ Impl.TM = &TM;
Impl.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
Impl.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
@@ -2214,6 +2286,7 @@ PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
AMDGPUCodeGenPrepareImpl Impl;
Impl.Mod = F.getParent();
Impl.DL = &Impl.Mod->getDataLayout();
+ Impl.TM = static_cast<const AMDGPUTargetMachine *>(&TM);
Impl.TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
Impl.AC = &FAM.getResult<AssumptionAnalysis>(F);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def b/llvm/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def
index d6a94c972340..0d41d5d2186f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGenRegisterBankInfo.def
@@ -284,6 +284,7 @@ const RegisterBankInfo::ValueMapping *getValueMapping(unsigned BankID,
break;
}
+ assert(Idx < std::size(ValMappings));
assert(Log2_32_Ceil(Size) == Log2_32_Ceil(ValMappings[Idx].BreakDown->Length));
assert(BankID == ValMappings[Idx].BreakDown->RegBank->getID());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
index 4f65a95de82a..a0c6bf7cc31c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
@@ -177,7 +177,16 @@ void DivergenceLoweringHelper::buildMergeLaneMasks(
B.buildInstr(OrOp, {DstReg}, {PrevMaskedReg, CurMaskedReg});
}
-void DivergenceLoweringHelper::constrainAsLaneMask(Incoming &In) { return; }
+// GlobalISel has to constrain S1 incoming taken as-is with lane mask register
+// class. Insert a copy of Incoming.Reg to new lane mask inside Incoming.Block,
+// Incoming.Reg becomes that new lane mask.
+void DivergenceLoweringHelper::constrainAsLaneMask(Incoming &In) {
+ B.setInsertPt(*In.Block, In.Block->getFirstTerminator());
+
+ auto Copy = B.buildCopy(LLT::scalar(1), In.Reg);
+ MRI->setRegClass(Copy.getReg(0), ST->getBoolRC());
+ In.Reg = Copy.getReg(0);
+}
} // End anonymous namespace.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index e3f724850795..57769fe998d1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -2337,8 +2337,6 @@ private:
ScheduleDAGMI *DAG;
- std::vector<std::unique_ptr<ScheduleDAGMutation>> *SavedMutations;
-
// Organize lists of SchedGroups by their SyncID. SchedGroups /
// SCHED_GROUP_BARRIERs with different SyncIDs will have no edges added
// between then.
@@ -2381,10 +2379,7 @@ public:
AMDGPU::SchedulingPhase Phase = AMDGPU::SchedulingPhase::Initial;
IGroupLPDAGMutation() = default;
- IGroupLPDAGMutation(
- AMDGPU::SchedulingPhase Phase,
- std::vector<std::unique_ptr<ScheduleDAGMutation>> *SavedMutations)
- : SavedMutations(SavedMutations), Phase(Phase) {}
+ IGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase) : Phase(Phase) {}
};
unsigned SchedGroup::NumSchedGroups = 0;
@@ -2602,13 +2597,6 @@ void IGroupLPDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
PS.solve();
return;
}
-
- if (!SavedMutations)
- return;
-
- // We did not apply a mutation, fall back to SavedMutations
- for (auto &m : *SavedMutations)
- m->apply(DAG);
}
void IGroupLPDAGMutation::addSchedBarrierEdges(SUnit &SchedBarrier) {
@@ -2707,10 +2695,9 @@ namespace llvm {
/// same scheduling region (e.g. pre and post-RA scheduling / multiple
/// scheduling "phases"), we can reenter this mutation framework more than once
/// for a given region.
-std::unique_ptr<ScheduleDAGMutation> createIGroupLPDAGMutation(
- AMDGPU::SchedulingPhase Phase,
- std::vector<std::unique_ptr<ScheduleDAGMutation>> *SavedMutations) {
- return std::make_unique<IGroupLPDAGMutation>(Phase, SavedMutations);
+std::unique_ptr<ScheduleDAGMutation>
+createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase) {
+ return std::make_unique<IGroupLPDAGMutation>(Phase);
}
} // end namespace llvm
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.h b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.h
index 46ef4d702d00..aff7096f26d6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.h
@@ -20,9 +20,8 @@ namespace AMDGPU {
enum class SchedulingPhase { Initial, PreRAReentry, PostRA };
} // namespace AMDGPU
-std::unique_ptr<ScheduleDAGMutation> createIGroupLPDAGMutation(
- AMDGPU::SchedulingPhase Phase,
- std::vector<std::unique_ptr<ScheduleDAGMutation>> *SavedMutations);
+std::unique_ptr<ScheduleDAGMutation>
+createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase);
} // namespace llvm
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e8cc87e52e65..8a71550e5532 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3089,10 +3089,10 @@ SDValue AMDGPUTargetLowering::lowerCTLZResults(SDValue Op,
assert(ResultVT == Arg.getValueType());
auto const LeadingZeroes = 32u - ResultVT.getFixedSizeInBits();
+ auto SubVal = DAG.getConstant(LeadingZeroes, SL, MVT::i32);
auto NewOp = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Arg);
- auto ShiftVal = DAG.getConstant(LeadingZeroes, SL, MVT::i32);
- NewOp = DAG.getNode(ISD::SHL, SL, MVT::i32, NewOp, ShiftVal);
NewOp = DAG.getNode(Op.getOpcode(), SL, MVT::i32, NewOp);
+ NewOp = DAG.getNode(ISD::SUB, SL, MVT::i32, NewOp, SubVal);
return DAG.getNode(ISD::TRUNCATE, SL, ResultVT, NewOp);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index aacc3590a5db..b2c65e61b009 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -34,12 +34,6 @@
using namespace llvm;
using namespace MIPatternMatch;
-static cl::opt<bool> AllowRiskySelect(
- "amdgpu-global-isel-risky-select",
- cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
- cl::init(false),
- cl::ReallyHidden);
-
#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
@@ -211,14 +205,12 @@ bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
const Register DefReg = I.getOperand(0).getReg();
const LLT DefTy = MRI->getType(DefReg);
- if (DefTy == LLT::scalar(1)) {
- if (!AllowRiskySelect) {
- LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
- return false;
- }
-
- LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
- }
+ // S1 G_PHIs should not be selected in instruction-select, instead:
+ // - divergent S1 G_PHI should go through lane mask merging algorithm
+ // and be fully inst-selected in AMDGPUGlobalISelDivergenceLowering
+ // - uniform S1 G_PHI should be lowered into S32 G_PHI in AMDGPURegBankSelect
+ if (DefTy == LLT::scalar(1))
+ return false;
// TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 13d751072913..4c3b983f2960 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2247,10 +2247,16 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
MachineIRBuilder &B) const {
MachineFunction &MF = B.getMF();
+ // MI can either be a G_ADDRSPACE_CAST or a
+ // G_INTRINSIC @llvm.amdgcn.addrspacecast.nonnull
+ assert(MI.getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
+ (isa<GIntrinsic>(MI) && cast<GIntrinsic>(MI).getIntrinsicID() ==
+ Intrinsic::amdgcn_addrspacecast_nonnull));
+
const LLT S32 = LLT::scalar(32);
Register Dst = MI.getOperand(0).getReg();
- Register Src = MI.getOperand(1).getReg();
-
+ Register Src = isa<GIntrinsic>(MI) ? MI.getOperand(2).getReg()
+ : MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);
unsigned DestAS = DstTy.getAddressSpace();
@@ -2271,7 +2277,9 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
if (SrcAS == AMDGPUAS::FLAT_ADDRESS &&
(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS)) {
- if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+ // For llvm.amdgcn.addrspacecast.nonnull we can always assume non-null, for
+ // G_ADDRSPACE_CAST we need to guess.
+ if (isa<GIntrinsic>(MI) || isKnownNonNull(Src, MRI, TM, SrcAS)) {
// Extract low 32-bits of the pointer.
B.buildExtract(Dst, Src, 0);
MI.eraseFromParent();
@@ -2308,7 +2316,9 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
// avoid the ptrtoint?
auto BuildPtr = B.buildMergeLikeInstr(DstTy, {SrcAsInt, ApertureReg});
- if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+ // For llvm.amdgcn.addrspacecast.nonnull we can always assume non-null, for
+ // G_ADDRSPACE_CAST we need to guess.
+ if (isa<GIntrinsic>(MI) || isKnownNonNull(Src, MRI, TM, SrcAS)) {
B.buildCopy(Dst, BuildPtr);
MI.eraseFromParent();
return true;
@@ -7020,6 +7030,8 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
return false;
}
+ case Intrinsic::amdgcn_addrspacecast_nonnull:
+ return legalizeAddrSpaceCast(MI, MRI, B);
case Intrinsic::amdgcn_make_buffer_rsrc:
return legalizePointerAsRsrcIntrin(MI, MRI, B);
case Intrinsic::amdgcn_kernarg_segment_ptr:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 9dade7442c35..e2678e8336c5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -2292,7 +2292,7 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
MachineOperand::CreateReg(Reg, false, false, true);
ArrayRef<MachineOperand> Cond(RegOp);
LLVM_DEBUG(dbgs() << "RegionExitReg: ");
- LLVM_DEBUG(Cond[0].print(dbgs(), TRI));
+ LLVM_DEBUG(RegOp.print(dbgs(), TRI));
LLVM_DEBUG(dbgs() << "\n");
TII->insertBranch(*RegionExit, CurrentRegion->getEntry(), RegionExit,
Cond, DebugLoc());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 0d830df1f1f1..76e843455bab 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -461,8 +461,7 @@ createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
if (ST.shouldClusterStores())
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
- DAG->addMutation(
- createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial, nullptr));
+ DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
return DAG;
@@ -472,8 +471,7 @@ static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
ScheduleDAGMILive *DAG =
new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
- DAG->addMutation(
- createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial, nullptr));
+ DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
return DAG;
}
@@ -937,7 +935,7 @@ public:
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
DAG->addMutation(
- createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA, nullptr));
+ createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
DAG->addMutation(createVOPDPairingMutation());
return DAG;
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index b7b471d8dc7b..cb4eddfe5320 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1685,24 +1685,48 @@ public:
private:
struct OperandInfoTy {
SMLoc Loc;
- int64_t Id;
+ int64_t Val;
bool IsSymbolic = false;
bool IsDefined = false;
- OperandInfoTy(int64_t Id_) : Id(Id_) {}
+ OperandInfoTy(int64_t Val) : Val(Val) {}
+ };
+
+ struct StructuredOpField : OperandInfoTy {
+ StringLiteral Id;
+ StringLiteral Desc;
+ unsigned Width;
+ bool IsDefined = false;
+
+ StructuredOpField(StringLiteral Id, StringLiteral Desc, unsigned Width,
+ int64_t Default)
+ : OperandInfoTy(Default), Id(Id), Desc(Desc), Width(Width) {}
+ virtual ~StructuredOpField() = default;
+
+ bool Error(AMDGPUAsmParser &Parser, const Twine &Err) const {
+ Parser.Error(Loc, "invalid " + Desc + ": " + Err);
+ return false;
+ }
+
+ virtual bool validate(AMDGPUAsmParser &Parser) const {
+ if (IsSymbolic && Val == OPR_ID_UNSUPPORTED)
+ return Error(Parser, "not supported on this GPU");
+ if (!isUIntN(Width, Val))
+ return Error(Parser, "only " + Twine(Width) + "-bit values are legal");
+ return true;
+ }
};
+ ParseStatus parseStructuredOpFields(ArrayRef<StructuredOpField *> Fields);
+ bool validateStructuredOpFields(ArrayRef<const StructuredOpField *> Fields);
+
bool parseSendMsgBody(OperandInfoTy &Msg, OperandInfoTy &Op, OperandInfoTy &Stream);
bool validateSendMsg(const OperandInfoTy &Msg,
const OperandInfoTy &Op,
const OperandInfoTy &Stream);
- bool parseHwregBody(OperandInfoTy &HwReg,
- OperandInfoTy &Offset,
- OperandInfoTy &Width);
- bool validateHwreg(const OperandInfoTy &HwReg,
- const OperandInfoTy &Offset,
- const OperandInfoTy &Width);
+ ParseStatus parseHwregFunc(OperandInfoTy &HwReg, OperandInfoTy &Offset,
+ OperandInfoTy &Width);
SMLoc getFlatOffsetLoc(const OperandVector &Operands) const;
SMLoc getSMEMOffsetLoc(const OperandVector &Operands) const;
@@ -7197,71 +7221,44 @@ bool AMDGPUOperand::isDepCtr() const { return isS16Imm(); }
// hwreg
//===----------------------------------------------------------------------===//
-bool
-AMDGPUAsmParser::parseHwregBody(OperandInfoTy &HwReg,
- OperandInfoTy &Offset,
- OperandInfoTy &Width) {
+ParseStatus AMDGPUAsmParser::parseHwregFunc(OperandInfoTy &HwReg,
+ OperandInfoTy &Offset,
+ OperandInfoTy &Width) {
using namespace llvm::AMDGPU::Hwreg;
+ if (!trySkipId("hwreg", AsmToken::LParen))
+ return ParseStatus::NoMatch;
+
// The register may be specified by name or using a numeric code
HwReg.Loc = getLoc();
if (isToken(AsmToken::Identifier) &&
- (HwReg.Id = getHwregId(getTokenStr(), getSTI())) != OPR_ID_UNKNOWN) {
+ (HwReg.Val = getHwregId(getTokenStr(), getSTI())) != OPR_ID_UNKNOWN) {
HwReg.IsSymbolic = true;
lex(); // skip register name
- } else if (!parseExpr(HwReg.Id, "a register name")) {
- return false;
+ } else if (!parseExpr(HwReg.Val, "a register name")) {
+ return ParseStatus::Failure;
}
if (trySkipToken(AsmToken::RParen))
- return true;
+ return ParseStatus::Success;
// parse optional params
if (!skipToken(AsmToken::Comma, "expected a comma or a closing parenthesis"))
- return false;
+ return ParseStatus::Failure;
Offset.Loc = getLoc();
- if (!parseExpr(Offset.Id))
- return false;
+ if (!parseExpr(Offset.Val))
+ return ParseStatus::Failure;
if (!skipToken(AsmToken::Comma, "expected a comma"))
- return false;
+ return ParseStatus::Failure;
Width.Loc = getLoc();
- return parseExpr(Width.Id) &&
- skipToken(AsmToken::RParen, "expected a closing parenthesis");
-}
-
-bool
-AMDGPUAsmParser::validateHwreg(const OperandInfoTy &HwReg,
- const OperandInfoTy &Offset,
- const OperandInfoTy &Width) {
-
- using namespace llvm::AMDGPU::Hwreg;
+ if (!parseExpr(Width.Val) ||
+ !skipToken(AsmToken::RParen, "expected a closing parenthesis"))
+ return ParseStatus::Failure;
- if (HwReg.IsSymbolic) {
- if (HwReg.Id == OPR_ID_UNSUPPORTED) {
- Error(HwReg.Loc,
- "specified hardware register is not supported on this GPU");
- return false;
- }
- } else {
- if (!isValidHwreg(HwReg.Id)) {
- Error(HwReg.Loc,
- "invalid code of hardware register: only 6-bit values are legal");
- return false;
- }
- }
- if (!isValidHwregOffset(Offset.Id)) {
- Error(Offset.Loc, "invalid bit offset: only 5-bit values are legal");
- return false;
- }
- if (!isValidHwregWidth(Width.Id)) {
- Error(Width.Loc,
- "invalid bitfield width: only values from 1 to 32 are legal");
- return false;
- }
- return true;
+ return ParseStatus::Success;
}
ParseStatus AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
@@ -7270,24 +7267,40 @@ ParseStatus AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
int64_t ImmVal = 0;
SMLoc Loc = getLoc();
- if (trySkipId("hwreg", AsmToken::LParen)) {
- OperandInfoTy HwReg(OPR_ID_UNKNOWN);
- OperandInfoTy Offset(HwregOffset::Default);
- OperandInfoTy Width(HwregSize::Default);
- if (parseHwregBody(HwReg, Offset, Width) &&
- validateHwreg(HwReg, Offset, Width)) {
- ImmVal = HwregEncoding::encode(HwReg.Id, Offset.Id, Width.Id);
- } else {
- return ParseStatus::Failure;
+ StructuredOpField HwReg("id", "hardware register", HwregId::Width,
+ HwregId::Default);
+ StructuredOpField Offset("offset", "bit offset", HwregOffset::Width,
+ HwregOffset::Default);
+ struct : StructuredOpField {
+ using StructuredOpField::StructuredOpField;
+ bool validate(AMDGPUAsmParser &Parser) const override {
+ if (!isUIntN(Width, Val - 1))
+ return Error(Parser, "only values from 1 to 32 are legal");
+ return true;
}
- } else if (parseExpr(ImmVal, "a hwreg macro")) {
- if (ImmVal < 0 || !isUInt<16>(ImmVal))
- return Error(Loc, "invalid immediate: only 16-bit values are legal");
- } else {
- return ParseStatus::Failure;
+ } Width("size", "bitfield width", HwregSize::Width, HwregSize::Default);
+ ParseStatus Res = parseStructuredOpFields({&HwReg, &Offset, &Width});
+
+ if (Res.isNoMatch())
+ Res = parseHwregFunc(HwReg, Offset, Width);
+
+ if (Res.isSuccess()) {
+ if (!validateStructuredOpFields({&HwReg, &Offset, &Width}))
+ return ParseStatus::Failure;
+ ImmVal = HwregEncoding::encode(HwReg.Val, Offset.Val, Width.Val);
}
- Operands.push_back(AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTyHwreg));
+ if (Res.isNoMatch() &&
+ parseExpr(ImmVal, "a hwreg macro, structured immediate"))
+ Res = ParseStatus::Success;
+
+ if (!Res.isSuccess())
+ return ParseStatus::Failure;
+
+ if (!isUInt<16>(ImmVal))
+ return Error(Loc, "invalid immediate: only 16-bit values are legal");
+ Operands.push_back(
+ AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTyHwreg));
return ParseStatus::Success;
}
@@ -7307,10 +7320,10 @@ AMDGPUAsmParser::parseSendMsgBody(OperandInfoTy &Msg,
Msg.Loc = getLoc();
if (isToken(AsmToken::Identifier) &&
- (Msg.Id = getMsgId(getTokenStr(), getSTI())) != OPR_ID_UNKNOWN) {
+ (Msg.Val = getMsgId(getTokenStr(), getSTI())) != OPR_ID_UNKNOWN) {
Msg.IsSymbolic = true;
lex(); // skip message name
- } else if (!parseExpr(Msg.Id, "a message name")) {
+ } else if (!parseExpr(Msg.Val, "a message name")) {
return false;
}
@@ -7318,16 +7331,16 @@ AMDGPUAsmParser::parseSendMsgBody(OperandInfoTy &Msg,
Op.IsDefined = true;
Op.Loc = getLoc();
if (isToken(AsmToken::Identifier) &&
- (Op.Id = getMsgOpId(Msg.Id, getTokenStr())) >= 0) {
+ (Op.Val = getMsgOpId(Msg.Val, getTokenStr())) >= 0) {
lex(); // skip operation name
- } else if (!parseExpr(Op.Id, "an operation name")) {
+ } else if (!parseExpr(Op.Val, "an operation name")) {
return false;
}
if (trySkipToken(AsmToken::Comma)) {
Stream.IsDefined = true;
Stream.Loc = getLoc();
- if (!parseExpr(Stream.Id))
+ if (!parseExpr(Stream.Val))
return false;
}
}
@@ -7347,17 +7360,17 @@ AMDGPUAsmParser::validateSendMsg(const OperandInfoTy &Msg,
bool Strict = Msg.IsSymbolic;
if (Strict) {
- if (Msg.Id == OPR_ID_UNSUPPORTED) {
+ if (Msg.Val == OPR_ID_UNSUPPORTED) {
Error(Msg.Loc, "specified message id is not supported on this GPU");
return false;
}
} else {
- if (!isValidMsgId(Msg.Id, getSTI())) {
+ if (!isValidMsgId(Msg.Val, getSTI())) {
Error(Msg.Loc, "invalid message id");
return false;
}
}
- if (Strict && (msgRequiresOp(Msg.Id, getSTI()) != Op.IsDefined)) {
+ if (Strict && (msgRequiresOp(Msg.Val, getSTI()) != Op.IsDefined)) {
if (Op.IsDefined) {
Error(Op.Loc, "message does not support operations");
} else {
@@ -7365,16 +7378,16 @@ AMDGPUAsmParser::validateSendMsg(const OperandInfoTy &Msg,
}
return false;
}
- if (!isValidMsgOp(Msg.Id, Op.Id, getSTI(), Strict)) {
+ if (!isValidMsgOp(Msg.Val, Op.Val, getSTI(), Strict)) {
Error(Op.Loc, "invalid operation id");
return false;
}
- if (Strict && !msgSupportsStream(Msg.Id, Op.Id, getSTI()) &&
+ if (Strict && !msgSupportsStream(Msg.Val, Op.Val, getSTI()) &&
Stream.IsDefined) {
Error(Stream.Loc, "message operation does not support streams");
return false;
}
- if (!isValidMsgStream(Msg.Id, Op.Id, Stream.Id, getSTI(), Strict)) {
+ if (!isValidMsgStream(Msg.Val, Op.Val, Stream.Val, getSTI(), Strict)) {
Error(Stream.Loc, "invalid message stream id");
return false;
}
@@ -7393,7 +7406,7 @@ ParseStatus AMDGPUAsmParser::parseSendMsg(OperandVector &Operands) {
OperandInfoTy Stream(STREAM_ID_NONE_);
if (parseSendMsgBody(Msg, Op, Stream) &&
validateSendMsg(Msg, Op, Stream)) {
- ImmVal = encodeMsg(Msg.Id, Op.Id, Stream.Id);
+ ImmVal = encodeMsg(Msg.Val, Op.Val, Stream.Val);
} else {
return ParseStatus::Failure;
}
@@ -7730,6 +7743,48 @@ AMDGPUAsmParser::getConstLoc(const OperandVector &Operands) const {
return getOperandLoc(Test, Operands);
}
+ParseStatus
+AMDGPUAsmParser::parseStructuredOpFields(ArrayRef<StructuredOpField *> Fields) {
+ if (!trySkipToken(AsmToken::LCurly))
+ return ParseStatus::NoMatch;
+
+ bool First = true;
+ while (!trySkipToken(AsmToken::RCurly)) {
+ if (!First &&
+ !skipToken(AsmToken::Comma, "comma or closing brace expected"))
+ return ParseStatus::Failure;
+
+ StringRef Id = getTokenStr();
+ SMLoc IdLoc = getLoc();
+ if (!skipToken(AsmToken::Identifier, "field name expected") ||
+ !skipToken(AsmToken::Colon, "colon expected"))
+ return ParseStatus::Failure;
+
+ auto I =
+ find_if(Fields, [Id](StructuredOpField *F) { return F->Id == Id; });
+ if (I == Fields.end())
+ return Error(IdLoc, "unknown field");
+ if ((*I)->IsDefined)
+ return Error(IdLoc, "duplicate field");
+
+ // TODO: Support symbolic values.
+ (*I)->Loc = getLoc();
+ if (!parseExpr((*I)->Val))
+ return ParseStatus::Failure;
+ (*I)->IsDefined = true;
+
+ First = false;
+ }
+ return ParseStatus::Success;
+}
+
+bool AMDGPUAsmParser::validateStructuredOpFields(
+ ArrayRef<const StructuredOpField *> Fields) {
+ return all_of(Fields, [this](const StructuredOpField *F) {
+ return F->validate(*this);
+ });
+}
+
//===----------------------------------------------------------------------===//
// swizzle
//===----------------------------------------------------------------------===//
@@ -8113,20 +8168,6 @@ void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
bool IsAtomicReturn = false;
if (IsAtomic) {
- for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
- if (!Op.isCPol())
- continue;
- IsAtomicReturn = Op.getImm() & AMDGPU::CPol::GLC;
- break;
- }
-
- if (!IsAtomicReturn) {
- int NewOpc = AMDGPU::getAtomicNoRetOp(Inst.getOpcode());
- if (NewOpc != -1)
- Inst.setOpcode(NewOpc);
- }
-
IsAtomicReturn = MII.get(Inst.getOpcode()).TSFlags &
SIInstrFlags::IsAtomicRet;
}
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 7f812ed7871b..7bb92256fbdd 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -127,6 +127,7 @@ class MTBUF_Real <MTBUF_Pseudo ps, string real_name = ps.Mnemonic> :
// copy relevant pseudo op flags
let UseNamedOperandTable = ps.UseNamedOperandTable;
let SubtargetPredicate = ps.SubtargetPredicate;
+ let OtherPredicates = ps.OtherPredicates;
let AsmMatchConverter = ps.AsmMatchConverter;
let Constraints = ps.Constraints;
let DisableEncoding = ps.DisableEncoding;
@@ -154,12 +155,12 @@ class MTBUF_Real <MTBUF_Pseudo ps, string real_name = ps.Mnemonic> :
}
class getMTBUFInsDA<list<RegisterClass> vdataList,
- list<RegisterClass> vaddrList=[], bit hasGFX12Enc> {
+ list<RegisterClass> vaddrList=[], bit hasRestrictedSOffset> {
RegisterClass vdataClass = !if(!empty(vdataList), ?, !head(vdataList));
RegisterClass vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));
RegisterOperand vdata_op = getLdStRegisterOperand<vdataClass>.ret;
- dag SOffset = !if(hasGFX12Enc, (ins SReg_32:$soffset),
+ dag SOffset = !if(hasRestrictedSOffset, (ins SReg_32:$soffset),
(ins SCSrc_b32:$soffset));
dag NonVaddrInputs = !con((ins SReg_128:$srsrc), SOffset,
@@ -173,13 +174,13 @@ class getMTBUFInsDA<list<RegisterClass> vdataList,
!con((ins vdata_op:$vdata), Inputs));
}
-class getMTBUFIns<int addrKind, list<RegisterClass> vdataList=[], bit hasGFX12Enc> {
+class getMTBUFIns<int addrKind, list<RegisterClass> vdataList=[], bit hasRestrictedSOffset> {
dag ret =
- !if(!eq(addrKind, BUFAddrKind.Offset), getMTBUFInsDA<vdataList, [], hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.OffEn), getMTBUFInsDA<vdataList, [VGPR_32], hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.IdxEn), getMTBUFInsDA<vdataList, [VGPR_32], hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.BothEn), getMTBUFInsDA<vdataList, [VReg_64], hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.Addr64), getMTBUFInsDA<vdataList, [VReg_64], hasGFX12Enc>.ret,
+ !if(!eq(addrKind, BUFAddrKind.Offset), getMTBUFInsDA<vdataList, [], hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.OffEn), getMTBUFInsDA<vdataList, [VGPR_32], hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.IdxEn), getMTBUFInsDA<vdataList, [VGPR_32], hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.BothEn), getMTBUFInsDA<vdataList, [VReg_64], hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.Addr64), getMTBUFInsDA<vdataList, [VReg_64], hasRestrictedSOffset>.ret,
(ins))))));
}
@@ -214,13 +215,13 @@ class MTBUF_Load_Pseudo <string opName,
int addrKind,
RegisterClass vdataClass,
int elems,
- bit hasGFX12Enc = 0,
+ bit hasRestrictedSOffset = 0,
list<dag> pattern=[],
// Workaround bug bz30254
int addrKindCopy = addrKind>
: MTBUF_Pseudo<opName,
(outs getLdStRegisterOperand<vdataClass>.ret:$vdata),
- getMTBUFIns<addrKindCopy, [], hasGFX12Enc>.ret,
+ getMTBUFIns<addrKindCopy, [], hasRestrictedSOffset>.ret,
getMTBUFAsmOps<addrKindCopy>.ret,
pattern>,
MTBUF_SetupAddr<addrKindCopy> {
@@ -231,23 +232,23 @@ class MTBUF_Load_Pseudo <string opName,
}
multiclass MTBUF_Pseudo_Loads_Helper<string opName, RegisterClass vdataClass,
- int elems, bit hasGFX12Enc> {
+ int elems, bit hasRestrictedSOffset> {
- def _OFFSET : MTBUF_Load_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasGFX12Enc>,
+ def _OFFSET : MTBUF_Load_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasRestrictedSOffset>,
MTBUFAddr64Table<0, NAME>;
- def _ADDR64 : MTBUF_Load_Pseudo <opName, BUFAddrKind.Addr64, vdataClass, elems, hasGFX12Enc>,
+ def _ADDR64 : MTBUF_Load_Pseudo <opName, BUFAddrKind.Addr64, vdataClass, elems, hasRestrictedSOffset>,
MTBUFAddr64Table<1, NAME>;
- def _OFFEN : MTBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasGFX12Enc>;
- def _IDXEN : MTBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasGFX12Enc>;
- def _BOTHEN : MTBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasGFX12Enc>;
+ def _OFFEN : MTBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _IDXEN : MTBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _BOTHEN : MTBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasRestrictedSOffset>;
let DisableWQM = 1 in {
- def _OFFSET_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasGFX12Enc>;
- def _OFFEN_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasGFX12Enc>;
- def _IDXEN_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasGFX12Enc>;
- def _BOTHEN_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasGFX12Enc>;
+ def _OFFSET_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasRestrictedSOffset>;
+ def _OFFEN_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _IDXEN_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _BOTHEN_exact : MTBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasRestrictedSOffset>;
}
}
@@ -261,14 +262,14 @@ class MTBUF_Store_Pseudo <string opName,
int addrKind,
RegisterClass vdataClass,
int elems,
- bit hasGFX12Enc = 0,
+ bit hasRestrictedSOffset = 0,
list<dag> pattern=[],
// Workaround bug bz30254
int addrKindCopy = addrKind,
RegisterClass vdataClassCopy = vdataClass>
: MTBUF_Pseudo<opName,
(outs),
- getMTBUFIns<addrKindCopy, [vdataClassCopy], hasGFX12Enc>.ret,
+ getMTBUFIns<addrKindCopy, [vdataClassCopy], hasRestrictedSOffset>.ret,
getMTBUFAsmOps<addrKindCopy>.ret,
pattern>,
MTBUF_SetupAddr<addrKindCopy> {
@@ -279,23 +280,23 @@ class MTBUF_Store_Pseudo <string opName,
}
multiclass MTBUF_Pseudo_Stores_Helper<string opName, RegisterClass vdataClass,
- int elems, bit hasGFX12Enc> {
+ int elems, bit hasRestrictedSOffset> {
- def _OFFSET : MTBUF_Store_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasGFX12Enc>,
+ def _OFFSET : MTBUF_Store_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasRestrictedSOffset>,
MTBUFAddr64Table<0, NAME>;
- def _ADDR64 : MTBUF_Store_Pseudo <opName, BUFAddrKind.Addr64, vdataClass, elems, hasGFX12Enc>,
+ def _ADDR64 : MTBUF_Store_Pseudo <opName, BUFAddrKind.Addr64, vdataClass, elems, hasRestrictedSOffset>,
MTBUFAddr64Table<1, NAME>;
- def _OFFEN : MTBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasGFX12Enc>;
- def _IDXEN : MTBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasGFX12Enc>;
- def _BOTHEN : MTBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasGFX12Enc>;
+ def _OFFEN : MTBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _IDXEN : MTBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _BOTHEN : MTBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasRestrictedSOffset>;
let DisableWQM = 1 in {
- def _OFFSET_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasGFX12Enc>;
- def _OFFEN_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasGFX12Enc>;
- def _IDXEN_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasGFX12Enc>;
- def _BOTHEN_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasGFX12Enc>;
+ def _OFFSET_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.Offset, vdataClass, elems, hasRestrictedSOffset>;
+ def _OFFEN_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _IDXEN_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, vdataClass, elems, hasRestrictedSOffset>;
+ def _BOTHEN_exact : MTBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, vdataClass, elems, hasRestrictedSOffset>;
}
}
@@ -404,12 +405,12 @@ class getLdStVDataRegisterOperand<RegisterClass RC, bit isTFE> {
}
class getMUBUFInsDA<list<RegisterClass> vdataList,
- list<RegisterClass> vaddrList, bit isTFE, bit hasGFX12Enc> {
+ list<RegisterClass> vaddrList, bit isTFE, bit hasRestrictedSOffset> {
RegisterClass vdataClass = !if(!empty(vdataList), ?, !head(vdataList));
RegisterClass vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));
RegisterOperand vdata_op = getLdStVDataRegisterOperand<vdataClass, isTFE>.ret;
- dag SOffset = !if(hasGFX12Enc, (ins SReg_32:$soffset), (ins SCSrc_b32:$soffset));
+ dag SOffset = !if(hasRestrictedSOffset, (ins SReg_32:$soffset), (ins SCSrc_b32:$soffset));
dag NonVaddrInputs = !con((ins SReg_128:$srsrc), SOffset, (ins Offset:$offset, CPol_0:$cpol, i1imm_0:$swz));
dag Inputs = !if(!empty(vaddrList), NonVaddrInputs, !con((ins vaddrClass:$vaddr), NonVaddrInputs));
@@ -435,13 +436,13 @@ class getMUBUFElements<ValueType vt> {
);
}
-class getMUBUFIns<int addrKind, list<RegisterClass> vdataList, bit isTFE, bit hasGFX12Enc> {
+class getMUBUFIns<int addrKind, list<RegisterClass> vdataList, bit isTFE, bit hasRestrictedSOffset> {
dag ret =
- !if(!eq(addrKind, BUFAddrKind.Offset), getMUBUFInsDA<vdataList, [], isTFE, hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.OffEn), getMUBUFInsDA<vdataList, [VGPR_32], isTFE, hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.IdxEn), getMUBUFInsDA<vdataList, [VGPR_32], isTFE, hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.BothEn), getMUBUFInsDA<vdataList, [VReg_64], isTFE, hasGFX12Enc>.ret,
- !if(!eq(addrKind, BUFAddrKind.Addr64), getMUBUFInsDA<vdataList, [VReg_64], isTFE, hasGFX12Enc>.ret,
+ !if(!eq(addrKind, BUFAddrKind.Offset), getMUBUFInsDA<vdataList, [], isTFE, hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.OffEn), getMUBUFInsDA<vdataList, [VGPR_32], isTFE, hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.IdxEn), getMUBUFInsDA<vdataList, [VGPR_32], isTFE, hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.BothEn), getMUBUFInsDA<vdataList, [VReg_64], isTFE, hasRestrictedSOffset>.ret,
+ !if(!eq(addrKind, BUFAddrKind.Addr64), getMUBUFInsDA<vdataList, [VReg_64], isTFE, hasRestrictedSOffset>.ret,
(ins))))));
}
@@ -481,7 +482,7 @@ class MUBUF_Load_Pseudo <string opName,
bit isLds = 0,
bit isLdsOpc = 0,
bit isTFE = 0,
- bit hasGFX12Enc = 0,
+ bit hasRestrictedSOffset = 0,
list<dag> pattern=[],
// Workaround bug bz30254
int addrKindCopy = addrKind,
@@ -489,7 +490,7 @@ class MUBUF_Load_Pseudo <string opName,
RegisterOperand vdata_op = getLdStVDataRegisterOperand<vdata_rc, isTFE>.ret>
: MUBUF_Pseudo<opName,
!if(!or(isLds, isLdsOpc), (outs), (outs vdata_op:$vdata)),
- !con(getMUBUFIns<addrKindCopy, [], isTFE, hasGFX12Enc>.ret,
+ !con(getMUBUFIns<addrKindCopy, [], isTFE, hasRestrictedSOffset>.ret,
!if(HasTiedDest, (ins vdata_op:$vdata_in), (ins))),
getMUBUFAsmOps<addrKindCopy, !or(isLds, isLdsOpc), isLds, isTFE>.ret,
pattern>,
@@ -535,24 +536,24 @@ multiclass MUBUF_Pseudo_Load_Pats<string BaseInst, ValueType load_vt = i32, SDPa
}
multiclass MUBUF_Pseudo_Loads_Helper<string opName, ValueType load_vt,
- bit TiedDest, bit isLds, bit isTFE, bit hasGFX12Enc> {
+ bit TiedDest, bit isLds, bit isTFE, bit hasRestrictedSOffset> {
defvar legal_load_vt = !if(!eq(load_vt, v3f16), v4f16, load_vt);
- def _OFFSET : MUBUF_Load_Pseudo <opName, BUFAddrKind.Offset, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>,
+ def _OFFSET : MUBUF_Load_Pseudo <opName, BUFAddrKind.Offset, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>,
MUBUFAddr64Table<0, NAME # !if(isLds, "_LDS", "")>;
- def _ADDR64 : MUBUF_Load_Pseudo <opName, BUFAddrKind.Addr64, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>,
+ def _ADDR64 : MUBUF_Load_Pseudo <opName, BUFAddrKind.Addr64, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>,
MUBUFAddr64Table<1, NAME # !if(isLds, "_LDS", "")>;
- def _OFFEN : MUBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>;
- def _IDXEN : MUBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>;
- def _BOTHEN : MUBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>;
+ def _OFFEN : MUBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>;
+ def _IDXEN : MUBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>;
+ def _BOTHEN : MUBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>;
let DisableWQM = 1 in {
- def _OFFSET_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.Offset, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>;
- def _OFFEN_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>;
- def _IDXEN_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>;
- def _BOTHEN_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasGFX12Enc>;
+ def _OFFSET_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.Offset, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>;
+ def _OFFEN_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.OffEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>;
+ def _IDXEN_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.IdxEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>;
+ def _BOTHEN_exact : MUBUF_Load_Pseudo <opName, BUFAddrKind.BothEn, legal_load_vt, TiedDest, isLds, 0, isTFE, hasRestrictedSOffset>;
}
}
@@ -595,13 +596,13 @@ class MUBUF_Store_Pseudo <string opName,
int addrKind,
ValueType store_vt,
bit isTFE = 0,
- bit hasGFX12Enc = 0,
+ bit hasRestrictedSOffset = 0,
list<dag> pattern=[],
// Workaround bug bz30254
int addrKindCopy = addrKind>
: MUBUF_Pseudo<opName,
(outs),
- getMUBUFIns<addrKindCopy, [getVregSrcForVT<store_vt>.ret.RegClass], isTFE, hasGFX12Enc>.ret,
+ getMUBUFIns<addrKindCopy, [getVregSrcForVT<store_vt>.ret.RegClass], isTFE, hasRestrictedSOffset>.ret,
getMUBUFAsmOps<addrKindCopy, 0, 0, isTFE>.ret,
pattern>,
MUBUF_SetupAddr<addrKindCopy> {
@@ -632,24 +633,24 @@ multiclass MUBUF_Pseudo_Store_Pats<string BaseInst, ValueType store_vt = i32, SD
}
multiclass MUBUF_Pseudo_Stores_Helper<string opName, ValueType store_vt,
- bit isTFE, bit hasGFX12Enc> {
+ bit isTFE, bit hasRestrictedSOffset> {
defvar legal_store_vt = !if(!eq(store_vt, v3f16), v4f16, store_vt);
- def _OFFSET : MUBUF_Store_Pseudo <opName, BUFAddrKind.Offset, legal_store_vt, isTFE, hasGFX12Enc>,
+ def _OFFSET : MUBUF_Store_Pseudo <opName, BUFAddrKind.Offset, legal_store_vt, isTFE, hasRestrictedSOffset>,
MUBUFAddr64Table<0, NAME>;
- def _ADDR64 : MUBUF_Store_Pseudo <opName, BUFAddrKind.Addr64, legal_store_vt, isTFE, hasGFX12Enc>,
+ def _ADDR64 : MUBUF_Store_Pseudo <opName, BUFAddrKind.Addr64, legal_store_vt, isTFE, hasRestrictedSOffset>,
MUBUFAddr64Table<1, NAME>;
- def _OFFEN : MUBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, legal_store_vt, isTFE, hasGFX12Enc>;
- def _IDXEN : MUBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, legal_store_vt, isTFE, hasGFX12Enc>;
- def _BOTHEN : MUBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, legal_store_vt, isTFE, hasGFX12Enc>;
+ def _OFFEN : MUBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, legal_store_vt, isTFE, hasRestrictedSOffset>;
+ def _IDXEN : MUBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, legal_store_vt, isTFE, hasRestrictedSOffset>;
+ def _BOTHEN : MUBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, legal_store_vt, isTFE, hasRestrictedSOffset>;
let DisableWQM = 1 in {
- def _OFFSET_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.Offset, legal_store_vt, isTFE, hasGFX12Enc>;
- def _OFFEN_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, legal_store_vt, isTFE, hasGFX12Enc>;
- def _IDXEN_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, legal_store_vt, isTFE, hasGFX12Enc>;
- def _BOTHEN_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, legal_store_vt, isTFE, hasGFX12Enc>;
+ def _OFFSET_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.Offset, legal_store_vt, isTFE, hasRestrictedSOffset>;
+ def _OFFEN_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.OffEn, legal_store_vt, isTFE, hasRestrictedSOffset>;
+ def _IDXEN_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.IdxEn, legal_store_vt, isTFE, hasRestrictedSOffset>;
+ def _BOTHEN_exact : MUBUF_Store_Pseudo <opName, BUFAddrKind.BothEn, legal_store_vt, isTFE, hasRestrictedSOffset>;
}
}
@@ -679,14 +680,14 @@ class MUBUF_Pseudo_Store_Lds<string opName>
let AsmMatchConverter = "cvtMubuf";
}
-class getMUBUFAtomicInsDA<RegisterClass vdataClass, bit vdata_in, bit hasGFX12Enc,
+class getMUBUFAtomicInsDA<RegisterClass vdataClass, bit vdata_in, bit hasRestrictedSOffset,
list<RegisterClass> vaddrList=[]> {
RegisterClass vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));
RegisterOperand vdata_op = getLdStRegisterOperand<vdataClass>.ret;
dag VData = !if(vdata_in, (ins vdata_op:$vdata_in), (ins vdata_op:$vdata));
dag Data = !if(!empty(vaddrList), VData, !con(VData, (ins vaddrClass:$vaddr)));
- dag SOffset = !if(hasGFX12Enc, (ins SReg_32:$soffset), (ins SCSrc_b32:$soffset));
+ dag SOffset = !if(hasRestrictedSOffset, (ins SReg_32:$soffset), (ins SCSrc_b32:$soffset));
dag MainInputs = !con((ins SReg_128:$srsrc), SOffset, (ins Offset:$offset));
dag CPol = !if(vdata_in, (ins CPol_GLC_WithDefault:$cpol),
(ins CPol_NonGLC_WithDefault:$cpol));
@@ -697,20 +698,20 @@ class getMUBUFAtomicInsDA<RegisterClass vdataClass, bit vdata_in, bit hasGFX12En
class getMUBUFAtomicIns<int addrKind,
RegisterClass vdataClass,
bit vdata_in,
- bit hasGFX12Enc,
+ bit hasRestrictedSOffset,
// Workaround bug bz30254
RegisterClass vdataClassCopy=vdataClass> {
dag ret =
!if(!eq(addrKind, BUFAddrKind.Offset),
- getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasGFX12Enc>.ret,
+ getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasRestrictedSOffset>.ret,
!if(!eq(addrKind, BUFAddrKind.OffEn),
- getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasGFX12Enc, [VGPR_32]>.ret,
+ getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasRestrictedSOffset, [VGPR_32]>.ret,
!if(!eq(addrKind, BUFAddrKind.IdxEn),
- getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasGFX12Enc, [VGPR_32]>.ret,
+ getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasRestrictedSOffset, [VGPR_32]>.ret,
!if(!eq(addrKind, BUFAddrKind.BothEn),
- getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasGFX12Enc, [VReg_64]>.ret,
+ getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasRestrictedSOffset, [VReg_64]>.ret,
!if(!eq(addrKind, BUFAddrKind.Addr64),
- getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasGFX12Enc, [VReg_64]>.ret,
+ getMUBUFAtomicInsDA<vdataClassCopy, vdata_in, hasRestrictedSOffset, [VReg_64]>.ret,
(ins))))));
}
@@ -737,17 +738,16 @@ class MUBUF_Atomic_Pseudo<string opName,
class MUBUF_AtomicNoRet_Pseudo<string opName, int addrKind,
RegisterClass vdataClass,
- bit hasGFX12Enc = 0,
+ bit hasRestrictedSOffset = 0,
list<dag> pattern=[],
// Workaround bug bz30254
int addrKindCopy = addrKind,
RegisterClass vdataClassCopy = vdataClass>
: MUBUF_Atomic_Pseudo<opName, addrKindCopy,
(outs),
- getMUBUFAtomicIns<addrKindCopy, vdataClassCopy, 0, hasGFX12Enc>.ret,
+ getMUBUFAtomicIns<addrKindCopy, vdataClassCopy, 0, hasRestrictedSOffset>.ret,
getMUBUFAsmOps<addrKindCopy>.ret,
- pattern>,
- AtomicNoRet<opName # "_" # getAddrName<addrKindCopy>.ret, 0> {
+ pattern> {
let PseudoInstr = opName # "_" # getAddrName<addrKindCopy>.ret;
let glc_value = 0;
let dlc_value = 0;
@@ -757,7 +757,7 @@ class MUBUF_AtomicNoRet_Pseudo<string opName, int addrKind,
class MUBUF_AtomicRet_Pseudo<string opName, int addrKind,
RegisterClass vdataClass,
- bit hasGFX12Enc = 0,
+ bit hasRestrictedSOffset = 0,
list<dag> pattern=[],
// Workaround bug bz30254
int addrKindCopy = addrKind,
@@ -765,10 +765,9 @@ class MUBUF_AtomicRet_Pseudo<string opName, int addrKind,
RegisterOperand vdata_op = getLdStRegisterOperand<vdataClass>.ret>
: MUBUF_Atomic_Pseudo<opName, addrKindCopy,
(outs vdata_op:$vdata),
- getMUBUFAtomicIns<addrKindCopy, vdataClassCopy, 1, hasGFX12Enc>.ret,
+ getMUBUFAtomicIns<addrKindCopy, vdataClassCopy, 1, hasRestrictedSOffset>.ret,
getMUBUFAsmOps<addrKindCopy>.ret,
- pattern>,
- AtomicNoRet<opName # "_" # getAddrName<addrKindCopy>.ret, 1> {
+ pattern> {
let PseudoInstr = opName # "_rtn_" # getAddrName<addrKindCopy>.ret;
let glc_value = 1;
let dlc_value = 0;
@@ -2508,36 +2507,28 @@ class MUBUF_Real_Atomic_gfx12_impl<bits<8> op, string ps_name,
multiclass MUBUF_Real_Atomic_gfx11_Renamed_impl<bits<8> op, bit is_return,
string real_name> {
- defvar Rtn = !if(!eq(is_return, 1), "_RTN", "");
+ defvar Rtn = !if(is_return, "_RTN", "");
def _BOTHEN#Rtn#_gfx11 :
- MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_BOTHEN" # Rtn, real_name>,
- AtomicNoRet<NAME # "_BOTHEN_gfx11", is_return>;
+ MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_BOTHEN" # Rtn, real_name>;
def _IDXEN#Rtn#_gfx11 :
- MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_IDXEN" # Rtn, real_name>,
- AtomicNoRet<NAME # "_IDXEN_gfx11", is_return>;
+ MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_IDXEN" # Rtn, real_name>;
def _OFFEN#Rtn#_gfx11 :
- MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_OFFEN" # Rtn, real_name>,
- AtomicNoRet<NAME # "_OFFEN_gfx11", is_return>;
+ MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_OFFEN" # Rtn, real_name>;
def _OFFSET#Rtn#_gfx11 :
- MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_OFFSET" # Rtn, real_name>,
- AtomicNoRet<NAME # "_OFFSET_gfx11", is_return>;
+ MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_OFFSET" # Rtn, real_name>;
}
multiclass MUBUF_Real_Atomic_gfx12_Renamed_impl<bits<8> op, bit is_return,
string real_name> {
- defvar Rtn = !if(!eq(is_return, 1), "_RTN", "");
+ defvar Rtn = !if(is_return, "_RTN", "");
def _BOTHEN#Rtn#_gfx12 :
- MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_BOTHEN" # Rtn, real_name>,
- AtomicNoRet<NAME # "_BOTHEN_gfx12", is_return>;
+ MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_BOTHEN" # Rtn, real_name>;
def _IDXEN#Rtn#_gfx12 :
- MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_IDXEN" # Rtn, real_name>,
- AtomicNoRet<NAME # "_IDXEN_gfx12", is_return>;
+ MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_IDXEN" # Rtn, real_name>;
def _OFFEN#Rtn#_gfx12 :
- MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_OFFEN" # Rtn, real_name>,
- AtomicNoRet<NAME # "_OFFEN_gfx12", is_return>;
+ MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_OFFEN" # Rtn, real_name>;
def _OFFSET#Rtn#_gfx12 :
- MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_OFFSET" # Rtn, real_name>,
- AtomicNoRet<NAME # "_OFFSET_gfx12", is_return>;
+ MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_OFFSET" # Rtn, real_name>;
}
multiclass MUBUF_Real_Atomic_gfx11_gfx12_Renamed_impl<bits<8> op, bit is_return,
@@ -2694,32 +2685,24 @@ multiclass MUBUF_Real_AllAddr_Lds_gfx10<bits<8> op, bit isTFE = 0> {
}
multiclass MUBUF_Real_Atomics_RTN_gfx10<bits<8> op> {
def _BOTHEN_RTN_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN_RTN")>,
- AtomicNoRet<NAME # "_BOTHEN_gfx10", 1>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN_RTN")>;
def _IDXEN_RTN_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN_RTN")>,
- AtomicNoRet<NAME # "_IDXEN_gfx10", 1>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN_RTN")>;
def _OFFEN_RTN_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN_RTN")>,
- AtomicNoRet<NAME # "_OFFEN_gfx10", 1>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN_RTN")>;
def _OFFSET_RTN_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET_RTN")>,
- AtomicNoRet<NAME # "_OFFSET_gfx10", 1>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET_RTN")>;
}
multiclass MUBUF_Real_Atomics_gfx10<bits<8> op> :
MUBUF_Real_Atomics_RTN_gfx10<op> {
def _BOTHEN_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN")>,
- AtomicNoRet<NAME # "_BOTHEN_gfx10", 0>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN")>;
def _IDXEN_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN")>,
- AtomicNoRet<NAME # "_IDXEN_gfx10", 0>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN")>;
def _OFFEN_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN")>,
- AtomicNoRet<NAME # "_OFFEN_gfx10", 0>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN")>;
def _OFFSET_gfx10 :
- MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET")>,
- AtomicNoRet<NAME # "_OFFSET_gfx10", 0>;
+ MUBUF_Real_gfx10<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET")>;
}
defm BUFFER_STORE_BYTE_D16_HI : MUBUF_Real_AllAddr_gfx10<0x019>;
@@ -2794,36 +2777,26 @@ multiclass MUBUF_Real_AllAddr_Lds_gfx6_gfx7<bits<8> op, bit isTFE = 0> {
}
multiclass MUBUF_Real_Atomics_gfx6_gfx7<bits<8> op> {
def _ADDR64_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_ADDR64")>,
- AtomicNoRet<NAME # "_ADDR64_gfx6_gfx7", 0>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_ADDR64")>;
def _BOTHEN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN")>,
- AtomicNoRet<NAME # "_BOTHEN_gfx6_gfx7", 0>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN")>;
def _IDXEN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN")>,
- AtomicNoRet<NAME # "_IDXEN_gfx6_gfx7", 0>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN")>;
def _OFFEN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN")>,
- AtomicNoRet<NAME # "_OFFEN_gfx6_gfx7", 0>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN")>;
def _OFFSET_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET")>,
- AtomicNoRet<NAME # "_OFFSET_gfx6_gfx7", 0>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET")>;
def _ADDR64_RTN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_ADDR64_RTN")>,
- AtomicNoRet<NAME # "_ADDR64_gfx6_gfx7", 1>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_ADDR64_RTN")>;
def _BOTHEN_RTN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN_RTN")>,
- AtomicNoRet<NAME # "_BOTHEN_gfx6_gfx7", 1>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_BOTHEN_RTN")>;
def _IDXEN_RTN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN_RTN")>,
- AtomicNoRet<NAME # "_IDXEN_gfx6_gfx7", 1>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_IDXEN_RTN")>;
def _OFFEN_RTN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN_RTN")>,
- AtomicNoRet<NAME # "_OFFEN_gfx6_gfx7", 1>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFEN_RTN")>;
def _OFFSET_RTN_gfx6_gfx7 :
- MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET_RTN")>,
- AtomicNoRet<NAME # "_OFFSET_gfx6_gfx7", 1>;
+ MUBUF_Real_gfx6_gfx7<op, !cast<MUBUF_Pseudo>(NAME#"_OFFSET_RTN")>;
}
multiclass MUBUF_Real_AllAddr_gfx6_gfx7_gfx10<bits<8> op> :
@@ -3080,9 +3053,7 @@ class MUBUF_Real_Base_vi <bits<7> op, MUBUF_Pseudo ps, int Enc,
bit has_sccb = ps.has_sccb> :
MUBUF_Real<ps>,
Enc64,
- SIMCInstr<ps.PseudoInstr, Enc>,
- AtomicNoRet<!subst("_RTN","",NAME), !if(ps.IsAtomicNoRet, 0,
- !if(ps.IsAtomicRet, 1, ?))> {
+ SIMCInstr<ps.PseudoInstr, Enc> {
let Inst{11-0} = !if(ps.has_offset, offset, ?);
let Inst{12} = ps.offen;
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index 074e13317ef8..219ff37b0a15 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -63,6 +63,7 @@ class DS_Real <DS_Pseudo ps, string opName = ps.Mnemonic> :
// copy relevant pseudo op flags
let GWS = ps.GWS;
let SubtargetPredicate = ps.SubtargetPredicate;
+ let WaveSizePredicate = ps.WaveSizePredicate;
let OtherPredicates = ps.OtherPredicates;
let SchedRW = ps.SchedRW;
let mayLoad = ps.mayLoad;
@@ -115,19 +116,16 @@ class DS_1A1D_NORET<string opName, RegisterClass rc = VGPR_32>
}
multiclass DS_1A1D_NORET_mc<string opName, RegisterClass rc = VGPR_32> {
- def "" : DS_1A1D_NORET<opName, rc>,
- AtomicNoRet<opName, 0>;
+ def "" : DS_1A1D_NORET<opName, rc>;
let has_m0_read = 0 in {
- def _gfx9 : DS_1A1D_NORET<opName, rc>,
- AtomicNoRet<opName#"_gfx9", 0>;
+ def _gfx9 : DS_1A1D_NORET<opName, rc>;
}
}
multiclass DS_1A1D_NORET_mc_gfx9<string opName, RegisterClass rc = VGPR_32> {
let has_m0_read = 0 in {
- def "" : DS_1A1D_NORET<opName, rc>,
- AtomicNoRet<opName, 0>;
+ def "" : DS_1A1D_NORET<opName, rc>;
}
}
@@ -143,12 +141,10 @@ class DS_1A2D_NORET<string opName, RegisterClass rc = VGPR_32,
}
multiclass DS_1A2D_NORET_mc<string opName, RegisterClass rc = VGPR_32> {
- def "" : DS_1A2D_NORET<opName, rc>,
- AtomicNoRet<opName, 0>;
+ def "" : DS_1A2D_NORET<opName, rc>;
let has_m0_read = 0 in {
- def _gfx9 : DS_1A2D_NORET<opName, rc>,
- AtomicNoRet<opName#"_gfx9", 0>;
+ def _gfx9 : DS_1A2D_NORET<opName, rc>;
}
}
@@ -199,24 +195,17 @@ class DS_1A1D_RET <string opName, RegisterClass rc = VGPR_32,
let IsAtomicRet = 1;
}
-multiclass DS_1A1D_RET_mc <string opName, RegisterClass rc = VGPR_32,
- string NoRetOp = ""> {
- def "" : DS_1A1D_RET<opName, rc>,
- AtomicNoRet<NoRetOp, !ne(NoRetOp, "")>;
+multiclass DS_1A1D_RET_mc <string opName, RegisterClass rc = VGPR_32> {
+ def "" : DS_1A1D_RET<opName, rc>;
let has_m0_read = 0 in {
- def _gfx9 : DS_1A1D_RET<opName, rc>,
- AtomicNoRet<!if(!eq(NoRetOp, ""), "", NoRetOp#"_gfx9"),
- !ne(NoRetOp, "")>;
+ def _gfx9 : DS_1A1D_RET<opName, rc>;
}
}
-multiclass DS_1A1D_RET_mc_gfx9 <string opName, RegisterClass rc = VGPR_32,
- string NoRetOp = ""> {
+multiclass DS_1A1D_RET_mc_gfx9 <string opName, RegisterClass rc = VGPR_32> {
let has_m0_read = 0 in {
- def "" : DS_1A1D_RET<opName, rc>,
- AtomicNoRet<!if(!eq(NoRetOp, ""), "", NoRetOp),
- !ne(NoRetOp, "")>;
+ def "" : DS_1A1D_RET<opName, rc>;
}
}
@@ -236,14 +225,11 @@ class DS_1A2D_RET<string opName,
multiclass DS_1A2D_RET_mc<string opName,
RegisterClass rc = VGPR_32,
- string NoRetOp = "",
RegisterClass src = rc> {
- def "" : DS_1A2D_RET<opName, rc, src>,
- AtomicNoRet<NoRetOp, !ne(NoRetOp, "")>;
+ def "" : DS_1A2D_RET<opName, rc, src>;
let has_m0_read = 0 in {
- def _gfx9 : DS_1A2D_RET<opName, rc, src>,
- AtomicNoRet<NoRetOp#"_gfx9", !ne(NoRetOp, "")>;
+ def _gfx9 : DS_1A2D_RET<opName, rc, src>;
}
}
@@ -488,24 +474,24 @@ def DS_WRITE_ADDTID_B32 : DS_0A1D_NORET<"ds_write_addtid_b32">;
let SubtargetPredicate = HasLdsAtomicAddF64 in {
defm DS_ADD_F64 : DS_1A1D_NORET_mc_gfx9<"ds_add_f64", VReg_64>;
- defm DS_ADD_RTN_F64 : DS_1A1D_RET_mc_gfx9<"ds_add_rtn_f64", VReg_64, "ds_add_f64">;
+ defm DS_ADD_RTN_F64 : DS_1A1D_RET_mc_gfx9<"ds_add_rtn_f64", VReg_64>;
} // End SubtargetPredicate = HasLdsAtomicAddF64
let SubtargetPredicate = HasAtomicDsPkAdd16Insts in {
defm DS_PK_ADD_F16 : DS_1A1D_NORET_mc<"ds_pk_add_f16">;
- defm DS_PK_ADD_RTN_F16 : DS_1A1D_RET_mc<"ds_pk_add_rtn_f16", VGPR_32, "ds_pk_add_f16">;
+ defm DS_PK_ADD_RTN_F16 : DS_1A1D_RET_mc<"ds_pk_add_rtn_f16", VGPR_32>;
defm DS_PK_ADD_BF16 : DS_1A1D_NORET_mc<"ds_pk_add_bf16">;
- defm DS_PK_ADD_RTN_BF16 : DS_1A1D_RET_mc<"ds_pk_add_rtn_bf16", VGPR_32, "ds_pk_add_bf16">;
+ defm DS_PK_ADD_RTN_BF16 : DS_1A1D_RET_mc<"ds_pk_add_rtn_bf16", VGPR_32>;
} // End SubtargetPredicate = HasAtomicDsPkAdd16Insts
defm DS_CMPSTORE_B32 : DS_1A2D_NORET_mc<"ds_cmpstore_b32">;
defm DS_CMPSTORE_F32 : DS_1A2D_NORET_mc<"ds_cmpstore_f32">;
defm DS_CMPSTORE_B64 : DS_1A2D_NORET_mc<"ds_cmpstore_b64", VReg_64>;
defm DS_CMPSTORE_F64 : DS_1A2D_NORET_mc<"ds_cmpstore_f64", VReg_64>;
-defm DS_CMPSTORE_RTN_B32 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_b32", VGPR_32, "ds_cmpstore_b32">;
-defm DS_CMPSTORE_RTN_F32 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_f32", VGPR_32, "ds_cmpstore_f32">;
-defm DS_CMPSTORE_RTN_B64 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_b64", VReg_64, "ds_cmpstore_b64">;
-defm DS_CMPSTORE_RTN_F64 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_f64", VReg_64, "ds_cmpstore_f64">;
+defm DS_CMPSTORE_RTN_B32 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_b32", VGPR_32>;
+defm DS_CMPSTORE_RTN_F32 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_f32", VGPR_32>;
+defm DS_CMPSTORE_RTN_B64 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_b64", VReg_64>;
+defm DS_CMPSTORE_RTN_F64 : DS_1A2D_RET_mc<"ds_cmpstore_rtn_f64", VReg_64>;
defm DS_MSKOR_B32 : DS_1A2D_NORET_mc<"ds_mskor_b32">;
defm DS_CMPST_B32 : DS_1A2D_NORET_mc<"ds_cmpst_b32">;
@@ -534,49 +520,49 @@ defm DS_CMPST_F64 : DS_1A2D_NORET_mc<"ds_cmpst_f64", VReg_64>;
defm DS_MIN_F64 : DS_1A1D_NORET_mc<"ds_min_f64", VReg_64>;
defm DS_MAX_F64 : DS_1A1D_NORET_mc<"ds_max_f64", VReg_64>;
-defm DS_ADD_RTN_U32 : DS_1A1D_RET_mc<"ds_add_rtn_u32", VGPR_32, "ds_add_u32">;
+defm DS_ADD_RTN_U32 : DS_1A1D_RET_mc<"ds_add_rtn_u32", VGPR_32>;
let SubtargetPredicate = HasLDSFPAtomicAdd in {
-defm DS_ADD_RTN_F32 : DS_1A1D_RET_mc<"ds_add_rtn_f32", VGPR_32, "ds_add_f32">;
-}
-defm DS_SUB_RTN_U32 : DS_1A1D_RET_mc<"ds_sub_rtn_u32", VGPR_32, "ds_sub_u32">;
-defm DS_RSUB_RTN_U32 : DS_1A1D_RET_mc<"ds_rsub_rtn_u32", VGPR_32, "ds_rsub_u32">;
-defm DS_INC_RTN_U32 : DS_1A1D_RET_mc<"ds_inc_rtn_u32", VGPR_32, "ds_inc_u32">;
-defm DS_DEC_RTN_U32 : DS_1A1D_RET_mc<"ds_dec_rtn_u32", VGPR_32, "ds_dec_u32">;
-defm DS_MIN_RTN_I32 : DS_1A1D_RET_mc<"ds_min_rtn_i32", VGPR_32, "ds_min_i32">;
-defm DS_MAX_RTN_I32 : DS_1A1D_RET_mc<"ds_max_rtn_i32", VGPR_32, "ds_max_i32">;
-defm DS_MIN_RTN_U32 : DS_1A1D_RET_mc<"ds_min_rtn_u32", VGPR_32, "ds_min_u32">;
-defm DS_MAX_RTN_U32 : DS_1A1D_RET_mc<"ds_max_rtn_u32", VGPR_32, "ds_max_u32">;
-defm DS_AND_RTN_B32 : DS_1A1D_RET_mc<"ds_and_rtn_b32", VGPR_32, "ds_and_b32">;
-defm DS_OR_RTN_B32 : DS_1A1D_RET_mc<"ds_or_rtn_b32", VGPR_32, "ds_or_b32">;
-defm DS_XOR_RTN_B32 : DS_1A1D_RET_mc<"ds_xor_rtn_b32", VGPR_32, "ds_xor_b32">;
-defm DS_MSKOR_RTN_B32 : DS_1A2D_RET_mc<"ds_mskor_rtn_b32", VGPR_32, "ds_mskor_b32">;
-defm DS_CMPST_RTN_B32 : DS_1A2D_RET_mc<"ds_cmpst_rtn_b32", VGPR_32, "ds_cmpst_b32">;
-defm DS_CMPST_RTN_F32 : DS_1A2D_RET_mc<"ds_cmpst_rtn_f32", VGPR_32, "ds_cmpst_f32">;
-defm DS_MIN_RTN_F32 : DS_1A1D_RET_mc<"ds_min_rtn_f32", VGPR_32, "ds_min_f32">;
-defm DS_MAX_RTN_F32 : DS_1A1D_RET_mc<"ds_max_rtn_f32", VGPR_32, "ds_max_f32">;
+defm DS_ADD_RTN_F32 : DS_1A1D_RET_mc<"ds_add_rtn_f32", VGPR_32>;
+}
+defm DS_SUB_RTN_U32 : DS_1A1D_RET_mc<"ds_sub_rtn_u32", VGPR_32>;
+defm DS_RSUB_RTN_U32 : DS_1A1D_RET_mc<"ds_rsub_rtn_u32", VGPR_32>;
+defm DS_INC_RTN_U32 : DS_1A1D_RET_mc<"ds_inc_rtn_u32", VGPR_32>;
+defm DS_DEC_RTN_U32 : DS_1A1D_RET_mc<"ds_dec_rtn_u32", VGPR_32>;
+defm DS_MIN_RTN_I32 : DS_1A1D_RET_mc<"ds_min_rtn_i32", VGPR_32>;
+defm DS_MAX_RTN_I32 : DS_1A1D_RET_mc<"ds_max_rtn_i32", VGPR_32>;
+defm DS_MIN_RTN_U32 : DS_1A1D_RET_mc<"ds_min_rtn_u32", VGPR_32>;
+defm DS_MAX_RTN_U32 : DS_1A1D_RET_mc<"ds_max_rtn_u32", VGPR_32>;
+defm DS_AND_RTN_B32 : DS_1A1D_RET_mc<"ds_and_rtn_b32", VGPR_32>;
+defm DS_OR_RTN_B32 : DS_1A1D_RET_mc<"ds_or_rtn_b32", VGPR_32>;
+defm DS_XOR_RTN_B32 : DS_1A1D_RET_mc<"ds_xor_rtn_b32", VGPR_32>;
+defm DS_MSKOR_RTN_B32 : DS_1A2D_RET_mc<"ds_mskor_rtn_b32", VGPR_32>;
+defm DS_CMPST_RTN_B32 : DS_1A2D_RET_mc<"ds_cmpst_rtn_b32", VGPR_32>;
+defm DS_CMPST_RTN_F32 : DS_1A2D_RET_mc<"ds_cmpst_rtn_f32", VGPR_32>;
+defm DS_MIN_RTN_F32 : DS_1A1D_RET_mc<"ds_min_rtn_f32", VGPR_32>;
+defm DS_MAX_RTN_F32 : DS_1A1D_RET_mc<"ds_max_rtn_f32", VGPR_32>;
defm DS_WRXCHG_RTN_B32 : DS_1A1D_RET_mc<"ds_wrxchg_rtn_b32">;
defm DS_WRXCHG2_RTN_B32 : DS_1A2D_Off8_RET_mc<"ds_wrxchg2_rtn_b32", VReg_64, VGPR_32>;
defm DS_WRXCHG2ST64_RTN_B32 : DS_1A2D_Off8_RET_mc<"ds_wrxchg2st64_rtn_b32", VReg_64, VGPR_32>;
-defm DS_ADD_RTN_U64 : DS_1A1D_RET_mc<"ds_add_rtn_u64", VReg_64, "ds_add_u64">;
-defm DS_SUB_RTN_U64 : DS_1A1D_RET_mc<"ds_sub_rtn_u64", VReg_64, "ds_sub_u64">;
-defm DS_RSUB_RTN_U64 : DS_1A1D_RET_mc<"ds_rsub_rtn_u64", VReg_64, "ds_rsub_u64">;
-defm DS_INC_RTN_U64 : DS_1A1D_RET_mc<"ds_inc_rtn_u64", VReg_64, "ds_inc_u64">;
-defm DS_DEC_RTN_U64 : DS_1A1D_RET_mc<"ds_dec_rtn_u64", VReg_64, "ds_dec_u64">;
-defm DS_MIN_RTN_I64 : DS_1A1D_RET_mc<"ds_min_rtn_i64", VReg_64, "ds_min_i64">;
-defm DS_MAX_RTN_I64 : DS_1A1D_RET_mc<"ds_max_rtn_i64", VReg_64, "ds_max_i64">;
-defm DS_MIN_RTN_U64 : DS_1A1D_RET_mc<"ds_min_rtn_u64", VReg_64, "ds_min_u64">;
-defm DS_MAX_RTN_U64 : DS_1A1D_RET_mc<"ds_max_rtn_u64", VReg_64, "ds_max_u64">;
-defm DS_AND_RTN_B64 : DS_1A1D_RET_mc<"ds_and_rtn_b64", VReg_64, "ds_and_b64">;
-defm DS_OR_RTN_B64 : DS_1A1D_RET_mc<"ds_or_rtn_b64", VReg_64, "ds_or_b64">;
-defm DS_XOR_RTN_B64 : DS_1A1D_RET_mc<"ds_xor_rtn_b64", VReg_64, "ds_xor_b64">;
-defm DS_MSKOR_RTN_B64 : DS_1A2D_RET_mc<"ds_mskor_rtn_b64", VReg_64, "ds_mskor_b64">;
-defm DS_CMPST_RTN_B64 : DS_1A2D_RET_mc<"ds_cmpst_rtn_b64", VReg_64, "ds_cmpst_b64">;
-defm DS_CMPST_RTN_F64 : DS_1A2D_RET_mc<"ds_cmpst_rtn_f64", VReg_64, "ds_cmpst_f64">;
-defm DS_MIN_RTN_F64 : DS_1A1D_RET_mc<"ds_min_rtn_f64", VReg_64, "ds_min_f64">;
-defm DS_MAX_RTN_F64 : DS_1A1D_RET_mc<"ds_max_rtn_f64", VReg_64, "ds_max_f64">;
+defm DS_ADD_RTN_U64 : DS_1A1D_RET_mc<"ds_add_rtn_u64", VReg_64>;
+defm DS_SUB_RTN_U64 : DS_1A1D_RET_mc<"ds_sub_rtn_u64", VReg_64>;
+defm DS_RSUB_RTN_U64 : DS_1A1D_RET_mc<"ds_rsub_rtn_u64", VReg_64>;
+defm DS_INC_RTN_U64 : DS_1A1D_RET_mc<"ds_inc_rtn_u64", VReg_64>;
+defm DS_DEC_RTN_U64 : DS_1A1D_RET_mc<"ds_dec_rtn_u64", VReg_64>;
+defm DS_MIN_RTN_I64 : DS_1A1D_RET_mc<"ds_min_rtn_i64", VReg_64>;
+defm DS_MAX_RTN_I64 : DS_1A1D_RET_mc<"ds_max_rtn_i64", VReg_64>;
+defm DS_MIN_RTN_U64 : DS_1A1D_RET_mc<"ds_min_rtn_u64", VReg_64>;
+defm DS_MAX_RTN_U64 : DS_1A1D_RET_mc<"ds_max_rtn_u64", VReg_64>;
+defm DS_AND_RTN_B64 : DS_1A1D_RET_mc<"ds_and_rtn_b64", VReg_64>;
+defm DS_OR_RTN_B64 : DS_1A1D_RET_mc<"ds_or_rtn_b64", VReg_64>;
+defm DS_XOR_RTN_B64 : DS_1A1D_RET_mc<"ds_xor_rtn_b64", VReg_64>;
+defm DS_MSKOR_RTN_B64 : DS_1A2D_RET_mc<"ds_mskor_rtn_b64", VReg_64>;
+defm DS_CMPST_RTN_B64 : DS_1A2D_RET_mc<"ds_cmpst_rtn_b64", VReg_64>;
+defm DS_CMPST_RTN_F64 : DS_1A2D_RET_mc<"ds_cmpst_rtn_f64", VReg_64>;
+defm DS_MIN_RTN_F64 : DS_1A1D_RET_mc<"ds_min_rtn_f64", VReg_64>;
+defm DS_MAX_RTN_F64 : DS_1A1D_RET_mc<"ds_max_rtn_f64", VReg_64>;
defm DS_WRXCHG_RTN_B64 : DS_1A1D_RET_mc<"ds_wrxchg_rtn_b64", VReg_64>;
defm DS_WRXCHG2_RTN_B64 : DS_1A2D_Off8_RET_mc<"ds_wrxchg2_rtn_b64", VReg_128, VReg_64>;
@@ -739,9 +725,9 @@ def DS_BVH_STACK_RTN_B32 : DS_BVH_STACK<"ds_bvh_stack_rtn_b32">;
let SubtargetPredicate = isGFX12Plus in {
defm DS_COND_SUB_U32 : DS_1A1D_NORET_mc<"ds_cond_sub_u32">;
-defm DS_COND_SUB_RTN_U32 : DS_1A1D_RET_mc<"ds_cond_sub_rtn_u32", VGPR_32, "ds_cond_sub_u32">;
+defm DS_COND_SUB_RTN_U32 : DS_1A1D_RET_mc<"ds_cond_sub_rtn_u32", VGPR_32>;
defm DS_SUB_CLAMP_U32 : DS_1A1D_NORET_mc<"ds_sub_clamp_u32">;
-defm DS_SUB_CLAMP_RTN_U32 : DS_1A1D_RET_mc<"ds_sub_clamp_rtn_u32", VGPR_32, "ds_sub_clamp_u32">;
+defm DS_SUB_CLAMP_RTN_U32 : DS_1A1D_RET_mc<"ds_sub_clamp_rtn_u32", VGPR_32>;
multiclass DSAtomicRetNoRetPatIntrinsic_mc<DS_Pseudo inst, DS_Pseudo noRetInst,
ValueType vt, string frag> {
@@ -1261,7 +1247,9 @@ defm DS_PK_ADD_RTN_BF16 : DS_Real_gfx12<0x0ab>;
// New aliases added in GFX12 without renaming the instructions.
def : MnemonicAlias<"ds_subrev_u32", "ds_rsub_u32">, Requires<[isGFX12Plus]>;
+def : MnemonicAlias<"ds_subrev_rtn_u32", "ds_rsub_rtn_u32">, Requires<[isGFX12Plus]>;
def : MnemonicAlias<"ds_subrev_u64", "ds_rsub_u64">, Requires<[isGFX12Plus]>;
+def : MnemonicAlias<"ds_subrev_rtn_u64", "ds_rsub_rtn_u64">, Requires<[isGFX12Plus]>;
//===----------------------------------------------------------------------===//
// GFX11.
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index a7082f550ccb..f42d4ae416bd 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -153,6 +153,7 @@ class VFLAT_Real <bits<8> op, FLAT_Pseudo ps, string opName = ps.Mnemonic> :
// copy relevant pseudo op flags
let SubtargetPredicate = ps.SubtargetPredicate;
+ let WaveSizePredicate = ps.WaveSizePredicate;
let AsmMatchConverter = ps.AsmMatchConverter;
let OtherPredicates = ps.OtherPredicates;
let TSFlags = ps.TSFlags;
@@ -540,8 +541,7 @@ multiclass FLAT_Atomic_Pseudo_NO_RTN<
(outs),
(ins VReg_64:$vaddr, data_op:$vdata, flat_offset:$offset, CPol_0:$cpol),
" $vaddr, $vdata$offset$cpol">,
- GlobalSaddrTable<0, opName>,
- AtomicNoRet <opName, 0> {
+ GlobalSaddrTable<0, opName> {
let PseudoInstr = NAME;
let FPAtomic = data_vt.isFP;
let AddedComplexity = -1; // Prefer global atomics if available
@@ -559,8 +559,7 @@ multiclass FLAT_Atomic_Pseudo_RTN<
(outs getLdStRegisterOperand<vdst_rc>.ret:$vdst),
(ins VReg_64:$vaddr, data_op:$vdata, flat_offset:$offset, CPol_GLC1:$cpol),
" $vdst, $vaddr, $vdata$offset$cpol">,
- GlobalSaddrTable<0, opName#"_rtn">,
- AtomicNoRet <opName, 1> {
+ GlobalSaddrTable<0, opName#"_rtn"> {
let FPAtomic = data_vt.isFP;
let AddedComplexity = -1; // Prefer global atomics if available
}
@@ -589,8 +588,7 @@ multiclass FLAT_Global_Atomic_Pseudo_NO_RTN<
(outs),
(ins VReg_64:$vaddr, data_op:$vdata, flat_offset:$offset, CPol_0:$cpol),
" $vaddr, $vdata, off$offset$cpol">,
- GlobalSaddrTable<0, opName>,
- AtomicNoRet <opName, 0> {
+ GlobalSaddrTable<0, opName> {
let has_saddr = 1;
let PseudoInstr = NAME;
let FPAtomic = data_vt.isFP;
@@ -600,8 +598,7 @@ multiclass FLAT_Global_Atomic_Pseudo_NO_RTN<
(outs),
(ins VGPR_32:$vaddr, data_op:$vdata, SReg_64:$saddr, flat_offset:$offset, CPol_0:$cpol),
" $vaddr, $vdata, $saddr$offset$cpol">,
- GlobalSaddrTable<1, opName>,
- AtomicNoRet <opName#"_saddr", 0> {
+ GlobalSaddrTable<1, opName> {
let has_saddr = 1;
let enabled_saddr = 1;
let PseudoInstr = NAME#"_SADDR";
@@ -622,8 +619,7 @@ multiclass FLAT_Global_Atomic_Pseudo_RTN<
(outs vdst_op:$vdst),
(ins VReg_64:$vaddr, data_op:$vdata, flat_offset:$offset, CPol_GLC1:$cpol),
" $vdst, $vaddr, $vdata, off$offset$cpol">,
- GlobalSaddrTable<0, opName#"_rtn">,
- AtomicNoRet <opName, 1> {
+ GlobalSaddrTable<0, opName#"_rtn"> {
let has_saddr = 1;
let FPAtomic = data_vt.isFP;
}
@@ -632,8 +628,7 @@ multiclass FLAT_Global_Atomic_Pseudo_RTN<
(outs vdst_op:$vdst),
(ins VGPR_32:$vaddr, data_op:$vdata, SReg_64:$saddr, flat_offset:$offset, CPol_GLC1:$cpol),
" $vdst, $vaddr, $vdata, $saddr$offset$cpol">,
- GlobalSaddrTable<1, opName#"_rtn">,
- AtomicNoRet <opName#"_saddr", 1> {
+ GlobalSaddrTable<1, opName#"_rtn"> {
let has_saddr = 1;
let enabled_saddr = 1;
let PseudoInstr = NAME#"_SADDR_RTN";
@@ -2751,12 +2746,12 @@ defm GLOBAL_ATOMIC_MIN_NUM_F32 : VGLOBAL_Real_Atomics_gfx12<0x051, "GLOBAL_A
defm GLOBAL_ATOMIC_MAX_NUM_F32 : VGLOBAL_Real_Atomics_gfx12<0x052, "GLOBAL_ATOMIC_FMAX", "global_atomic_max_num_f32", true, "global_atomic_max_f32">;
defm GLOBAL_ATOMIC_ADD_F32 : VGLOBAL_Real_Atomics_gfx12<0x056>;
-let WaveSizePredicate = isWave32, DecoderNamespace = "GFX12" in {
+let DecoderNamespace = "GFX12" in {
defm GLOBAL_LOAD_TR_B128_w32 : VGLOBAL_Real_AllAddr_gfx12<0x057, "GLOBAL_LOAD_TR_B128_w32", "global_load_tr_b128">;
defm GLOBAL_LOAD_TR_B64_w32 : VGLOBAL_Real_AllAddr_gfx12<0x058, "GLOBAL_LOAD_TR_B64_w32", "global_load_tr_b64">;
}
-let WaveSizePredicate = isWave64, DecoderNamespace = "GFX12W64" in {
+let DecoderNamespace = "GFX12W64" in {
defm GLOBAL_LOAD_TR_B128_w64 : VGLOBAL_Real_AllAddr_gfx12<0x057, "GLOBAL_LOAD_TR_B128_w64", "global_load_tr_b128">;
defm GLOBAL_LOAD_TR_B64_w64 : VGLOBAL_Real_AllAddr_gfx12<0x058, "GLOBAL_LOAD_TR_B64_w64", "global_load_tr_b64">;
}
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index f993ec8409c9..9f419a7fbf68 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -713,8 +713,8 @@ bool UnclusteredHighRPStage::initGCNSchedStage() {
return false;
SavedMutations.swap(DAG.Mutations);
- DAG.addMutation(createIGroupLPDAGMutation(
- AMDGPU::SchedulingPhase::PreRAReentry, nullptr));
+ DAG.addMutation(
+ createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PreRAReentry));
InitialOccupancy = DAG.MinOccupancy;
// Aggressivly try to reduce register pressure in the unclustered high RP
@@ -858,8 +858,7 @@ bool GCNSchedStage::initGCNRegion() {
StageID == GCNSchedStageID::ILPInitialSchedule;
DAG.addMutation(createIGroupLPDAGMutation(
IsInitialStage ? AMDGPU::SchedulingPhase::Initial
- : AMDGPU::SchedulingPhase::PreRAReentry,
- &SavedMutations));
+ : AMDGPU::SchedulingPhase::PreRAReentry));
}
return true;
@@ -1573,8 +1572,7 @@ void GCNPostScheduleDAGMILive::schedule() {
if (HasIGLPInstrs) {
SavedMutations.clear();
SavedMutations.swap(Mutations);
- addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA,
- &SavedMutations));
+ addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
}
ScheduleDAGMI::schedule();
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 84ef9679ab95..34c603811532 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1415,6 +1415,23 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
}
}
+void SITargetLowering::CollectTargetIntrinsicOperands(
+ const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG) const {
+ switch (cast<IntrinsicInst>(I).getIntrinsicID()) {
+ case Intrinsic::amdgcn_addrspacecast_nonnull: {
+ // The DAG's ValueType loses the addrspaces.
+ // Add them as 2 extra Constant operands "from" and "to".
+ unsigned SrcAS = I.getOperand(0)->getType()->getPointerAddressSpace();
+ unsigned DstAS = I.getType()->getPointerAddressSpace();
+ Ops.push_back(DAG.getTargetConstant(SrcAS, SDLoc(), MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(DstAS, SDLoc(), MVT::i32));
+ break;
+ }
+ default:
+ break;
+ }
+}
+
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
SmallVectorImpl<Value*> &Ops,
Type *&AccessTy) const {
@@ -6635,24 +6652,36 @@ static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
- const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
-
- SDValue Src = ASC->getOperand(0);
- SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
- unsigned SrcAS = ASC->getSrcAddressSpace();
const AMDGPUTargetMachine &TM =
static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
+ unsigned DestAS, SrcAS;
+ SDValue Src;
+ bool IsNonNull = false;
+ if (const auto *ASC = dyn_cast<AddrSpaceCastSDNode>(Op)) {
+ SrcAS = ASC->getSrcAddressSpace();
+ Src = ASC->getOperand(0);
+ DestAS = ASC->getDestAddressSpace();
+ } else {
+ assert(Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ Op.getConstantOperandVal(0) ==
+ Intrinsic::amdgcn_addrspacecast_nonnull);
+ Src = Op->getOperand(1);
+ SrcAS = Op->getConstantOperandVal(2);
+ DestAS = Op->getConstantOperandVal(3);
+ IsNonNull = true;
+ }
+
+ SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
+
// flat -> local/private
if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
- unsigned DestAS = ASC->getDestAddressSpace();
-
if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
- if (isKnownNonNull(Src, DAG, TM, SrcAS))
+ if (IsNonNull || isKnownNonNull(Op, DAG, TM, SrcAS))
return Ptr;
unsigned NullVal = TM.getNullPointerValue(DestAS);
@@ -6665,16 +6694,16 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
}
// local/private -> flat
- if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
+ if (DestAS == AMDGPUAS::FLAT_ADDRESS) {
if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
- SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
+ SDValue Aperture = getSegmentAperture(SrcAS, SL, DAG);
SDValue CvtPtr =
DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr);
- if (isKnownNonNull(Src, DAG, TM, SrcAS))
+ if (IsNonNull || isKnownNonNull(Op, DAG, TM, SrcAS))
return CvtPtr;
unsigned NullVal = TM.getNullPointerValue(SrcAS);
@@ -6697,7 +6726,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
- if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
+ if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Src.getValueType() == MVT::i64)
return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
@@ -6708,7 +6737,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
DAG.getContext()->diagnose(InvalidAddrSpaceCast);
- return DAG.getUNDEF(ASC->getValueType(0));
+ return DAG.getUNDEF(Op->getValueType(0));
}
// This lowers an INSERT_SUBVECTOR by extracting the individual elements from
@@ -8325,6 +8354,8 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Op.getOperand(3), Op.getOperand(4), Op.getOperand(5),
IndexKeyi32, Op.getOperand(7)});
}
+ case Intrinsic::amdgcn_addrspacecast_nonnull:
+ return lowerADDRSPACECAST(Op, DAG);
default:
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index f6e1d198f40a..fc90a208fa0b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -305,6 +305,10 @@ public:
MachineFunction &MF,
unsigned IntrinsicID) const override;
+ void CollectTargetIntrinsicOperands(const CallInst &I,
+ SmallVectorImpl<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+
bool getAddrModeArguments(IntrinsicInst * /*I*/,
SmallVectorImpl<Value*> &/*Ops*/,
Type *&/*AccessTy*/) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index d774826c1d08..82c6117292ae 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -949,6 +949,8 @@ public:
return AMDGPU::S_WAIT_BVHCNT;
case AMDGPU::S_WAIT_DSCNT_soft:
return AMDGPU::S_WAIT_DSCNT;
+ case AMDGPU::S_WAIT_KMCNT_soft:
+ return AMDGPU::S_WAIT_KMCNT;
default:
return Opcode;
}
@@ -1458,9 +1460,6 @@ namespace AMDGPU {
int getIfAddr64Inst(uint16_t Opcode);
LLVM_READONLY
- int getAtomicNoRetOp(uint16_t Opcode);
-
- LLVM_READONLY
int getSOPKOp(uint16_t Opcode);
/// \returns SADDR form of a FLAT Global instruction given an \p Opcode
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 34cdb09b0e15..835a5a247231 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2591,11 +2591,6 @@ class Commutable_REV <string revOp, bit isOrig> {
bit IsOrig = isOrig;
}
-class AtomicNoRet <string noRetOp, bit isRet> {
- string NoRetOp = noRetOp;
- bit IsRet = isRet;
-}
-
//===----------------------------------------------------------------------===//
// Interpolation opcodes
//===----------------------------------------------------------------------===//
@@ -2766,15 +2761,6 @@ def getIfAddr64Inst : InstrMapping {
let ValueCols = [["1"]];
}
-// Maps an atomic opcode to its returnless version.
-def getAtomicNoRetOp : InstrMapping {
- let FilterClass = "AtomicNoRet";
- let RowFields = ["NoRetOp"];
- let ColFields = ["IsRet"];
- let KeyCol = ["1"];
- let ValueCols = [["0"]];
-}
-
// Maps a GLOBAL to its SADDR form.
def getGlobalSaddrOp : InstrMapping {
let FilterClass = "GlobalSaddrTable";
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index f62e808b33e4..4069a368f687 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -312,6 +312,10 @@ public:
SIMemOp Op, bool IsVolatile,
bool IsNonTemporal) const = 0;
+ virtual bool expandSystemScopeStore(MachineBasicBlock::iterator &MI) const {
+ return false;
+ };
+
/// Inserts any necessary instructions at position \p Pos relative
/// to instruction \p MI to ensure memory instructions before \p Pos of kind
/// \p Op associated with address spaces \p AddrSpace have completed. Used
@@ -589,6 +593,15 @@ protected:
bool setScope(const MachineBasicBlock::iterator MI,
AMDGPU::CPol::CPol Value) const;
+ // Stores with system scope (SCOPE_SYS) need to wait for:
+ // - loads or atomics(returning) - wait for {LOAD|SAMPLE|BVH|KM}CNT==0
+ // - non-returning-atomics - wait for STORECNT==0
+ // TODO: SIInsertWaitcnts will not always be able to remove STORECNT waits
+ // since it does not distinguish atomics-with-return from regular stores.
+ // There is no need to wait if memory is cached (mtype != UC).
+ bool
+ insertWaitsBeforeSystemScopeStore(const MachineBasicBlock::iterator MI) const;
+
public:
SIGfx12CacheControl(const GCNSubtarget &ST) : SIGfx11CacheControl(ST) {}
@@ -603,6 +616,8 @@ public:
SIAtomicAddrSpace AddrSpace, SIMemOp Op,
bool IsVolatile,
bool IsNonTemporal) const override;
+
+ bool expandSystemScopeStore(MachineBasicBlock::iterator &MI) const override;
};
class SIMemoryLegalizer final : public MachineFunctionPass {
@@ -2194,6 +2209,22 @@ bool SIGfx12CacheControl::setScope(const MachineBasicBlock::iterator MI,
return false;
}
+bool SIGfx12CacheControl::insertWaitsBeforeSystemScopeStore(
+ const MachineBasicBlock::iterator MI) const {
+ // TODO: implement flag for frontend to give us a hint not to insert waits.
+
+ MachineBasicBlock &MBB = *MI->getParent();
+ const DebugLoc &DL = MI->getDebugLoc();
+
+ BuildMI(MBB, MI, DL, TII->get(S_WAIT_LOADCNT_soft)).addImm(0);
+ BuildMI(MBB, MI, DL, TII->get(S_WAIT_SAMPLECNT_soft)).addImm(0);
+ BuildMI(MBB, MI, DL, TII->get(S_WAIT_BVHCNT_soft)).addImm(0);
+ BuildMI(MBB, MI, DL, TII->get(S_WAIT_KMCNT_soft)).addImm(0);
+ BuildMI(MBB, MI, DL, TII->get(S_WAIT_STORECNT_soft)).addImm(0);
+
+ return true;
+}
+
bool SIGfx12CacheControl::insertWait(MachineBasicBlock::iterator &MI,
SIAtomicScope Scope,
SIAtomicAddrSpace AddrSpace, SIMemOp Op,
@@ -2364,6 +2395,9 @@ bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
if (IsVolatile) {
Changed |= setScope(MI, AMDGPU::CPol::SCOPE_SYS);
+ if (Op == SIMemOp::STORE)
+ Changed |= insertWaitsBeforeSystemScopeStore(MI);
+
// Ensure operation has completed at system scope to cause all volatile
// operations to be visible outside the program in a global order. Do not
// request cross address space as only the global address space can be
@@ -2381,6 +2415,15 @@ bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
return Changed;
}
+bool SIGfx12CacheControl::expandSystemScopeStore(
+ MachineBasicBlock::iterator &MI) const {
+ MachineOperand *CPol = TII->getNamedOperand(*MI, OpName::cpol);
+ if (CPol && ((CPol->getImm() & CPol::SCOPE) == CPol::SCOPE_SYS))
+ return insertWaitsBeforeSystemScopeStore(MI);
+
+ return false;
+}
+
bool SIMemoryLegalizer::removeAtomicPseudoMIs() {
if (AtomicPseudoMIs.empty())
return false;
@@ -2467,6 +2510,10 @@ bool SIMemoryLegalizer::expandStore(const SIMemOpInfo &MOI,
Changed |= CC->enableVolatileAndOrNonTemporal(
MI, MOI.getInstrAddrSpace(), SIMemOp::STORE, MOI.isVolatile(),
MOI.isNonTemporal());
+
+ // GFX12 specific, scope(desired coherence domain in cache hierarchy) is
+ // instruction field, do not confuse it with atomic scope.
+ Changed |= CC->expandSystemScopeStore(MI);
return Changed;
}
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 53fc2c068624..afc380b42034 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -472,12 +472,11 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
}
// Move MI before v_or_b32
- auto MBB = MI.getParent();
- MBB->remove(&MI);
- MBB->insert(getParentInst(), &MI);
+ MI.getParent()->remove(&MI);
+ getParentInst()->getParent()->insert(getParentInst(), &MI);
// Add Implicit use of preserved register
- MachineInstrBuilder MIB(*MBB->getParent(), MI);
+ MachineInstrBuilder MIB(*MI.getMF(), MI);
MIB.addReg(getPreservedOperand()->getReg(),
RegState::ImplicitKill,
getPreservedOperand()->getSubReg());
diff --git a/llvm/lib/Target/AMDGPU/SMInstructions.td b/llvm/lib/Target/AMDGPU/SMInstructions.td
index 29651a839039..a91fb87998fe 100644
--- a/llvm/lib/Target/AMDGPU/SMInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SMInstructions.td
@@ -277,8 +277,7 @@ class SM_Pseudo_Atomic<string opName,
(ins CPolTy:$cpol)),
!if(isRet, " $sdst", " $sdata") #
", $sbase, " # offsets.Asm # "$cpol",
- isRet>,
- AtomicNoRet <opNameWithSuffix, isRet> {
+ isRet> {
let has_offset = offsets.HasOffset;
let has_soffset = offsets.HasSOffset;
let PseudoInstr = opNameWithSuffix;
@@ -662,8 +661,7 @@ defm S_ATC_PROBE_BUFFER : SM_Real_Probe_vi <0x27>;
//===----------------------------------------------------------------------===//
class SMEM_Atomic_Real_vi <bits<8> op, SM_Atomic_Pseudo ps>
- : SMEM_Real_vi <op, ps>,
- AtomicNoRet <!subst("_RTN","",NAME), ps.glc> {
+ : SMEM_Real_vi <op, ps> {
bits<7> sdata;
@@ -1222,8 +1220,7 @@ defm S_ATC_PROBE : SM_Real_Probe_gfx10 <0x26>;
defm S_ATC_PROBE_BUFFER : SM_Real_Probe_gfx10 <0x27>;
class SMEM_Atomic_Real_gfx10 <bits<8> op, SM_Atomic_Pseudo ps>
- : SMEM_Real_gfx10 <op, ps>,
- AtomicNoRet <!subst("_RTN","",NAME), ps.glc> {
+ : SMEM_Real_gfx10 <op, ps> {
bits<7> sdata;
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 0fe2845f8edc..b5de311f8c58 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -1601,6 +1601,7 @@ let SubtargetPredicate = isGFX12Plus in {
def S_WAIT_SAMPLECNT_soft : SOPP_Pseudo <"s_soft_wait_samplecnt", (ins s16imm:$simm16), "$simm16">;
def S_WAIT_BVHCNT_soft : SOPP_Pseudo <"s_soft_wait_bvhcnt", (ins s16imm:$simm16), "$simm16">;
def S_WAIT_DSCNT_soft : SOPP_Pseudo <"s_soft_wait_dscnt", (ins s16imm:$simm16), "$simm16">;
+ def S_WAIT_KMCNT_soft : SOPP_Pseudo <"s_soft_wait_kmcnt", (ins s16imm:$simm16), "$simm16">;
}
def S_SETHALT : SOPP_Pseudo <"s_sethalt" , (ins i32imm:$simm16), "$simm16",
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 177d99a0ac0a..963dc2882fcc 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -1700,16 +1700,6 @@ int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
return (Idx < 0) ? Idx : Opr[Idx].Encoding;
}
-bool isValidHwreg(int64_t Id) { return 0 <= Id && isUInt<HwregId::Width>(Id); }
-
-bool isValidHwregOffset(int64_t Offset) {
- return 0 <= Offset && isUInt<HwregOffset::Width>(Offset);
-}
-
-bool isValidHwregWidth(int64_t Width) {
- return 0 <= (Width - 1) && isUInt<HwregSize::Width>(Width - 1);
-}
-
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
return (Idx < 0) ? "" : Opr[Idx].Name;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 9a6d0834679e..6edf01d1217f 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -856,14 +856,6 @@ bool isReadOnlySegment(const GlobalValue *GV);
/// target triple \p TT, false otherwise.
bool shouldEmitConstantsToTextSection(const Triple &TT);
-/// \returns Integer value requested using \p F's \p Name attribute.
-///
-/// \returns \p Default if attribute is not present.
-///
-/// \returns \p Default and emits error if requested value cannot be converted
-/// to integer.
-int getIntegerAttribute(const Function &F, StringRef Name, int Default);
-
/// \returns A pair of integer values requested using \p F's \p Name attribute
/// in "first[,second]" format ("second" is optional unless \p OnlyFirstRequired
/// is false).
@@ -1069,15 +1061,6 @@ LLVM_READONLY
int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI);
LLVM_READNONE
-bool isValidHwreg(int64_t Id);
-
-LLVM_READNONE
-bool isValidHwregOffset(int64_t Offset);
-
-LLVM_READNONE
-bool isValidHwregWidth(int64_t Width);
-
-LLVM_READNONE
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI);
} // namespace Hwreg
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 918bdb9506b0..fa8d46608f5d 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -190,6 +190,7 @@ class VOP3_Real <VOP_Pseudo ps, int EncodingFamily, string asm_name = ps.Mnemoni
// copy relevant pseudo op flags
let SubtargetPredicate = ps.SubtargetPredicate;
+ let WaveSizePredicate = ps.WaveSizePredicate;
let OtherPredicates = ps.OtherPredicates;
let AsmMatchConverter = ps.AsmMatchConverter;
let AsmVariantName = ps.AsmVariantName;
diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td
index 877781568307..b62e1a032631 100644
--- a/llvm/lib/Target/ARM/ARM.td
+++ b/llvm/lib/Target/ARM/ARM.td
@@ -1682,6 +1682,7 @@ def : ProcNoItin<"neoverse-n1", [ARMv82a,
def : ProcNoItin<"neoverse-n2", [ARMv9a,
FeatureBF16,
+ FeatureFP16FML,
FeatureMatMulInt8]>;
def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift,
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index a77aeda96aae..61d2928fe6d4 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -2953,8 +2953,7 @@ bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
}
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
- bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
-
+ bool UseGOT_PREL = !GV->isDSOLocal();
LLVMContext *Context = &MF->getFunction().getContext();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index b98006ed0cb3..06d4a39cde77 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3921,20 +3921,18 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDLoc dl(Op);
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- const TargetMachine &TM = getTargetMachine();
bool IsRO = isReadOnly(GV);
// promoteToConstantPool only if not generating XO text section
- if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
+ if (GV->isDSOLocal() && !Subtarget->genExecuteOnly())
if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
return V;
if (isPositionIndependent()) {
- bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
- SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
- UseGOT_PREL ? ARMII::MO_GOT : 0);
+ SDValue G = DAG.getTargetGlobalAddress(
+ GV, dl, PtrVT, 0, GV->isDSOLocal() ? 0 : ARMII::MO_GOT);
SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
- if (UseGOT_PREL)
+ if (!GV->isDSOLocal())
Result =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 922fa93226f2..691715dc2963 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -366,8 +366,7 @@ bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
}
bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const {
- return isTargetELF() && TM.isPositionIndependent() &&
- !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
+ return isTargetELF() && TM.isPositionIndependent() && !GV->isDSOLocal();
}
unsigned ARMSubtarget::getMispredictionPenalty() const {
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 37bfb76a494d..efec163c6ed6 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -506,9 +506,10 @@ class ARMAsmParser : public MCTargetAsmParser {
bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
- unsigned &PredicationCode,
- unsigned &VPTPredicationCode, bool &CarrySetting,
- unsigned &ProcessorIMod, StringRef &ITMask);
+ ARMCC::CondCodes &PredicationCode,
+ ARMVCC::VPTCodes &VPTPredicationCode,
+ bool &CarrySetting, unsigned &ProcessorIMod,
+ StringRef &ITMask);
void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
StringRef FullInst, bool &CanAcceptCarrySet,
bool &CanAcceptPredicationCode,
@@ -6283,10 +6284,9 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
-StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
- StringRef ExtraToken,
- unsigned &PredicationCode,
- unsigned &VPTPredicationCode,
+StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
+ ARMCC::CondCodes &PredicationCode,
+ ARMVCC::VPTCodes &VPTPredicationCode,
bool &CarrySetting,
unsigned &ProcessorIMod,
StringRef &ITMask) {
@@ -6340,7 +6340,7 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
if (CC != ~0U) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
- PredicationCode = CC;
+ PredicationCode = static_cast<ARMCC::CondCodes>(CC);
}
}
@@ -6384,10 +6384,11 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
- unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
- if (CC != ~0U) {
+ unsigned VCC =
+ ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size() - 1));
+ if (VCC != ~0U) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
- VPTPredicationCode = CC;
+ VPTPredicationCode = static_cast<ARMVCC::VPTCodes>(VCC);
}
return Mnemonic;
}
@@ -6966,8 +6967,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
// Split out the predication code and carry setting flag from the mnemonic.
- unsigned PredicationCode;
- unsigned VPTPredicationCode;
+ ARMCC::CondCodes PredicationCode;
+ ARMVCC::VPTCodes VPTPredicationCode;
unsigned ProcessorIMod;
bool CarrySetting;
StringRef ITMask;
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 8a3454c89542..33b08ed93e3d 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -12,139 +12,230 @@
//===----------------------------------------------------------------------===//
include "llvm/IR/Intrinsics.td"
-include "llvm/IR/Attributes.td"
-// Abstract representation of the class a DXIL Operation belongs to.
-class DXILOpClass<string name> {
- string Name = name;
+class DXILOpClass;
+
+// Following is a set of DXIL Operation classes whose names appear to be
+// arbitrary, yet need to be a substring of the function name used during
+// lowering to DXIL Operation calls. These class name strings are specified
+// as the third argument of add_dxil_op in utils/hct/hctdb.py and case converted
+// in utils/hct/hctdb_instrhelp.py of DirectXShaderCompiler repo. The function
+// name has the format "dx.op.<class-name>.<return-type>".
+
+defset list<DXILOpClass> OpClasses = {
+ def acceptHitAndEndSearch : DXILOpClass;
+ def allocateNodeOutputRecords : DXILOpClass;
+ def allocateRayQuery : DXILOpClass;
+ def annotateHandle : DXILOpClass;
+ def annotateNodeHandle : DXILOpClass;
+ def annotateNodeRecordHandle : DXILOpClass;
+ def atomicBinOp : DXILOpClass;
+ def atomicCompareExchange : DXILOpClass;
+ def attributeAtVertex : DXILOpClass;
+ def barrier : DXILOpClass;
+ def barrierByMemoryHandle : DXILOpClass;
+ def barrierByMemoryType : DXILOpClass;
+ def barrierByNodeRecordHandle : DXILOpClass;
+ def binary : DXILOpClass;
+ def binaryWithCarryOrBorrow : DXILOpClass;
+ def binaryWithTwoOuts : DXILOpClass;
+ def bitcastF16toI16 : DXILOpClass;
+ def bitcastF32toI32 : DXILOpClass;
+ def bitcastF64toI64 : DXILOpClass;
+ def bitcastI16toF16 : DXILOpClass;
+ def bitcastI32toF32 : DXILOpClass;
+ def bitcastI64toF64 : DXILOpClass;
+ def bufferLoad : DXILOpClass;
+ def bufferStore : DXILOpClass;
+ def bufferUpdateCounter : DXILOpClass;
+ def calculateLOD : DXILOpClass;
+ def callShader : DXILOpClass;
+ def cbufferLoad : DXILOpClass;
+ def cbufferLoadLegacy : DXILOpClass;
+ def checkAccessFullyMapped : DXILOpClass;
+ def coverage : DXILOpClass;
+ def createHandle : DXILOpClass;
+ def createHandleForLib : DXILOpClass;
+ def createHandleFromBinding : DXILOpClass;
+ def createHandleFromHeap : DXILOpClass;
+ def createNodeInputRecordHandle : DXILOpClass;
+ def createNodeOutputHandle : DXILOpClass;
+ def cutStream : DXILOpClass;
+ def cycleCounterLegacy : DXILOpClass;
+ def discard : DXILOpClass;
+ def dispatchMesh : DXILOpClass;
+ def dispatchRaysDimensions : DXILOpClass;
+ def dispatchRaysIndex : DXILOpClass;
+ def domainLocation : DXILOpClass;
+ def dot2 : DXILOpClass;
+ def dot2AddHalf : DXILOpClass;
+ def dot3 : DXILOpClass;
+ def dot4 : DXILOpClass;
+ def dot4AddPacked : DXILOpClass;
+ def emitIndices : DXILOpClass;
+ def emitStream : DXILOpClass;
+ def emitThenCutStream : DXILOpClass;
+ def evalCentroid : DXILOpClass;
+ def evalSampleIndex : DXILOpClass;
+ def evalSnapped : DXILOpClass;
+ def finishedCrossGroupSharing : DXILOpClass;
+ def flattenedThreadIdInGroup : DXILOpClass;
+ def geometryIndex : DXILOpClass;
+ def getDimensions : DXILOpClass;
+ def getInputRecordCount : DXILOpClass;
+ def getMeshPayload : DXILOpClass;
+ def getNodeRecordPtr : DXILOpClass;
+ def getRemainingRecursionLevels : DXILOpClass;
+ def groupId : DXILOpClass;
+ def gsInstanceID : DXILOpClass;
+ def hitKind : DXILOpClass;
+ def ignoreHit : DXILOpClass;
+ def incrementOutputCount : DXILOpClass;
+ def indexNodeHandle : DXILOpClass;
+ def innerCoverage : DXILOpClass;
+ def instanceID : DXILOpClass;
+ def instanceIndex : DXILOpClass;
+ def isHelperLane : DXILOpClass;
+ def isSpecialFloat : DXILOpClass;
+ def legacyDoubleToFloat : DXILOpClass;
+ def legacyDoubleToSInt32 : DXILOpClass;
+ def legacyDoubleToUInt32 : DXILOpClass;
+ def legacyF16ToF32 : DXILOpClass;
+ def legacyF32ToF16 : DXILOpClass;
+ def loadInput : DXILOpClass;
+ def loadOutputControlPoint : DXILOpClass;
+ def loadPatchConstant : DXILOpClass;
+ def makeDouble : DXILOpClass;
+ def minPrecXRegLoad : DXILOpClass;
+ def minPrecXRegStore : DXILOpClass;
+ def nodeOutputIsValid : DXILOpClass;
+ def objectRayDirection : DXILOpClass;
+ def objectRayOrigin : DXILOpClass;
+ def objectToWorld : DXILOpClass;
+ def outputComplete : DXILOpClass;
+ def outputControlPointID : DXILOpClass;
+ def pack4x8 : DXILOpClass;
+ def primitiveID : DXILOpClass;
+ def primitiveIndex : DXILOpClass;
+ def quadOp : DXILOpClass;
+ def quadReadLaneAt : DXILOpClass;
+ def quadVote : DXILOpClass;
+ def quaternary : DXILOpClass;
+ def rawBufferLoad : DXILOpClass;
+ def rawBufferStore : DXILOpClass;
+ def rayFlags : DXILOpClass;
+ def rayQuery_Abort : DXILOpClass;
+ def rayQuery_CommitNonOpaqueTriangleHit : DXILOpClass;
+ def rayQuery_CommitProceduralPrimitiveHit : DXILOpClass;
+ def rayQuery_Proceed : DXILOpClass;
+ def rayQuery_StateMatrix : DXILOpClass;
+ def rayQuery_StateScalar : DXILOpClass;
+ def rayQuery_StateVector : DXILOpClass;
+ def rayQuery_TraceRayInline : DXILOpClass;
+ def rayTCurrent : DXILOpClass;
+ def rayTMin : DXILOpClass;
+ def renderTargetGetSampleCount : DXILOpClass;
+ def renderTargetGetSamplePosition : DXILOpClass;
+ def reportHit : DXILOpClass;
+ def sample : DXILOpClass;
+ def sampleBias : DXILOpClass;
+ def sampleCmp : DXILOpClass;
+ def sampleCmpBias : DXILOpClass;
+ def sampleCmpGrad : DXILOpClass;
+ def sampleCmpLevel : DXILOpClass;
+ def sampleCmpLevelZero : DXILOpClass;
+ def sampleGrad : DXILOpClass;
+ def sampleIndex : DXILOpClass;
+ def sampleLevel : DXILOpClass;
+ def setMeshOutputCounts : DXILOpClass;
+ def splitDouble : DXILOpClass;
+ def startInstanceLocation : DXILOpClass;
+ def startVertexLocation : DXILOpClass;
+ def storeOutput : DXILOpClass;
+ def storePatchConstant : DXILOpClass;
+ def storePrimitiveOutput : DXILOpClass;
+ def storeVertexOutput : DXILOpClass;
+ def tempRegLoad : DXILOpClass;
+ def tempRegStore : DXILOpClass;
+ def tertiary : DXILOpClass;
+ def texture2DMSGetSamplePosition : DXILOpClass;
+ def textureGather : DXILOpClass;
+ def textureGatherCmp : DXILOpClass;
+ def textureGatherRaw : DXILOpClass;
+ def textureLoad : DXILOpClass;
+ def textureStore : DXILOpClass;
+ def textureStoreSample : DXILOpClass;
+ def threadId : DXILOpClass;
+ def threadIdInGroup : DXILOpClass;
+ def traceRay : DXILOpClass;
+ def unary : DXILOpClass;
+ def unaryBits : DXILOpClass;
+ def unpack4x8 : DXILOpClass;
+ def viewID : DXILOpClass;
+ def waveActiveAllEqual : DXILOpClass;
+ def waveActiveBallot : DXILOpClass;
+ def waveActiveBit : DXILOpClass;
+ def waveActiveOp : DXILOpClass;
+ def waveAllOp : DXILOpClass;
+ def waveAllTrue : DXILOpClass;
+ def waveAnyTrue : DXILOpClass;
+ def waveGetLaneCount : DXILOpClass;
+ def waveGetLaneIndex : DXILOpClass;
+ def waveIsFirstLane : DXILOpClass;
+ def waveMatch : DXILOpClass;
+ def waveMatrix_Accumulate : DXILOpClass;
+ def waveMatrix_Annotate : DXILOpClass;
+ def waveMatrix_Depth : DXILOpClass;
+ def waveMatrix_Fill : DXILOpClass;
+ def waveMatrix_LoadGroupShared : DXILOpClass;
+ def waveMatrix_LoadRawBuf : DXILOpClass;
+ def waveMatrix_Multiply : DXILOpClass;
+ def waveMatrix_ScalarOp : DXILOpClass;
+ def waveMatrix_StoreGroupShared : DXILOpClass;
+ def waveMatrix_StoreRawBuf : DXILOpClass;
+ def waveMultiPrefixBitCount : DXILOpClass;
+ def waveMultiPrefixOp : DXILOpClass;
+ def wavePrefixOp : DXILOpClass;
+ def waveReadLaneAt : DXILOpClass;
+ def waveReadLaneFirst : DXILOpClass;
+ def worldRayDirection : DXILOpClass;
+ def worldRayOrigin : DXILOpClass;
+ def worldToObject : DXILOpClass;
+ def writeSamplerFeedback : DXILOpClass;
+ def writeSamplerFeedbackBias : DXILOpClass;
+ def writeSamplerFeedbackGrad : DXILOpClass;
+ def writeSamplerFeedbackLevel: DXILOpClass;
}
-// Abstract representation of the category a DXIL Operation belongs to
-class DXILOpCategory<string name> {
- string Name = name;
+// Abstraction of the mapping from a DXIL Operation to an LLVM intrinsic
+class DXILOpMapping<int opCode, DXILOpClass opClass, Intrinsic intrinsic, string doc> {
+ int OpCode = opCode; // Opcode corresponding to DXIL Operation
+ DXILOpClass OpClass = opClass; // Class of DXIL Operation.
+  Intrinsic LLVMIntrinsic = intrinsic; // LLVM Intrinsic the DXIL Operation maps to
+  string Doc = doc; // A short description of the operation
}
-def UnaryClass : DXILOpClass<"Unary">;
-def BinaryClass : DXILOpClass<"Binary">;
-def FlattenedThreadIdInGroupClass : DXILOpClass<"FlattenedThreadIdInGroup">;
-def ThreadIdInGroupClass : DXILOpClass<"ThreadIdInGroup">;
-def ThreadIdClass : DXILOpClass<"ThreadId">;
-def GroupIdClass : DXILOpClass<"GroupId">;
-
-def BinaryUintCategory : DXILOpCategory<"Binary uint">;
-def UnaryFloatCategory : DXILOpCategory<"Unary float">;
-def ComputeIDCategory : DXILOpCategory<"Compute/Mesh/Amplification shader">;
-
-// Represent as any pointer type with an option to change to a qualified pointer
-// type with address space specified.
-def dxil_handle_ty : LLVMAnyPointerType;
-def dxil_cbuffer_ty : LLVMAnyPointerType;
-def dxil_resource_ty : LLVMAnyPointerType;
-
-// The parameter description for a DXIL operation
-class DXILOpParameter<int pos, LLVMType type, string name, string doc,
- bit isConstant = 0, string enumName = "",
- int maxValue = 0> {
- int Pos = pos; // Position in parameter list
- LLVMType ParamType = type; // Parameter type
- string Name = name; // Short, unique parameter name
- string Doc = doc; // Description of this parameter
- bit IsConstant = isConstant; // Whether this parameter requires a constant value in the IR
- string EnumName = enumName; // Name of the enum type, if applicable
- int MaxValue = maxValue; // Maximum value for this parameter, if applicable
-}
-
-// A representation for a DXIL operation
-class DXILOperationDesc {
- string OpName = ""; // Name of DXIL operation
- int OpCode = 0; // Unique non-negative integer associated with the operation
- DXILOpClass OpClass; // Class of the operation
- DXILOpCategory OpCategory; // Category of the operation
- string Doc = ""; // Description of the operation
- list<DXILOpParameter> Params = []; // Parameter list of the operation
- list<LLVMType> OverloadTypes = []; // Overload types, if applicable
- EnumAttr Attribute; // Operation Attribute. Leverage attributes defined in Attributes.td
- // ReadNone - operation does not access memory.
- // ReadOnly - only reads from memory.
- // "ReadMemory" - reads memory
- bit IsDerivative = 0; // Whether this is some kind of derivative
- bit IsGradient = 0; // Whether this requires a gradient calculation
- bit IsFeedback = 0; // Whether this is a sampler feedback operation
- bit IsWave = 0; // Whether this requires in-wave, cross-lane functionality
- bit NeedsUniformInputs = 0; // Whether this operation requires that all
- // of its inputs are uniform across the wave
- // Group DXIL operation for stats - e.g., to accumulate the number of atomic/float/uint/int/...
- // operations used in the program.
- list<string> StatsGroup = [];
-}
-
-class DXILOperation<string name, int opCode, DXILOpClass opClass, DXILOpCategory opCategory, string doc,
- list<LLVMType> oloadTypes, EnumAttr attrs, list<DXILOpParameter> params,
- list<string> statsGroup = []> : DXILOperationDesc {
- let OpName = name;
- let OpCode = opCode;
- let Doc = doc;
- let Params = params;
- let OpClass = opClass;
- let OpCategory = opCategory;
- let OverloadTypes = oloadTypes;
- let Attribute = attrs;
- let StatsGroup = statsGroup;
-}
-
-// LLVM intrinsic that DXIL operation maps to.
-class LLVMIntrinsic<Intrinsic llvm_intrinsic_> { Intrinsic llvm_intrinsic = llvm_intrinsic_; }
-
-def Sin : DXILOperation<"Sin", 13, UnaryClass, UnaryFloatCategory, "returns sine(theta) for theta in radians.",
- [llvm_half_ty, llvm_float_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_anyfloat_ty, "", "operation result">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_anyfloat_ty, "value", "input value">
- ],
- ["floats"]>,
- LLVMIntrinsic<int_sin>;
-
-def UMax : DXILOperation< "UMax", 39, BinaryClass, BinaryUintCategory, "unsigned integer maximum. UMax(a,b) = a > b ? a : b",
- [llvm_i16_ty, llvm_i32_ty, llvm_i64_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_anyint_ty, "", "operation result">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_anyint_ty, "a", "input value">,
- DXILOpParameter<3, llvm_anyint_ty, "b", "input value">
- ],
- ["uints"]>,
- LLVMIntrinsic<int_umax>;
-
-def ThreadId : DXILOperation< "ThreadId", 93, ThreadIdClass, ComputeIDCategory, "reads the thread ID", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "thread ID component">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_i32_ty, "component", "component to read (x,y,z)">
- ]>,
- LLVMIntrinsic<int_dx_thread_id>;
-
-def GroupId : DXILOperation< "GroupId", 94, GroupIdClass, ComputeIDCategory, "reads the group ID (SV_GroupID)", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "group ID component">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_i32_ty, "component", "component to read">
- ]>,
- LLVMIntrinsic<int_dx_group_id>;
-
-def ThreadIdInGroup : DXILOperation< "ThreadIdInGroup", 95, ThreadIdInGroupClass, ComputeIDCategory,
- "reads the thread ID within the group (SV_GroupThreadID)", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "thread ID in group component">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_i32_ty, "component", "component to read (x,y,z)">
- ]>,
- LLVMIntrinsic<int_dx_thread_id_in_group>;
-
-def FlattenedThreadIdInGroup : DXILOperation< "FlattenedThreadIdInGroup", 96, FlattenedThreadIdInGroupClass, ComputeIDCategory,
- "provides a flattened index for a given thread within a given group (SV_GroupIndex)", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "result">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">
- ]>,
- LLVMIntrinsic<int_dx_flattened_thread_id_in_group>;
+// Concrete definition of DXIL Operation mapping to corresponding LLVM intrinsic
+def Sin : DXILOpMapping<13, unary, int_sin,
+ "Returns sine(theta) for theta in radians.">;
+def Frac : DXILOpMapping<22, unary, int_dx_frac,
+ "Returns a fraction from 0 to 1 that represents the "
+ "decimal part of the input.">;
+def Round : DXILOpMapping<26, unary, int_round,
+                          "Returns the input rounded to the nearest integer "
+                          "within a floating-point type.">;
+def UMax : DXILOpMapping<39, binary, int_umax,
+ "Unsigned integer maximum. UMax(a,b) = a > b ? a : b">;
+def ThreadId : DXILOpMapping<93, threadId, int_dx_thread_id,
+ "Reads the thread ID">;
+def GroupId : DXILOpMapping<94, groupId, int_dx_group_id,
+ "Reads the group ID (SV_GroupID)">;
+def ThreadIdInGroup : DXILOpMapping<95, threadIdInGroup,
+ int_dx_thread_id_in_group,
+ "Reads the thread ID within the group "
+ "(SV_GroupThreadID)">;
+def FlattenedThreadIdInGroup : DXILOpMapping<96, flattenedThreadIdInGroup,
+ int_dx_flattened_thread_id_in_group,
+ "Provides a flattened index for a "
+ "given thread within a given "
+ "group (SV_GroupIndex)">;
diff --git a/llvm/lib/Target/DirectX/DXILOpBuilder.cpp b/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
index 42180a865b72..21a20d45b922 100644
--- a/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
@@ -221,12 +221,26 @@ static Type *getTypeFromParameterKind(ParameterKind Kind, Type *OverloadTy) {
return nullptr;
}
+/// Construct DXIL function type. This is the type of a function with
+/// the following prototype
+/// OverloadType dx.op.<opclass>.<return-type>(int opcode, <param types>)
+/// <param-types> are constructed from types in Prop.
+/// \param Prop Structure containing DXIL Operation properties based on
+/// its specification in DXIL.td.
+/// \param OverloadTy Return type to be used to construct DXIL function type.
static FunctionType *getDXILOpFunctionType(const OpCodeProperty *Prop,
Type *OverloadTy) {
SmallVector<Type *> ArgTys;
auto ParamKinds = getOpCodeParameterKind(*Prop);
+ // Add OverloadTy as return type of the function
+ ArgTys.emplace_back(OverloadTy);
+
+ // Add DXIL Opcode value type viz., Int32 as first argument
+ ArgTys.emplace_back(Type::getInt32Ty(OverloadTy->getContext()));
+
+ // Add DXIL Operation parameter types as specified in DXIL properties
for (unsigned I = 0; I < Prop->NumOfParameters; ++I) {
ParameterKind Kind = ParamKinds[I];
ArgTys.emplace_back(getTypeFromParameterKind(Kind, OverloadTy));
@@ -267,13 +281,13 @@ CallInst *DXILOpBuilder::createDXILOpCall(dxil::OpCode OpCode, Type *OverloadTy,
return B.CreateCall(Fn, FullArgs);
}
-Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT,
- bool NoOpCodeParam) {
+Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT) {
const OpCodeProperty *Prop = getOpCodeProperty(OpCode);
+ // If DXIL Op has no overload parameter, just return the
+ // precise return type specified.
if (Prop->OverloadParamIndex < 0) {
auto &Ctx = FT->getContext();
- // When only has 1 overload type, just return it.
switch (Prop->OverloadTys) {
case OverloadKind::VOID:
return Type::getVoidTy(Ctx);
@@ -302,9 +316,8 @@ Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT,
// Prop->OverloadParamIndex is 0, overload type is FT->getReturnType().
Type *OverloadType = FT->getReturnType();
if (Prop->OverloadParamIndex != 0) {
- // Skip Return Type and Type for DXIL opcode.
- const unsigned SkipedParam = NoOpCodeParam ? 2 : 1;
- OverloadType = FT->getParamType(Prop->OverloadParamIndex - SkipedParam);
+ // Skip Return Type.
+ OverloadType = FT->getParamType(Prop->OverloadParamIndex - 1);
}
auto ParamKinds = getOpCodeParameterKind(*Prop);
diff --git a/llvm/lib/Target/DirectX/DXILOpBuilder.h b/llvm/lib/Target/DirectX/DXILOpBuilder.h
index 940ed538c7ce..1c15f109184a 100644
--- a/llvm/lib/Target/DirectX/DXILOpBuilder.h
+++ b/llvm/lib/Target/DirectX/DXILOpBuilder.h
@@ -31,8 +31,7 @@ public:
DXILOpBuilder(Module &M, IRBuilderBase &B) : M(M), B(B) {}
CallInst *createDXILOpCall(dxil::OpCode OpCode, Type *OverloadTy,
llvm::iterator_range<Use *> Args);
- Type *getOverloadTy(dxil::OpCode OpCode, FunctionType *FT,
- bool NoOpCodeParam);
+ Type *getOverloadTy(dxil::OpCode OpCode, FunctionType *FT);
static const char *getOpCodeName(dxil::OpCode DXILOp);
private:
diff --git a/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index f6e2297e9af4..6b649b76beec 100644
--- a/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -33,8 +33,7 @@ static void lowerIntrinsic(dxil::OpCode DXILOp, Function &F, Module &M) {
IRBuilder<> B(M.getContext());
Value *DXILOpArg = B.getInt32(static_cast<unsigned>(DXILOp));
DXILOpBuilder DXILB(M, B);
- Type *OverloadTy =
- DXILB.getOverloadTy(DXILOp, F.getFunctionType(), /*NoOpCodeParam*/ true);
+ Type *OverloadTy = DXILB.getOverloadTy(DXILOp, F.getFunctionType());
for (User *U : make_early_inc_range(F.users())) {
CallInst *CI = dyn_cast<CallInst>(U);
if (!CI)
diff --git a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
index bbb564356602..66a9dc46bcbf 100644
--- a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
+++ b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
@@ -51,7 +51,7 @@ void ComputedShaderFlags::print(raw_ostream &OS) const {
if (FlagVal == 0)
return;
OS << "; Note: shader requires additional functionality:\n";
-#define SHADER_FLAG(bit, FlagName, Str) \
+#define SHADER_FEATURE_FLAG(bit, FlagName, Str) \
if (FlagName) \
OS << "; " Str "\n";
#include "llvm/BinaryFormat/DXContainerConstants.def"
diff --git a/llvm/lib/Target/DirectX/DXILShaderFlags.h b/llvm/lib/Target/DirectX/DXILShaderFlags.h
index 4f51873a2d0b..574a7b090f52 100644
--- a/llvm/lib/Target/DirectX/DXILShaderFlags.h
+++ b/llvm/lib/Target/DirectX/DXILShaderFlags.h
@@ -29,17 +29,17 @@ class GlobalVariable;
namespace dxil {
struct ComputedShaderFlags {
-#define SHADER_FLAG(bit, FlagName, Str) bool FlagName : 1;
+#define SHADER_FEATURE_FLAG(bit, FlagName, Str) bool FlagName : 1;
#include "llvm/BinaryFormat/DXContainerConstants.def"
-#define SHADER_FLAG(bit, FlagName, Str) FlagName = false;
+#define SHADER_FEATURE_FLAG(bit, FlagName, Str) FlagName = false;
ComputedShaderFlags() {
#include "llvm/BinaryFormat/DXContainerConstants.def"
}
operator uint64_t() const {
uint64_t FlagValue = 0;
-#define SHADER_FLAG(bit, FlagName, Str) \
+#define SHADER_FEATURE_FLAG(bit, FlagName, Str) \
FlagValue |= \
FlagName ? static_cast<uint64_t>(dxbc::FeatureFlags::FlagName) : 0ull;
#include "llvm/BinaryFormat/DXContainerConstants.def"
diff --git a/llvm/lib/Target/Hexagon/CMakeLists.txt b/llvm/lib/Target/Hexagon/CMakeLists.txt
index a22a5c11e6ab..cdc062eee72b 100644
--- a/llvm/lib/Target/Hexagon/CMakeLists.txt
+++ b/llvm/lib/Target/Hexagon/CMakeLists.txt
@@ -43,6 +43,7 @@ add_llvm_target(HexagonCodeGen
HexagonISelDAGToDAGHVX.cpp
HexagonISelLowering.cpp
HexagonISelLoweringHVX.cpp
+ HexagonLoopAlign.cpp
HexagonLoopIdiomRecognition.cpp
HexagonMachineFunctionInfo.cpp
HexagonMachineScheduler.cpp
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 3b8234c01184..4c18e076c439 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -1002,7 +1002,7 @@ namespace {
bool DeadCodeElimination::isDead(unsigned R) const {
for (const MachineOperand &MO : MRI.use_operands(R)) {
const MachineInstr *UseI = MO.getParent();
- if (UseI->isDebugValue())
+ if (UseI->isDebugInstr())
continue;
if (UseI->isPHI()) {
assert(!UseI->getOperand(0).getSubReg());
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopAlign.cpp b/llvm/lib/Target/Hexagon/HexagonLoopAlign.cpp
new file mode 100644
index 000000000000..c79b528ff2f3
--- /dev/null
+++ b/llvm/lib/Target/Hexagon/HexagonLoopAlign.cpp
@@ -0,0 +1,216 @@
+//===----- HexagonLoopAlign.cpp - Generate loop alignment directives -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Inspect a basic block and if it is a single basic block loop with a small
+// number of instructions, set the prefLoopAlignment to 32 bytes (5).
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hexagon-loop-align"
+
+#include "HexagonTargetMachine.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+ DisableLoopAlign("disable-hexagon-loop-align", cl::Hidden,
+ cl::desc("Disable Hexagon loop alignment pass"));
+
+static cl::opt<uint32_t> HVXLoopAlignLimitUB(
+ "hexagon-hvx-loop-align-limit-ub", cl::Hidden, cl::init(16),
+ cl::desc("Set hexagon hvx loop upper bound align limit"));
+
+static cl::opt<uint32_t> TinyLoopAlignLimitUB(
+ "hexagon-tiny-loop-align-limit-ub", cl::Hidden, cl::init(16),
+ cl::desc("Set hexagon tiny-core loop upper bound align limit"));
+
+static cl::opt<uint32_t>
+ LoopAlignLimitUB("hexagon-loop-align-limit-ub", cl::Hidden, cl::init(8),
+ cl::desc("Set hexagon loop upper bound align limit"));
+
+static cl::opt<uint32_t>
+ LoopAlignLimitLB("hexagon-loop-align-limit-lb", cl::Hidden, cl::init(4),
+ cl::desc("Set hexagon loop lower bound align limit"));
+
+static cl::opt<uint32_t>
+ LoopBndlAlignLimit("hexagon-loop-bundle-align-limit", cl::Hidden,
+ cl::init(4),
+ cl::desc("Set hexagon loop align bundle limit"));
+
+static cl::opt<uint32_t> TinyLoopBndlAlignLimit(
+ "hexagon-tiny-loop-bundle-align-limit", cl::Hidden, cl::init(8),
+ cl::desc("Set hexagon tiny-core loop align bundle limit"));
+
+static cl::opt<uint32_t>
+ LoopEdgeThreshold("hexagon-loop-edge-threshold", cl::Hidden, cl::init(7500),
+ cl::desc("Set hexagon loop align edge theshold"));
+
+namespace llvm {
+FunctionPass *createHexagonLoopAlign();
+void initializeHexagonLoopAlignPass(PassRegistry &);
+} // namespace llvm
+
+namespace {
+
+class HexagonLoopAlign : public MachineFunctionPass {
+ const HexagonSubtarget *HST = nullptr;
+ const TargetMachine *HTM = nullptr;
+ const HexagonInstrInfo *HII = nullptr;
+
+public:
+ static char ID;
+ HexagonLoopAlign() : MachineFunctionPass(ID) {
+ initializeHexagonLoopAlignPass(*PassRegistry::getPassRegistry());
+ }
+ bool shouldBalignLoop(MachineBasicBlock &BB, bool AboveThres);
+ bool isSingleLoop(MachineBasicBlock &MBB);
+ bool attemptToBalignSmallLoop(MachineFunction &MF, MachineBasicBlock &MBB);
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ StringRef getPassName() const override { return "Hexagon LoopAlign pass"; }
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+char HexagonLoopAlign::ID = 0;
+
+bool HexagonLoopAlign::shouldBalignLoop(MachineBasicBlock &BB,
+ bool AboveThres) {
+ bool isVec = false;
+ unsigned InstCnt = 0;
+ unsigned BndlCnt = 0;
+
+ for (MachineBasicBlock::instr_iterator II = BB.instr_begin(),
+ IE = BB.instr_end();
+ II != IE; ++II) {
+
+ // End if the instruction is endloop.
+ if (HII->isEndLoopN(II->getOpcode()))
+ break;
+ // Count the number of bundles.
+ if (II->isBundle()) {
+ BndlCnt++;
+ continue;
+ }
+ // Skip over debug instructions.
+ if (II->isDebugInstr())
+ continue;
+ // Check if there are any HVX instructions in loop.
+ isVec |= HII->isHVXVec(*II);
+ // Count the number of instructions.
+ InstCnt++;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Bundle Count : " << BndlCnt << "\n";
+ dbgs() << "Instruction Count : " << InstCnt << "\n";
+ });
+
+ unsigned LimitUB = 0;
+ unsigned LimitBndl = LoopBndlAlignLimit;
+ // The conditions in the order of priority.
+ if (HST->isTinyCore()) {
+ LimitUB = TinyLoopAlignLimitUB;
+ LimitBndl = TinyLoopBndlAlignLimit;
+ } else if (isVec)
+ LimitUB = HVXLoopAlignLimitUB;
+ else if (AboveThres)
+ LimitUB = LoopAlignLimitUB;
+
+ // if the upper bound is not set to a value, implies we didn't meet
+ // the criteria.
+ if (LimitUB == 0)
+ return false;
+
+ return InstCnt >= LoopAlignLimitLB && InstCnt <= LimitUB &&
+ BndlCnt <= LimitBndl;
+}
+
+bool HexagonLoopAlign::isSingleLoop(MachineBasicBlock &MBB) {
+ int Succs = MBB.succ_size();
+ return (MBB.isSuccessor(&MBB) && (Succs == 2));
+}
+
+bool HexagonLoopAlign::attemptToBalignSmallLoop(MachineFunction &MF,
+ MachineBasicBlock &MBB) {
+ if (!isSingleLoop(MBB))
+ return false;
+
+ const MachineBranchProbabilityInfo *MBPI =
+ &getAnalysis<MachineBranchProbabilityInfo>();
+ const MachineBlockFrequencyInfo *MBFI =
+ &getAnalysis<MachineBlockFrequencyInfo>();
+
+  // Compute the frequency of the back edge.
+ BlockFrequency BlockFreq = MBFI->getBlockFreq(&MBB);
+ BranchProbability BrProb = MBPI->getEdgeProbability(&MBB, &MBB);
+ BlockFrequency EdgeFreq = BlockFreq * BrProb;
+ LLVM_DEBUG({
+ dbgs() << "Loop Align Pass:\n";
+ dbgs() << "\tedge with freq(" << EdgeFreq.getFrequency() << ")\n";
+ });
+
+ bool AboveThres = EdgeFreq.getFrequency() > LoopEdgeThreshold;
+ if (shouldBalignLoop(MBB, AboveThres)) {
+ // We found a loop, change its alignment to be 32 (5).
+ MBB.setAlignment(llvm::Align(1 << 5));
+ return true;
+ }
+ return false;
+}
+
+// Inspect each basic block, and if it is a single BB loop, see if it
+// meets the criteria for increasing alignment to 32.
+
+bool HexagonLoopAlign::runOnMachineFunction(MachineFunction &MF) {
+
+ HST = &MF.getSubtarget<HexagonSubtarget>();
+ HII = HST->getInstrInfo();
+ HTM = &MF.getTarget();
+
+ if (skipFunction(MF.getFunction()))
+ return false;
+ if (DisableLoopAlign)
+ return false;
+
+ // This optimization is performed at
+ // i) -O2 and above, and when the loop has a HVX instruction.
+ // ii) -O3
+ if (HST->useHVXOps()) {
+ if (HTM->getOptLevel() < CodeGenOptLevel::Default)
+ return false;
+ } else {
+ if (HTM->getOptLevel() < CodeGenOptLevel::Aggressive)
+ return false;
+ }
+
+ bool Changed = false;
+ for (MachineFunction::iterator MBBi = MF.begin(), MBBe = MF.end();
+ MBBi != MBBe; ++MBBi) {
+ MachineBasicBlock &MBB = *MBBi;
+ Changed |= attemptToBalignSmallLoop(MF, MBB);
+ }
+ return Changed;
+}
+
+} // namespace
+
+INITIALIZE_PASS(HexagonLoopAlign, "hexagon-loop-align",
+ "Hexagon LoopAlign pass", false, false)
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonLoopAlign() { return new HexagonLoopAlign(); }
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 7d7728633939..3c346c334d6d 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -164,6 +164,7 @@ namespace llvm {
void initializeHexagonGenMuxPass(PassRegistry&);
void initializeHexagonHardwareLoopsPass(PassRegistry&);
void initializeHexagonLoopIdiomRecognizeLegacyPassPass(PassRegistry &);
+ void initializeHexagonLoopAlignPass(PassRegistry &);
void initializeHexagonNewValueJumpPass(PassRegistry&);
void initializeHexagonOptAddrModePass(PassRegistry&);
void initializeHexagonPacketizerPass(PassRegistry&);
@@ -194,6 +195,7 @@ namespace llvm {
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
CodeGenOptLevel OptLevel);
+ FunctionPass *createHexagonLoopAlign();
FunctionPass *createHexagonLoopRescheduling();
FunctionPass *createHexagonNewValueJump();
FunctionPass *createHexagonOptAddrMode();
@@ -256,8 +258,10 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
TT, CPU, FS, Options, getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small),
(HexagonNoOpt ? CodeGenOptLevel::None : OL)),
- TLOF(std::make_unique<HexagonTargetObjectFile>()) {
+ TLOF(std::make_unique<HexagonTargetObjectFile>()),
+ Subtarget(Triple(TT), CPU, FS, *this) {
initializeHexagonExpandCondsetsPass(*PassRegistry::getPassRegistry());
+ initializeHexagonLoopAlignPass(*PassRegistry::getPassRegistry());
initializeHexagonTfrCleanupPass(*PassRegistry::getPassRegistry());
initAsmInfo();
}
@@ -476,6 +480,9 @@ void HexagonPassConfig::addPreEmitPass() {
// Packetization is mandatory: it handles gather/scatter at all opt levels.
addPass(createHexagonPacketizer(NoOpt));
+ if (!NoOpt)
+ addPass(createHexagonLoopAlign());
+
if (EnableVectorPrint)
addPass(createHexagonVectorPrint());
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.h b/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
index c5fed0cd65a8..34ff45b6acf3 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -23,6 +23,7 @@ namespace llvm {
class HexagonTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ HexagonSubtarget Subtarget;
mutable StringMap<std::unique_ptr<HexagonSubtarget>> SubtargetMap;
public:
diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index e38c8bacaf2b..56472d633694 100644
--- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -1180,7 +1180,7 @@ void HexagonPacketizerList::unpacketizeSoloInstrs(MachineFunction &MF) {
bool InsertBeforeBundle;
if (MI.isInlineAsm())
InsertBeforeBundle = !hasWriteToReadDep(MI, *BundleIt, HRI);
- else if (MI.isDebugValue())
+ else if (MI.isDebugInstr())
InsertBeforeBundle = true;
else
continue;
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index a804dd823daa..b849b7be7b7b 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -231,12 +231,19 @@ public:
MCSymbolXCOFF *TCSym =
cast<MCSectionXCOFF>(Streamer.getCurrentSectionOnly())
->getQualNameSymbol();
- // On AIX, we have a region handle (symbol@m) and the variable offset
- // (symbol@{gd|ie|le}) for TLS variables, depending on the TLS model.
+ // On AIX, we have TLS variable offsets (symbol@({gd|ie|le|ld}) depending
+ // on the TLS access method (or model). For the general-dynamic access
+ // method, we also have region handle (symbol@m) for each variable. For
+ // local-dynamic, there is a module handle (_$TLSML[TC]@ml) for all
+ // variables. Finally for local-exec and initial-exec, we have a thread
+ // pointer, in r13 for 64-bit mode and returned by .__get_tpointer for
+ // 32-bit mode.
if (Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSGD ||
Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSGDM ||
Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSIE ||
- Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSLE)
+ Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSLE ||
+ Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSLD ||
+ Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSML)
OS << "\t.tc " << TCSym->getName() << "," << XSym->getName() << "@"
<< MCSymbolRefExpr::getVariantKindName(Kind) << '\n';
else
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp
index 065daf42fe6e..f4998e9b9dcb 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp
@@ -116,6 +116,10 @@ std::pair<uint8_t, uint8_t> PPCXCOFFObjectWriter::getRelocTypeAndSignSize(
return {XCOFF::RelocationType::R_TLS_IE, SignAndSizeForFKData};
case MCSymbolRefExpr::VK_PPC_AIX_TLSLE:
return {XCOFF::RelocationType::R_TLS_LE, SignAndSizeForFKData};
+ case MCSymbolRefExpr::VK_PPC_AIX_TLSLD:
+ return {XCOFF::RelocationType::R_TLS_LD, SignAndSizeForFKData};
+ case MCSymbolRefExpr::VK_PPC_AIX_TLSML:
+ return {XCOFF::RelocationType::R_TLSML, SignAndSizeForFKData};
case MCSymbolRefExpr::VK_None:
return {XCOFF::RelocationType::R_POS, SignAndSizeForFKData};
}
diff --git a/llvm/lib/Target/PowerPC/PPC.h b/llvm/lib/Target/PowerPC/PPC.h
index 3d9ea5608193..eb8886dcc907 100644
--- a/llvm/lib/Target/PowerPC/PPC.h
+++ b/llvm/lib/Target/PowerPC/PPC.h
@@ -139,6 +139,12 @@ class ModulePass;
/// and Local Exec models.
MO_TPREL_FLAG,
+ /// MO_TLSLDM_FLAG - on AIX the ML relocation type is only valid for a
+ /// reference to a TOC symbol from the symbol itself, and right now its only
+ /// user is the symbol "_$TLSML". The symbol name is used to decide that
+ /// the R_TLSML relocation is expected.
+ MO_TLSLDM_FLAG,
+
/// MO_TLSLD_FLAG - If this bit is set the symbol reference is relative to
/// TLS Local Dynamic model.
MO_TLSLD_FLAG,
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 483cd788ebfe..9396ca22dacf 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -621,12 +621,23 @@ void PPCAsmPrinter::LowerPATCHPOINT(StackMaps &SM, const MachineInstr &MI) {
EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::NOP));
}
-/// This helper function creates the TlsGetAddr MCSymbol for AIX. We will
-/// create the csect and use the qual-name symbol instead of creating just the
-/// external symbol.
+/// This helper function creates the TlsGetAddr/TlsGetMod MCSymbol for AIX. We
+/// will create the csect and use the qual-name symbol instead of creating just
+/// the external symbol.
static MCSymbol *createMCSymbolForTlsGetAddr(MCContext &Ctx, unsigned MIOpc) {
- StringRef SymName =
- MIOpc == PPC::GETtlsTpointer32AIX ? ".__get_tpointer" : ".__tls_get_addr";
+ StringRef SymName;
+ switch (MIOpc) {
+ default:
+ SymName = ".__tls_get_addr";
+ break;
+ case PPC::GETtlsTpointer32AIX:
+ SymName = ".__get_tpointer";
+ break;
+ case PPC::GETtlsMOD32AIX:
+ case PPC::GETtlsMOD64AIX:
+ SymName = ".__tls_get_mod";
+ break;
+ }
return Ctx
.getXCOFFSection(SymName, SectionKind::getText(),
XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER))
@@ -668,14 +679,16 @@ void PPCAsmPrinter::EmitTlsCall(const MachineInstr *MI,
"GETtls[ld]ADDR[32] must read GPR3");
if (Subtarget->isAIXABI()) {
- // On AIX, the variable offset should already be in R4 and the region handle
- // should already be in R3.
- // For TLSGD, which currently is the only supported access model, we only
- // need to generate an absolute branch to .__tls_get_addr.
+ // For TLSGD, the variable offset should already be in R4 and the region
+ // handle should already be in R3. We generate an absolute branch to
+ // .__tls_get_addr. For TLSLD, the module handle should already be in R3.
+ // We generate an absolute branch to .__tls_get_mod.
Register VarOffsetReg = Subtarget->isPPC64() ? PPC::X4 : PPC::R4;
(void)VarOffsetReg;
- assert(MI->getOperand(2).isReg() &&
- MI->getOperand(2).getReg() == VarOffsetReg &&
+ assert((MI->getOpcode() == PPC::GETtlsMOD32AIX ||
+ MI->getOpcode() == PPC::GETtlsMOD64AIX ||
+ (MI->getOperand(2).isReg() &&
+ MI->getOperand(2).getReg() == VarOffsetReg)) &&
"GETtls[ld]ADDR[32] must read GPR4");
EmitAIXTlsCallHelper(MI);
return;
@@ -844,6 +857,13 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
return MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSGDM;
if (Flag == PPCII::MO_TLSGD_FLAG || Flag == PPCII::MO_GOT_TLSGD_PCREL_FLAG)
return MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSGD;
+ // For local-dynamic TLS access on AIX, we have one TOC entry for the symbol
+ // (the variable offset) and one shared TOC entry for the module handle.
+ // They are differentiated by MO_TLSLD_FLAG and MO_TLSLDM_FLAG.
+ if (Flag == PPCII::MO_TLSLD_FLAG && IsAIX)
+ return MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSLD;
+ if (Flag == PPCII::MO_TLSLDM_FLAG && IsAIX)
+ return MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSML;
return MCSymbolRefExpr::VariantKind::VK_None;
};
@@ -1354,6 +1374,11 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
.addExpr(SymGotTlsGD));
return;
}
+ case PPC::GETtlsMOD32AIX:
+ case PPC::GETtlsMOD64AIX:
+ // Transform: %r3 = GETtlsMODNNAIX %r3 (for NN == 32/64).
+ // Into: BLA .__tls_get_mod()
+ // Input parameter is a module handle (_$TLSML[TC]@ml) for all variables.
case PPC::GETtlsADDR:
// Transform: %x3 = GETtlsADDR %x3, @sym
// Into: BL8_NOP_TLS __tls_get_addr(sym at tlsgd)
@@ -2167,6 +2192,11 @@ void PPCAIXAsmPrinter::emitLinkage(const GlobalValue *GV,
}
}
+ // Do not emit the _$TLSML symbol.
+ if (GV->getThreadLocalMode() == GlobalVariable::LocalDynamicTLSModel &&
+ GV->hasName() && GV->getName() == "_$TLSML")
+ return;
+
OutStreamer->emitXCOFFSymbolLinkageWithVisibility(GVSym, LinkageAttr,
VisibilityAttr);
}
@@ -2981,11 +3011,13 @@ void PPCAIXAsmPrinter::emitInstruction(const MachineInstr *MI) {
MMI->hasDebugInfo());
break;
}
+ case PPC::GETtlsMOD32AIX:
+ case PPC::GETtlsMOD64AIX:
case PPC::GETtlsTpointer32AIX:
case PPC::GETtlsADDR64AIX:
case PPC::GETtlsADDR32AIX: {
- // A reference to .__tls_get_addr/.__get_tpointer is unknown to the
- // assembler so we need to emit an external symbol reference.
+ // A reference to .__tls_get_mod/.__tls_get_addr/.__get_tpointer is unknown
+ // to the assembler so we need to emit an external symbol reference.
MCSymbol *TlsGetAddr =
createMCSymbolForTlsGetAddr(OutContext, MI->getOpcode());
ExtSymSDNodeSymbols.insert(TlsGetAddr);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 51becf1d5b85..9fa17bac5545 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1774,9 +1774,11 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
+ case PPCISD::GET_TLS_MOD_AIX: return "PPCISD::GET_TLS_MOD_AIX";
case PPCISD::GET_TPOINTER: return "PPCISD::GET_TPOINTER";
case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
case PPCISD::TLSGD_AIX: return "PPCISD::TLSGD_AIX";
+ case PPCISD::TLSLD_AIX: return "PPCISD::TLSLD_AIX";
case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
@@ -3415,13 +3417,36 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, VariableOffset);
}
- // Only Local-Exec, Initial-Exec and General-Dynamic TLS models are currently
- // supported models. If Local- or Initial-exec are not possible or specified,
- // all GlobalTLSAddress nodes are lowered using the general-dynamic model.
- // We need to generate two TOC entries, one for the variable offset, one for
- // the region handle. The global address for the TOC entry of the region
- // handle is created with the MO_TLSGDM_FLAG flag and the global address
- // for the TOC entry of the variable offset is created with MO_TLSGD_FLAG.
+ if (Model == TLSModel::LocalDynamic) {
+ // For local-dynamic on AIX, we need to generate one TOC entry for each
+ // variable offset, and a single module-handle TOC entry for the entire
+ // file.
+
+ SDValue VariableOffsetTGA =
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSLD_FLAG);
+ SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
+
+ Module *M = DAG.getMachineFunction().getFunction().getParent();
+ GlobalVariable *TLSGV =
+ dyn_cast_or_null<GlobalVariable>(M->getOrInsertGlobal(
+ StringRef("_$TLSML"), PointerType::getUnqual(*DAG.getContext())));
+ TLSGV->setThreadLocalMode(GlobalVariable::LocalDynamicTLSModel);
+ assert(TLSGV && "Not able to create GV for _$TLSML.");
+ SDValue ModuleHandleTGA =
+ DAG.getTargetGlobalAddress(TLSGV, dl, PtrVT, 0, PPCII::MO_TLSLDM_FLAG);
+ SDValue ModuleHandleTOC = getTOCEntry(DAG, dl, ModuleHandleTGA);
+ SDValue ModuleHandle =
+ DAG.getNode(PPCISD::TLSLD_AIX, dl, PtrVT, ModuleHandleTOC);
+
+ return DAG.getNode(ISD::ADD, dl, PtrVT, ModuleHandle, VariableOffset);
+ }
+
+ // If Local- or Initial-exec or Local-dynamic is not possible or specified,
+ // all GlobalTLSAddress nodes are lowered using the general-dynamic model. We
+ // need to generate two TOC entries, one for the variable offset, one for the
+ // region handle. The global address for the TOC entry of the region handle is
+ // created with the MO_TLSGDM_FLAG flag and the global address for the TOC
+ // entry of the variable offset is created with MO_TLSGD_FLAG.
SDValue VariableOffsetTGA =
DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG);
SDValue RegionHandleTGA =
@@ -12661,6 +12686,44 @@ PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
return TailMBB;
}
+static bool IsSelectCC(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case PPC::SELECT_CC_I4:
+ case PPC::SELECT_CC_I8:
+ case PPC::SELECT_CC_F4:
+ case PPC::SELECT_CC_F8:
+ case PPC::SELECT_CC_F16:
+ case PPC::SELECT_CC_VRRC:
+ case PPC::SELECT_CC_VSFRC:
+ case PPC::SELECT_CC_VSSRC:
+ case PPC::SELECT_CC_VSRC:
+ case PPC::SELECT_CC_SPE4:
+ case PPC::SELECT_CC_SPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool IsSelect(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case PPC::SELECT_I4:
+ case PPC::SELECT_I8:
+ case PPC::SELECT_F4:
+ case PPC::SELECT_F8:
+ case PPC::SELECT_F16:
+ case PPC::SELECT_SPE:
+ case PPC::SELECT_SPE4:
+ case PPC::SELECT_VRRC:
+ case PPC::SELECT_VSFRC:
+ case PPC::SELECT_VSSRC:
+ case PPC::SELECT_VSRC:
+ return true;
+ default:
+ return false;
+ }
+}
+
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
@@ -12698,9 +12761,10 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineFunction *F = BB->getParent();
MachineRegisterInfo &MRI = F->getRegInfo();
- if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
- MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
- MI.getOpcode() == PPC::SELECT_I8) {
+ if (Subtarget.hasISEL() &&
+ (MI.getOpcode() == PPC::SELECT_CC_I4 ||
+ MI.getOpcode() == PPC::SELECT_CC_I8 ||
+ MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
SmallVector<MachineOperand, 2> Cond;
if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
MI.getOpcode() == PPC::SELECT_CC_I8)
@@ -12712,24 +12776,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
DebugLoc dl = MI.getDebugLoc();
TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
- } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
- MI.getOpcode() == PPC::SELECT_CC_F8 ||
- MI.getOpcode() == PPC::SELECT_CC_F16 ||
- MI.getOpcode() == PPC::SELECT_CC_VRRC ||
- MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
- MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
- MI.getOpcode() == PPC::SELECT_CC_VSRC ||
- MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
- MI.getOpcode() == PPC::SELECT_CC_SPE ||
- MI.getOpcode() == PPC::SELECT_F4 ||
- MI.getOpcode() == PPC::SELECT_F8 ||
- MI.getOpcode() == PPC::SELECT_F16 ||
- MI.getOpcode() == PPC::SELECT_SPE ||
- MI.getOpcode() == PPC::SELECT_SPE4 ||
- MI.getOpcode() == PPC::SELECT_VRRC ||
- MI.getOpcode() == PPC::SELECT_VSFRC ||
- MI.getOpcode() == PPC::SELECT_VSSRC ||
- MI.getOpcode() == PPC::SELECT_VSRC) {
+ } else if (IsSelectCC(MI) || IsSelect(MI)) {
// The incoming instruction knows the destination vreg to set, the
// condition code register to branch on, the true/false values to
// select between, and a branch opcode to use.
@@ -12738,7 +12785,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
// ...
// TrueVal = ...
// cmpTY ccX, r1, r2
- // bCC copy1MBB
+ // bCC sinkMBB
// fallthrough --> copy0MBB
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
@@ -12747,6 +12794,12 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
+ // Set the call frame size on entry to the new basic blocks.
+ // See https://reviews.llvm.org/D156113.
+ unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
+ copy0MBB->setCallFrameSize(CallFrameSize);
+ sinkMBB->setCallFrameSize(CallFrameSize);
+
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
@@ -12756,15 +12809,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
- if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
- MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
- MI.getOpcode() == PPC::SELECT_F16 ||
- MI.getOpcode() == PPC::SELECT_SPE4 ||
- MI.getOpcode() == PPC::SELECT_SPE ||
- MI.getOpcode() == PPC::SELECT_VRRC ||
- MI.getOpcode() == PPC::SELECT_VSFRC ||
- MI.getOpcode() == PPC::SELECT_VSSRC ||
- MI.getOpcode() == PPC::SELECT_VSRC) {
+ if (IsSelect(MI)) {
BuildMI(BB, dl, TII->get(PPC::BC))
.addReg(MI.getOperand(1).getReg())
.addMBB(sinkMBB);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 509a22b0bbf4..0bdfdcd15441 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -370,11 +370,22 @@ namespace llvm {
/// G8RC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY
/// Op that combines two register copies of TOC entries
/// (region handle into R3 and variable offset into R4) followed by a
- /// GET_TLS_ADDR node which will be expanded to a call to __get_tls_addr.
+ /// GET_TLS_ADDR node which will be expanded to a call to .__tls_get_addr.
/// This node is used in 64-bit mode as well (in which case the result is
/// G8RC and inputs are X3/X4).
TLSGD_AIX,
+ /// %x3 = GET_TLS_MOD_AIX _$TLSML - For the AIX local-dynamic TLS model,
+ /// produces a call to .__tls_get_mod(_$TLSML\@ml).
+ GET_TLS_MOD_AIX,
+
+ /// [GP|G8]RC = TLSLD_AIX, TOC_ENTRY(module handle)
+ /// Op that requires a single input of the module handle TOC entry in R3,
+ /// and generates a GET_TLS_MOD_AIX node which will be expanded into a call
+ /// to .__tls_get_mod. This node is used in both 32-bit and 64-bit modes.
+ /// The only difference is the register class.
+ TLSLD_AIX,
+
/// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
/// model, produces an ADDIS8 instruction that adds the GOT base
/// register to sym\@got\@tlsld\@ha.
diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index 0322bb37b1fd..2949d58ab664 100644
--- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -1557,12 +1557,19 @@ def GETtlsldADDRPCREL : GETtlsldADDRPseudo <"#GETtlsldADDRPCREL">;
// so we don't need to mark it with a size of 8 bytes. Finally, the assembly
// manual mentions this exact set of registers as the clobbered set, others
// are guaranteed not to be clobbered.
-let Defs = [X0,X4,X5,X11,LR8,CR0] in
+let Defs = [X0,X4,X5,X11,LR8,CR0] in {
def GETtlsADDR64AIX :
PPCEmitTimePseudo<(outs g8rc:$rD),(ins g8rc:$offset, g8rc:$handle),
"GETtlsADDR64AIX",
[(set i64:$rD,
(PPCgetTlsAddr i64:$offset, i64:$handle))]>, isPPC64;
+// On AIX, the call to .__tls_get_mod needs one input in X3 for the module handle.
+def GETtlsMOD64AIX :
+ PPCEmitTimePseudo<(outs g8rc:$rD),(ins g8rc:$handle),
+ "GETtlsMOD64AIX",
+ [(set i64:$rD,
+ (PPCgetTlsMod i64:$handle))]>, isPPC64;
+}
}
// Combined op for ADDItlsgdL and GETtlsADDR, late expanded. X3 and LR8
@@ -1595,6 +1602,9 @@ def TLSGDAIX8 :
"#TLSGDAIX8",
[(set i64:$rD,
(PPCTlsgdAIX i64:$offset, i64:$handle))]>;
+// This pseudo is expanded to the call to GETtlsMOD64AIX.
+def TLSLDAIX8 : PPCEmitTimePseudo<(outs g8rc:$rD), (ins g8rc:$handle),
+ "#TLSLDAIX8", [(set i64:$rD, (PPCTlsldAIX i64:$handle))]>;
// Combined op for ADDItlsldL and GETtlsADDR, late expanded. X3 and LR8
// are true defines, while the rest of the Defs are clobbers.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 68cc76a98ff8..1c610b269d32 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2965,6 +2965,7 @@ PPCInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
{MO_PCREL_OPT_FLAG, "ppc-opt-pcrel"},
{MO_TLSGD_FLAG, "ppc-tlsgd"},
{MO_TPREL_FLAG, "ppc-tprel"},
+ {MO_TLSLDM_FLAG, "ppc-tlsldm"},
{MO_TLSLD_FLAG, "ppc-tlsld"},
{MO_TLSGDM_FLAG, "ppc-tlsgdm"},
{MO_GOT_TLSGD_PCREL_FLAG, "ppc-got-tlsgd-pcrel"},
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 3abd97f2c38c..82da1a3c3059 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -213,12 +213,14 @@ def PPCaddTls : SDNode<"PPCISD::ADD_TLS", SDTIntBinOp, []>;
def PPCaddisTlsgdHA : SDNode<"PPCISD::ADDIS_TLSGD_HA", SDTIntBinOp>;
def PPCaddiTlsgdL : SDNode<"PPCISD::ADDI_TLSGD_L", SDTIntBinOp>;
def PPCgetTlsAddr : SDNode<"PPCISD::GET_TLS_ADDR", SDTIntBinOp>;
+def PPCgetTlsMod : SDNode<"PPCISD::GET_TLS_MOD_AIX", SDTIntUnaryOp>;
def PPCgetTpointer : SDNode<"PPCISD::GET_TPOINTER", SDTIntLeaf, []>;
def PPCaddiTlsgdLAddr : SDNode<"PPCISD::ADDI_TLSGD_L_ADDR",
SDTypeProfile<1, 3, [
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>, SDTCisInt<0> ]>>;
def PPCTlsgdAIX : SDNode<"PPCISD::TLSGD_AIX", SDTIntBinOp>;
+def PPCTlsldAIX : SDNode<"PPCISD::TLSLD_AIX", SDTIntUnaryOp>;
def PPCaddisTlsldHA : SDNode<"PPCISD::ADDIS_TLSLD_HA", SDTIntBinOp>;
def PPCaddiTlsldL : SDNode<"PPCISD::ADDI_TLSLD_L", SDTIntBinOp>;
def PPCgetTlsldAddr : SDNode<"PPCISD::GET_TLSLD_ADDR", SDTIntBinOp>;
@@ -3249,11 +3251,16 @@ def GETtlsADDR32 : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$s
// The rest of the Defs are the exact set of registers that will be clobbered by
// the call.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
- Defs = [R0,R4,R5,R11,LR,CR0] in
+ Defs = [R0,R4,R5,R11,LR,CR0] in {
def GETtlsADDR32AIX : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$offset, gprc:$handle),
"GETtlsADDR32AIX",
[(set i32:$rD,
(PPCgetTlsAddr i32:$offset, i32:$handle))]>;
+def GETtlsMOD32AIX : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$handle),
+ "GETtlsMOD32AIX",
+ [(set i32:$rD,
+ (PPCgetTlsMod i32:$handle))]>;
+}
// For local-exec accesses on 32-bit AIX, a call to .__get_tpointer is
// generated to retrieve the thread pointer. GETtlsTpointer32AIX clobbers both
@@ -3293,6 +3300,9 @@ def TLSGDAIX : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$offset, gprc:$handl
"#TLSGDAIX",
[(set i32:$rD,
(PPCTlsgdAIX i32:$offset, i32:$handle))]>;
+// This pseudo is expanded to the call to GETtlsMOD32AIX.
+def TLSLDAIX : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc:$handle),
+ "#TLSLDAIX", [(set i32:$rD, (PPCTlsldAIX i32:$handle))]>;
// LR is a true define, while the rest of the Defs are clobbers. R3 is
// explicitly defined when this op is created, so not mentioned here.
let hasExtraSrcRegAllocReq = 1, hasExtraDefRegAllocReq = 1,
diff --git a/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp b/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
index 9518d5347065..147438dfedd8 100644
--- a/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
@@ -48,9 +48,15 @@ protected:
bool processBlock(MachineBasicBlock &MBB) {
bool Changed = false;
bool NeedFence = true;
- bool Is64Bit = MBB.getParent()->getSubtarget<PPCSubtarget>().isPPC64();
- bool IsAIX = MBB.getParent()->getSubtarget<PPCSubtarget>().isAIXABI();
+ const PPCSubtarget &Subtarget =
+ MBB.getParent()->getSubtarget<PPCSubtarget>();
+ bool Is64Bit = Subtarget.isPPC64();
+ bool IsAIX = Subtarget.isAIXABI();
+ bool IsLargeModel =
+ Subtarget.getTargetMachine().getCodeModel() == CodeModel::Large;
bool IsPCREL = false;
+ MachineFunction *MF = MBB.getParent();
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
I != IE;) {
@@ -59,13 +65,16 @@ protected:
// There are a number of slight differences in code generation
// when we call .__get_tpointer (32-bit AIX TLS).
bool IsTLSTPRelMI = MI.getOpcode() == PPC::GETtlsTpointer32AIX;
+ bool IsTLSLDAIXMI = (MI.getOpcode() == PPC::TLSLDAIX8 ||
+ MI.getOpcode() == PPC::TLSLDAIX);
if (MI.getOpcode() != PPC::ADDItlsgdLADDR &&
MI.getOpcode() != PPC::ADDItlsldLADDR &&
MI.getOpcode() != PPC::ADDItlsgdLADDR32 &&
MI.getOpcode() != PPC::ADDItlsldLADDR32 &&
MI.getOpcode() != PPC::TLSGDAIX &&
- MI.getOpcode() != PPC::TLSGDAIX8 && !IsTLSTPRelMI && !IsPCREL) {
+ MI.getOpcode() != PPC::TLSGDAIX8 && !IsTLSTPRelMI && !IsPCREL &&
+ !IsTLSLDAIXMI) {
// Although we create ADJCALLSTACKDOWN and ADJCALLSTACKUP
// as scheduling fences, we skip creating fences if we already
// have existing ADJCALLSTACKDOWN/UP to avoid nesting,
@@ -109,6 +118,16 @@ protected:
Opc1 = PPC::ADDItlsldL32;
Opc2 = PPC::GETtlsldADDR32;
break;
+ case PPC::TLSLDAIX:
+ // TLSLDAIX is expanded to one copy and GET_TLS_MOD, so we only set
+ // Opc2 here.
+ Opc2 = PPC::GETtlsMOD32AIX;
+ break;
+ case PPC::TLSLDAIX8:
+ // TLSLDAIX8 is expanded to one copy and GET_TLS_MOD, so we only set
+ // Opc2 here.
+ Opc2 = PPC::GETtlsMOD64AIX;
+ break;
case PPC::TLSGDAIX8:
// TLSGDAIX8 is expanded to two copies and GET_TLS_ADDR, so we only
// set Opc2 here.
@@ -145,9 +164,97 @@ protected:
.addImm(0);
if (IsAIX) {
- // The variable offset and region handle are copied in r4 and r3. The
- // copies are followed by GETtlsADDR32AIX/GETtlsADDR64AIX.
- if (!IsTLSTPRelMI) {
+ if (IsTLSLDAIXMI) {
+ // The relative order between the node that loads the variable
+ // offset from the TOC, and the .__tls_get_mod node is being tuned
+ // here. It is better to put the variable offset TOC load after the
+ // call, since this node can use clobbers r4/r5.
+ // Search for the pattern of the two nodes that load from the TOC
+ // (either for the variable offset or for the module handle), and
+ // then move the variable offset TOC load right before the node that
+ // uses the OutReg of the .__tls_get_mod node.
+ unsigned LDTocOp =
+ Is64Bit ? (IsLargeModel ? PPC::LDtocL : PPC::LDtoc)
+ : (IsLargeModel ? PPC::LWZtocL : PPC::LWZtoc);
+ if (!RegInfo.use_empty(OutReg)) {
+ std::set<MachineInstr *> Uses;
+ // Collect all instructions that use the OutReg.
+ for (MachineOperand &MO : RegInfo.use_operands(OutReg))
+ Uses.insert(MO.getParent());
+ // Find the first user (e.g.: lwax/stfdx) of the OutReg within the
+ // current BB.
+ MachineBasicBlock::iterator UseIter = MBB.begin();
+ for (MachineBasicBlock::iterator IE = MBB.end(); UseIter != IE;
+ ++UseIter)
+ if (Uses.count(&*UseIter))
+ break;
+
+ // Additional handling is required when UserIter (the first user
+ // of OutReg) is pointing to a valid node that loads from the TOC.
+ // Check the pattern and do the movement if the pattern matches.
+ if (UseIter != MBB.end()) {
+ // Collect all associated nodes that load from the TOC. Use
+ // hasOneDef() to guard against unexpected scenarios.
+ std::set<MachineInstr *> LoadFromTocs;
+ for (MachineOperand &MO : UseIter->operands())
+ if (MO.isReg() && MO.isUse()) {
+ Register MOReg = MO.getReg();
+ if (RegInfo.hasOneDef(MOReg)) {
+ MachineInstr *Temp =
+ RegInfo.getOneDef(MOReg)->getParent();
+ // For the current TLSLDAIX node, get the corresponding
+ // node that loads from the TOC for the InReg. Otherwise,
+ // Temp probably pointed to the variable offset TOC load
+ // we would like to move.
+ if (Temp == &MI && RegInfo.hasOneDef(InReg))
+ Temp = RegInfo.getOneDef(InReg)->getParent();
+ if (Temp->getOpcode() == LDTocOp)
+ LoadFromTocs.insert(Temp);
+ } else {
+ // FIXME: analyze this scenario if there is one.
+ LoadFromTocs.clear();
+ break;
+ }
+ }
+
+ // Check the two nodes that loaded from the TOC: one should be
+ // "_$TLSML", and the other will be moved before the node that
+ // uses the OutReg of the .__tls_get_mod node.
+ if (LoadFromTocs.size() == 2) {
+ MachineBasicBlock::iterator TLSMLIter = MBB.end();
+ MachineBasicBlock::iterator OffsetIter = MBB.end();
+ // Make sure the two nodes that loaded from the TOC are within
+ // the current BB, and that one of them is from the "_$TLSML"
+ // pseudo symbol, while the other is from the variable.
+ for (MachineBasicBlock::iterator I = MBB.begin(),
+ IE = MBB.end();
+ I != IE; ++I)
+ if (LoadFromTocs.count(&*I)) {
+ MachineOperand MO = I->getOperand(1);
+ if (MO.isGlobal() && MO.getGlobal()->hasName() &&
+ MO.getGlobal()->getName() == "_$TLSML")
+ TLSMLIter = I;
+ else
+ OffsetIter = I;
+ }
+ // Perform the movement when the desired scenario has been
+ // identified, which should be when both of the iterators are
+ // valid.
+ if (TLSMLIter != MBB.end() && OffsetIter != MBB.end())
+ OffsetIter->moveBefore(&*UseIter);
+ }
+ }
+ }
+ // The module-handle is copied into r3. The copy is followed by
+ // GETtlsMOD32AIX/GETtlsMOD64AIX.
+ BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), GPR3)
+ .addReg(InReg);
+ // The call to .__tls_get_mod.
+ BuildMI(MBB, I, DL, TII->get(Opc2), GPR3).addReg(GPR3);
+ } else if (!IsTLSTPRelMI) {
+ // The variable offset and region handle (for TLSGD) are copied in
+ // r4 and r3. The copies are followed by
+ // GETtlsADDR32AIX/GETtlsADDR64AIX.
BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), GPR4)
.addReg(MI.getOperand(1).getReg());
BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), GPR3)
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
index b375e8bb4b8f..cdf7c048a4bf 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
@@ -31,12 +31,13 @@ using namespace llvm;
// This part is for ELF object output.
RISCVTargetELFStreamer::RISCVTargetELFStreamer(MCStreamer &S,
const MCSubtargetInfo &STI)
- : RISCVTargetStreamer(S), CurrentVendor("riscv"), STI(STI) {
+ : RISCVTargetStreamer(S), CurrentVendor("riscv") {
MCAssembler &MCA = getStreamer().getAssembler();
const FeatureBitset &Features = STI.getFeatureBits();
auto &MAB = static_cast<RISCVAsmBackend &>(MCA.getBackend());
setTargetABI(RISCVABI::computeTargetABI(STI.getTargetTriple(), Features,
MAB.getTargetOptions().getABIName()));
+ setFlagsFromFeatures(STI);
// `j label` in `.option norelax; j label; .option relax; ...; label:` needs a
// relocation to ensure the jump target is correct after linking. This is due
// to a limitation that shouldForceRelocation has to make the decision upfront
@@ -91,10 +92,9 @@ void RISCVTargetELFStreamer::finish() {
unsigned EFlags = MCA.getELFHeaderEFlags();
- if (STI.hasFeature(RISCV::FeatureStdExtC) ||
- STI.hasFeature(RISCV::FeatureStdExtZca))
+ if (hasRVC())
EFlags |= ELF::EF_RISCV_RVC;
- if (STI.hasFeature(RISCV::FeatureStdExtZtso))
+ if (hasTSO())
EFlags |= ELF::EF_RISCV_TSO;
switch (ABI) {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
index a6f54bf67b5d..e8f29cd8449b 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
@@ -46,7 +46,6 @@ private:
StringRef CurrentVendor;
MCSection *AttributeSection = nullptr;
- const MCSubtargetInfo &STI;
void emitAttribute(unsigned Attribute, unsigned Value) override;
void emitTextAttribute(unsigned Attribute, StringRef String) override;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
index 071a3a5aa5d6..4a4b1e13c2b9 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -48,6 +48,12 @@ void RISCVTargetStreamer::setTargetABI(RISCVABI::ABI ABI) {
TargetABI = ABI;
}
+void RISCVTargetStreamer::setFlagsFromFeatures(const MCSubtargetInfo &STI) {
+ HasRVC = STI.hasFeature(RISCV::FeatureStdExtC) ||
+ STI.hasFeature(RISCV::FeatureStdExtZca);
+ HasTSO = STI.hasFeature(RISCV::FeatureStdExtZtso);
+}
+
void RISCVTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI,
bool EmitStackAlign) {
if (EmitStackAlign) {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
index 070e72fb157a..cb8bc21cb635 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
@@ -33,6 +33,8 @@ struct RISCVOptionArchArg {
class RISCVTargetStreamer : public MCTargetStreamer {
RISCVABI::ABI TargetABI = RISCVABI::ABI_Unknown;
+ bool HasRVC = false;
+ bool HasTSO = false;
public:
RISCVTargetStreamer(MCStreamer &S);
@@ -58,6 +60,9 @@ public:
void emitTargetAttributes(const MCSubtargetInfo &STI, bool EmitStackAlign);
void setTargetABI(RISCVABI::ABI ABI);
RISCVABI::ABI getTargetABI() const { return TargetABI; }
+ void setFlagsFromFeatures(const MCSubtargetInfo &STI);
+ bool hasRVC() const { return HasRVC; }
+ bool hasTSO() const { return HasTSO; }
};
// This part is for ascii assembly output
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index bcaf44777494..9773b2998c7d 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -185,7 +185,7 @@ def HasStdExtZabha : Predicate<"Subtarget->hasStdExtZabha()">,
"'Zabha' (Byte and Halfword Atomic Memory Operations)">;
def FeatureStdExtZacas
- : SubtargetFeature<"experimental-zacas", "HasStdExtZacas", "true",
+ : SubtargetFeature<"zacas", "HasStdExtZacas", "true",
"'Zacas' (Atomic Compare-And-Swap Instructions)">;
def HasStdExtZacas : Predicate<"Subtarget->hasStdExtZacas()">,
AssemblerPredicate<(all_of FeatureStdExtZacas),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e34750d05730..e647f56416bf 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1999,6 +1999,10 @@ bool RISCVTargetLowering::canSplatOperand(Instruction *I, int Operand) const {
case Intrinsic::vp_sdiv:
case Intrinsic::vp_urem:
case Intrinsic::vp_srem:
+ case Intrinsic::ssub_sat:
+ case Intrinsic::vp_ssub_sat:
+ case Intrinsic::usub_sat:
+ case Intrinsic::vp_usub_sat:
return Operand == 1;
// These intrinsics are commutative.
case Intrinsic::vp_add:
@@ -2010,6 +2014,18 @@ bool RISCVTargetLowering::canSplatOperand(Instruction *I, int Operand) const {
case Intrinsic::vp_fmul:
case Intrinsic::vp_icmp:
case Intrinsic::vp_fcmp:
+ case Intrinsic::smin:
+ case Intrinsic::vp_smin:
+ case Intrinsic::umin:
+ case Intrinsic::vp_umin:
+ case Intrinsic::smax:
+ case Intrinsic::vp_smax:
+ case Intrinsic::umax:
+ case Intrinsic::vp_umax:
+ case Intrinsic::sadd_sat:
+ case Intrinsic::vp_sadd_sat:
+ case Intrinsic::uadd_sat:
+ case Intrinsic::vp_uadd_sat:
// These intrinsics have 'vr' versions.
case Intrinsic::vp_sub:
case Intrinsic::vp_fsub:
@@ -9728,8 +9744,15 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
auto [Mask, VL] = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
+ ElementCount EndIndex =
+ ElementCount::getScalable(RemIdx) + SubVecVT.getVectorElementCount();
VL = computeVLMax(SubVecVT, DL, DAG);
+ // Use tail agnostic policy if we're inserting over InterSubVT's tail.
+ unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ if (EndIndex == InterSubVT.getVectorElementCount())
+ Policy = RISCVII::TAIL_AGNOSTIC;
+
// If we're inserting into the lowest elements, use a tail undisturbed
// vmv.v.v.
if (RemIdx == 0) {
@@ -9743,7 +9766,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
SubVec = getVSlideup(DAG, Subtarget, DL, InterSubVT, AlignedExtract, SubVec,
- SlideupAmt, Mask, VL);
+ SlideupAmt, Mask, VL, Policy);
}
// If required, insert this subvector back into the correct vector register.
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 9d1f01dffaaf..a68674b221d3 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -30,6 +30,8 @@
using namespace llvm;
+static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
+ cl::init(false), cl::Hidden);
static cl::opt<bool>
DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
cl::init(false),
@@ -712,7 +714,10 @@ void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
- return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
+ return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
+ !DisableCostPerUse
+ ? 1
+ : 0;
}
// Add two address hints to improve chances of being able to use a compressed
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 381e0082c49b..53838d6e5401 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -508,19 +508,23 @@ defvar VM8VTs = [vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
vfloat16m8_t, vbfloat16m8_t,
vfloat32m8_t, vfloat64m8_t];
+// We reverse the order of last 8 registers so that we don't needlessly prevent
+// allocation of higher lmul register groups while still putting v0 last in the
+// allocation order.
+
def VR : VReg<!listconcat(VM1VTs, VMaskVTs),
(add (sequence "V%u", 8, 31),
- (sequence "V%u", 0, 7)), 1>;
+ (sequence "V%u", 7, 0)), 1>;
def VRNoV0 : VReg<!listconcat(VM1VTs, VMaskVTs), (sub VR, V0), 1>;
def VRM2 : VReg<VM2VTs, (add (sequence "V%uM2", 8, 31, 2),
- (sequence "V%uM2", 0, 7, 2)), 2>;
+ (sequence "V%uM2", 6, 0, 2)), 2>;
def VRM2NoV0 : VReg<VM2VTs, (sub VRM2, V0M2), 2>;
def VRM4 : VReg<VM4VTs, (add V8M4, V12M4, V16M4, V20M4,
- V24M4, V28M4, V0M4, V4M4), 4>;
+ V24M4, V28M4, V4M4, V0M4), 4>;
def VRM4NoV0 : VReg<VM4VTs, (sub VRM4, V0M4), 4>;
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 040cec426740..0430d603620b 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -198,6 +198,7 @@ def SiFive7Model : SchedMachineModel {
let LoadLatency = 3;
let MispredictPenalty = 3;
let CompleteModel = 0;
+ let PostRAScheduler = true;
let EnableIntervals = true;
let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
HasStdExtZcmt, HasStdExtZknd, HasStdExtZkne,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index f04968d82e86..2e4e69fb4f92 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -488,9 +488,8 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
// vmv.v.x v8, a0
// vmsne.vi v0, v8, 0
return LT.first *
- (TLI->getLMULCost(LT.second) + // FIXME: should be 1 for andi
- getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
- LT.second, CostKind));
+ (1 + getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
+ LT.second, CostKind));
}
// Example sequence:
// vsetivli zero, 2, e8, mf8, ta, mu (ignored)
@@ -502,11 +501,10 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
// vmsne.vi v0, v8, 0
return LT.first *
- (TLI->getLMULCost(LT.second) + // FIXME: this should be 1 for andi
- getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM,
- RISCV::VMV_X_S, RISCV::VMV_V_X,
- RISCV::VMSNE_VI},
- LT.second, CostKind));
+ (1 + getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM,
+ RISCV::VMV_X_S, RISCV::VMV_V_X,
+ RISCV::VMSNE_VI},
+ LT.second, CostKind));
}
if (HasScalar) {
@@ -551,9 +549,12 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
if (LT.second.isFixedLengthVector())
// vrsub.vi has a 5 bit immediate field, otherwise an li suffices
LenCost = isInt<5>(LT.second.getVectorNumElements() - 1) ? 0 : 1;
- // FIXME: replace the constant `2` below with cost of {VID_V,VRSUB_VX}
+ unsigned Opcodes[] = {RISCV::VID_V, RISCV::VRSUB_VX, RISCV::VRGATHER_VV};
+ if (LT.second.isFixedLengthVector() &&
+ isInt<5>(LT.second.getVectorNumElements() - 1))
+ Opcodes[1] = RISCV::VRSUB_VI;
InstructionCost GatherCost =
- 2 + getRISCVInstructionCost(RISCV::VRGATHER_VV, LT.second, CostKind);
+ getRISCVInstructionCost(Opcodes, LT.second, CostKind);
// Mask operation additionally required extend and truncate
InstructionCost ExtendCost = Tp->getElementType()->isIntegerTy(1) ? 3 : 0;
return LT.first * (LenCost + GatherCost + ExtendCost);
diff --git a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index 7a956636831d..be4ec1e9dce2 100644
--- a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -57,6 +57,7 @@ class SparcOperand;
class SparcAsmParser : public MCTargetAsmParser {
MCAsmParser &Parser;
+ const MCRegisterInfo &MRI;
enum class TailRelocKind { Load_GOT, Add_TLS, Load_TLS, Call_TLS };
@@ -109,8 +110,7 @@ class SparcAsmParser : public MCTargetAsmParser {
const MCExpr *subExpr);
// returns true if Tok is matched to a register and returns register in RegNo.
- bool matchRegisterName(const AsmToken &Tok, MCRegister &RegNo,
- unsigned &RegKind);
+ MCRegister matchRegisterName(const AsmToken &Tok, unsigned &RegKind);
bool matchSparcAsmModifiers(const MCExpr *&EVal, SMLoc &EndLoc);
@@ -128,9 +128,9 @@ class SparcAsmParser : public MCTargetAsmParser {
public:
SparcAsmParser(const MCSubtargetInfo &sti, MCAsmParser &parser,
- const MCInstrInfo &MII,
- const MCTargetOptions &Options)
- : MCTargetAsmParser(Options, sti, MII), Parser(parser) {
+ const MCInstrInfo &MII, const MCTargetOptions &Options)
+ : MCTargetAsmParser(Options, sti, MII), Parser(parser),
+ MRI(*Parser.getContext().getRegisterInfo()) {
Parser.addAliasForDirective(".half", ".2byte");
Parser.addAliasForDirective(".uahalf", ".2byte");
Parser.addAliasForDirective(".word", ".4byte");
@@ -156,16 +156,6 @@ public:
Sparc::I0, Sparc::I1, Sparc::I2, Sparc::I3,
Sparc::I4, Sparc::I5, Sparc::I6, Sparc::I7 };
- static const MCPhysReg FloatRegs[32] = {
- Sparc::F0, Sparc::F1, Sparc::F2, Sparc::F3,
- Sparc::F4, Sparc::F5, Sparc::F6, Sparc::F7,
- Sparc::F8, Sparc::F9, Sparc::F10, Sparc::F11,
- Sparc::F12, Sparc::F13, Sparc::F14, Sparc::F15,
- Sparc::F16, Sparc::F17, Sparc::F18, Sparc::F19,
- Sparc::F20, Sparc::F21, Sparc::F22, Sparc::F23,
- Sparc::F24, Sparc::F25, Sparc::F26, Sparc::F27,
- Sparc::F28, Sparc::F29, Sparc::F30, Sparc::F31 };
-
static const MCPhysReg DoubleRegs[32] = {
Sparc::D0, Sparc::D1, Sparc::D2, Sparc::D3,
Sparc::D4, Sparc::D5, Sparc::D6, Sparc::D7,
@@ -182,32 +172,12 @@ public:
Sparc::Q8, Sparc::Q9, Sparc::Q10, Sparc::Q11,
Sparc::Q12, Sparc::Q13, Sparc::Q14, Sparc::Q15 };
- static const MCPhysReg ASRRegs[32] = {
- SP::Y, SP::ASR1, SP::ASR2, SP::ASR3,
- SP::ASR4, SP::ASR5, SP::ASR6, SP::ASR7,
- SP::ASR8, SP::ASR9, SP::ASR10, SP::ASR11,
- SP::ASR12, SP::ASR13, SP::ASR14, SP::ASR15,
- SP::ASR16, SP::ASR17, SP::ASR18, SP::ASR19,
- SP::ASR20, SP::ASR21, SP::ASR22, SP::ASR23,
- SP::ASR24, SP::ASR25, SP::ASR26, SP::ASR27,
- SP::ASR28, SP::ASR29, SP::ASR30, SP::ASR31};
-
static const MCPhysReg IntPairRegs[] = {
Sparc::G0_G1, Sparc::G2_G3, Sparc::G4_G5, Sparc::G6_G7,
Sparc::O0_O1, Sparc::O2_O3, Sparc::O4_O5, Sparc::O6_O7,
Sparc::L0_L1, Sparc::L2_L3, Sparc::L4_L5, Sparc::L6_L7,
Sparc::I0_I1, Sparc::I2_I3, Sparc::I4_I5, Sparc::I6_I7};
- static const MCPhysReg CoprocRegs[32] = {
- Sparc::C0, Sparc::C1, Sparc::C2, Sparc::C3,
- Sparc::C4, Sparc::C5, Sparc::C6, Sparc::C7,
- Sparc::C8, Sparc::C9, Sparc::C10, Sparc::C11,
- Sparc::C12, Sparc::C13, Sparc::C14, Sparc::C15,
- Sparc::C16, Sparc::C17, Sparc::C18, Sparc::C19,
- Sparc::C20, Sparc::C21, Sparc::C22, Sparc::C23,
- Sparc::C24, Sparc::C25, Sparc::C26, Sparc::C27,
- Sparc::C28, Sparc::C29, Sparc::C30, Sparc::C31 };
-
static const MCPhysReg CoprocPairRegs[] = {
Sparc::C0_C1, Sparc::C2_C3, Sparc::C4_C5, Sparc::C6_C7,
Sparc::C8_C9, Sparc::C10_C11, Sparc::C12_C13, Sparc::C14_C15,
@@ -816,8 +786,9 @@ ParseStatus SparcAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
if (getLexer().getKind() != AsmToken::Percent)
return ParseStatus::NoMatch;
Parser.Lex();
- unsigned regKind = SparcOperand::rk_None;
- if (matchRegisterName(Tok, Reg, regKind)) {
+ unsigned RegKind = SparcOperand::rk_None;
+ Reg = matchRegisterName(Tok, RegKind);
+ if (Reg) {
Parser.Lex();
return ParseStatus::Success;
}
@@ -1168,14 +1139,14 @@ ParseStatus SparcAsmParser::parseOperand(OperandVector &Operands,
return ParseStatus::NoMatch;
Parser.Lex(); // eat %
- MCRegister RegNo;
unsigned RegKind;
- if (!matchRegisterName(Parser.getTok(), RegNo, RegKind))
+ MCRegister Reg = matchRegisterName(Parser.getTok(), RegKind);
+ if (!Reg)
return ParseStatus::NoMatch;
Parser.Lex(); // Eat the identifier token.
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer()-1);
- Operands.push_back(SparcOperand::CreateReg(RegNo, RegKind, S, E));
+ Operands.push_back(SparcOperand::CreateReg(Reg, RegKind, S, E));
Res = ParseStatus::Success;
} else {
Res = parseMEMOperand(Operands);
@@ -1261,9 +1232,8 @@ SparcAsmParser::parseSparcAsmOperand(std::unique_ptr<SparcOperand> &Op,
case AsmToken::Percent: {
Parser.Lex(); // Eat the '%'.
- MCRegister Reg;
unsigned RegKind;
- if (matchRegisterName(Parser.getTok(), Reg, RegKind)) {
+ if (MCRegister Reg = matchRegisterName(Parser.getTok(), RegKind)) {
StringRef Name = Parser.getTok().getString();
Parser.Lex(); // Eat the identifier token.
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
@@ -1325,314 +1295,131 @@ ParseStatus SparcAsmParser::parseBranchModifiers(OperandVector &Operands) {
return ParseStatus::Success;
}
-bool SparcAsmParser::matchRegisterName(const AsmToken &Tok, MCRegister &RegNo,
- unsigned &RegKind) {
- int64_t intVal = 0;
- RegNo = 0;
- RegKind = SparcOperand::rk_None;
- if (Tok.is(AsmToken::Identifier)) {
- StringRef name = Tok.getString();
-
- // %fp
- if (name.equals("fp")) {
- RegNo = Sparc::I6;
- RegKind = SparcOperand::rk_IntReg;
- return true;
- }
- // %sp
- if (name.equals("sp")) {
- RegNo = Sparc::O6;
- RegKind = SparcOperand::rk_IntReg;
- return true;
- }
-
- if (name.equals("y")) {
- RegNo = Sparc::Y;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+#define GET_REGISTER_MATCHER
+#include "SparcGenAsmMatcher.inc"
- if (name.starts_with_insensitive("asr") &&
- !name.substr(3).getAsInteger(10, intVal) && intVal > 0 && intVal < 32) {
- RegNo = ASRRegs[intVal];
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+MCRegister SparcAsmParser::matchRegisterName(const AsmToken &Tok,
+ unsigned &RegKind) {
+ RegKind = SparcOperand::rk_None;
+ if (!Tok.is(AsmToken::Identifier))
+ return SP::NoRegister;
- if (name.equals("fprs")) {
- RegNo = Sparc::ASR6;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+ StringRef Name = Tok.getString();
+ MCRegister Reg = MatchRegisterName(Name.lower());
+ if (!Reg)
+ Reg = MatchRegisterAltName(Name.lower());
- if (name.equals("icc")) {
- RegNo = Sparc::ICC;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+ if (Reg) {
+ // Some registers have identical spellings. The generated matcher might
+ // have chosen one or another spelling, e.g. "%fp" or "%i6" might have been
+ // matched to either SP::I6 or SP::I6_I7. Other parts of SparcAsmParser
+ // are not prepared for this, so we do some canonicalization.
- if (name.equals("psr")) {
- RegNo = Sparc::PSR;
+ // See the note in SparcRegisterInfo.td near ASRRegs register class.
+ if (Reg == SP::ASR4 && Name == "tick") {
RegKind = SparcOperand::rk_Special;
- return true;
+ return SP::TICK;
}
- if (name.equals("fsr")) {
- RegNo = Sparc::FSR;
- RegKind = SparcOperand::rk_Special;
- return true;
+ if (MRI.getRegClass(SP::IntRegsRegClassID).contains(Reg)) {
+ RegKind = SparcOperand::rk_IntReg;
+ return Reg;
}
-
- if (name.equals("fq")) {
- RegNo = Sparc::FQ;
- RegKind = SparcOperand::rk_Special;
- return true;
+ if (MRI.getRegClass(SP::FPRegsRegClassID).contains(Reg)) {
+ RegKind = SparcOperand::rk_FloatReg;
+ return Reg;
}
-
- if (name.equals("csr")) {
- RegNo = Sparc::CPSR;
- RegKind = SparcOperand::rk_Special;
- return true;
+ if (MRI.getRegClass(SP::CoprocRegsRegClassID).contains(Reg)) {
+ RegKind = SparcOperand::rk_CoprocReg;
+ return Reg;
}
- if (name.equals("cq")) {
- RegNo = Sparc::CPQ;
- RegKind = SparcOperand::rk_Special;
- return true;
+ // Canonicalize G0_G1 ... G30_G31 etc. to G0 ... G30.
+ if (MRI.getRegClass(SP::IntPairRegClassID).contains(Reg)) {
+ RegKind = SparcOperand::rk_IntReg;
+ return MRI.getSubReg(Reg, SP::sub_even);
}
- if (name.equals("wim")) {
- RegNo = Sparc::WIM;
- RegKind = SparcOperand::rk_Special;
- return true;
+ // Canonicalize D0 ... D15 to F0 ... F30.
+ if (MRI.getRegClass(SP::DFPRegsRegClassID).contains(Reg)) {
+ // D16 ... D31 do not have sub-registers.
+ if (MCRegister SubReg = MRI.getSubReg(Reg, SP::sub_even)) {
+ RegKind = SparcOperand::rk_FloatReg;
+ return SubReg;
+ }
+ RegKind = SparcOperand::rk_DoubleReg;
+ return Reg;
}
- if (name.equals("tbr")) {
- RegNo = Sparc::TBR;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+ // The generated matcher does not currently return QFP registers.
+ // If it changes, we will need to handle them in a similar way.
+ assert(!MRI.getRegClass(SP::QFPRegsRegClassID).contains(Reg));
- if (name.equals("xcc")) {
- // FIXME:: check 64bit.
- RegNo = Sparc::ICC;
- RegKind = SparcOperand::rk_Special;
- return true;
+ // Canonicalize C0_C1 ... C30_C31 to C0 ... C30.
+ if (MRI.getRegClass(SP::CoprocPairRegClassID).contains(Reg)) {
+ RegKind = SparcOperand::rk_CoprocReg;
+ return MRI.getSubReg(Reg, SP::sub_even);
}
- // %fcc0 - %fcc3
- if (name.starts_with_insensitive("fcc") &&
- !name.substr(3).getAsInteger(10, intVal) && intVal < 4) {
- // FIXME: check 64bit and handle %fcc1 - %fcc3
- RegNo = Sparc::FCC0 + intVal;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+ // Other registers do not need special handling.
+ RegKind = SparcOperand::rk_Special;
+ return Reg;
+ }
- // %g0 - %g7
- if (name.starts_with_insensitive("g") &&
- !name.substr(1).getAsInteger(10, intVal) && intVal < 8) {
- RegNo = IntRegs[intVal];
- RegKind = SparcOperand::rk_IntReg;
- return true;
- }
- // %o0 - %o7
- if (name.starts_with_insensitive("o") &&
- !name.substr(1).getAsInteger(10, intVal) && intVal < 8) {
- RegNo = IntRegs[8 + intVal];
- RegKind = SparcOperand::rk_IntReg;
- return true;
- }
- if (name.starts_with_insensitive("l") &&
- !name.substr(1).getAsInteger(10, intVal) && intVal < 8) {
- RegNo = IntRegs[16 + intVal];
- RegKind = SparcOperand::rk_IntReg;
- return true;
- }
- if (name.starts_with_insensitive("i") &&
- !name.substr(1).getAsInteger(10, intVal) && intVal < 8) {
- RegNo = IntRegs[24 + intVal];
- RegKind = SparcOperand::rk_IntReg;
- return true;
- }
- // %f0 - %f31
- if (name.starts_with_insensitive("f") &&
- !name.substr(1, 2).getAsInteger(10, intVal) && intVal < 32) {
- RegNo = FloatRegs[intVal];
- RegKind = SparcOperand::rk_FloatReg;
- return true;
- }
- // %f32 - %f62
- if (name.starts_with_insensitive("f") &&
- !name.substr(1, 2).getAsInteger(10, intVal) && intVal >= 32 &&
- intVal <= 62 && (intVal % 2 == 0)) {
- // FIXME: Check V9
- RegNo = DoubleRegs[intVal/2];
- RegKind = SparcOperand::rk_DoubleReg;
- return true;
- }
+ // If we still have no match, try custom parsing.
+ // Not all registers and their spellings are modeled in td files.
- // %r0 - %r31
- if (name.starts_with_insensitive("r") &&
- !name.substr(1, 2).getAsInteger(10, intVal) && intVal < 31) {
- RegNo = IntRegs[intVal];
- RegKind = SparcOperand::rk_IntReg;
- return true;
- }
+ // %r0 - %r31
+ int64_t RegNo = 0;
+ if (Name.starts_with_insensitive("r") &&
+ !Name.substr(1, 2).getAsInteger(10, RegNo) && RegNo < 31) {
+ RegKind = SparcOperand::rk_IntReg;
+ return IntRegs[RegNo];
+ }
- // %c0 - %c31
- if (name.starts_with_insensitive("c") &&
- !name.substr(1).getAsInteger(10, intVal) && intVal < 32) {
- RegNo = CoprocRegs[intVal];
- RegKind = SparcOperand::rk_CoprocReg;
- return true;
- }
+ if (Name.equals("xcc")) {
+ // FIXME:: check 64bit.
+ RegKind = SparcOperand::rk_Special;
+ return SP::ICC;
+ }
- if (name.equals("tpc")) {
- RegNo = Sparc::TPC;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("tnpc")) {
- RegNo = Sparc::TNPC;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("tstate")) {
- RegNo = Sparc::TSTATE;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("tt")) {
- RegNo = Sparc::TT;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("tick")) {
- RegNo = Sparc::TICK;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("tba")) {
- RegNo = Sparc::TBA;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("pstate")) {
- RegNo = Sparc::PSTATE;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("tl")) {
- RegNo = Sparc::TL;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("pil")) {
- RegNo = Sparc::PIL;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("cwp")) {
- RegNo = Sparc::CWP;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("cansave")) {
- RegNo = Sparc::CANSAVE;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("canrestore")) {
- RegNo = Sparc::CANRESTORE;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("cleanwin")) {
- RegNo = Sparc::CLEANWIN;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("otherwin")) {
- RegNo = Sparc::OTHERWIN;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("wstate")) {
- RegNo = Sparc::WSTATE;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("pc")) {
- RegNo = Sparc::ASR5;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("asi")) {
- RegNo = Sparc::ASR3;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("ccr")) {
- RegNo = Sparc::ASR2;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("gl")) {
- RegNo = Sparc::GL;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("ver")) {
- RegNo = Sparc::VER;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+ // JPS1 extension - aliases for ASRs
+ // Section A.51 - Read State Register
+ if (Name.equals("pcr")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR16;
+ }
- // JPS1 extension - aliases for ASRs
- // Section A.51 - Read State Register
- if (name.equals("pcr")) {
- RegNo = Sparc::ASR16;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("pic")) {
- RegNo = Sparc::ASR17;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("dcr")) {
- RegNo = Sparc::ASR18;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("gsr")) {
- RegNo = Sparc::ASR19;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("softint")) {
- RegNo = Sparc::ASR22;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("tick_cmpr")) {
- RegNo = Sparc::ASR23;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("stick") || name.equals("sys_tick")) {
- RegNo = Sparc::ASR24;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
- if (name.equals("stick_cmpr") || name.equals("sys_tick_cmpr")) {
- RegNo = Sparc::ASR25;
- RegKind = SparcOperand::rk_Special;
- return true;
- }
+ if (Name.equals("pic")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR17;
}
- return false;
+ if (Name.equals("dcr")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR18;
+ }
+ if (Name.equals("gsr")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR19;
+ }
+ if (Name.equals("softint")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR22;
+ }
+ if (Name.equals("tick_cmpr")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR23;
+ }
+ if (Name.equals("stick") || Name.equals("sys_tick")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR24;
+ }
+ if (Name.equals("stick_cmpr") || Name.equals("sys_tick_cmpr")) {
+ RegKind = SparcOperand::rk_Special;
+ return SP::ASR25;
+ }
+
+ return SP::NoRegister;
}
// Determine if an expression contains a reference to the symbol
@@ -1737,7 +1524,6 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeSparcAsmParser() {
RegisterMCAsmParser<SparcAsmParser> C(getTheSparcelTarget());
}
-#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "SparcGenAsmMatcher.inc"
diff --git a/llvm/lib/Target/Sparc/Sparc.td b/llvm/lib/Target/Sparc/Sparc.td
index 38a59e650f33..45cf985cfa06 100644
--- a/llvm/lib/Target/Sparc/Sparc.td
+++ b/llvm/lib/Target/Sparc/Sparc.td
@@ -99,7 +99,8 @@ include "SparcInstrInfo.td"
def SparcInstrInfo : InstrInfo;
def SparcAsmParser : AsmParser {
- bit ShouldEmitMatchRegisterName = 0;
+ let ShouldEmitMatchRegisterAltName = true;
+ let AllowDuplicateRegisterNames = true;
}
def SparcAsmParserVariant : AsmParserVariant {
diff --git a/llvm/lib/Target/WebAssembly/CMakeLists.txt b/llvm/lib/Target/WebAssembly/CMakeLists.txt
index bb2ccea5c145..f430be2653b4 100644
--- a/llvm/lib/Target/WebAssembly/CMakeLists.txt
+++ b/llvm/lib/Target/WebAssembly/CMakeLists.txt
@@ -43,6 +43,7 @@ add_llvm_target(WebAssemblyCodeGen
WebAssemblyOptimizeLiveIntervals.cpp
WebAssemblyOptimizeReturned.cpp
WebAssemblyPeephole.cpp
+ WebAssemblyRefTypeMem2Local.cpp
WebAssemblyRegisterInfo.cpp
WebAssemblyRegColoring.cpp
WebAssemblyRegNumbering.cpp
diff --git a/llvm/lib/Target/WebAssembly/WebAssembly.h b/llvm/lib/Target/WebAssembly/WebAssembly.h
index 91765ad117bd..1c40addb6d6f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssembly.h
+++ b/llvm/lib/Target/WebAssembly/WebAssembly.h
@@ -30,6 +30,7 @@ ModulePass *createWebAssemblyAddMissingPrototypes();
ModulePass *createWebAssemblyFixFunctionBitcasts();
FunctionPass *createWebAssemblyOptimizeReturned();
FunctionPass *createWebAssemblyLowerRefTypesIntPtrConv();
+FunctionPass *createWebAssemblyRefTypeMem2Local();
// ISel and immediate followup passes.
FunctionPass *createWebAssemblyISelDag(WebAssemblyTargetMachine &TM,
@@ -59,6 +60,7 @@ ModulePass *createWebAssemblyMCLowerPrePass();
// PassRegistry initialization declarations.
void initializeFixFunctionBitcastsPass(PassRegistry &);
void initializeOptimizeReturnedPass(PassRegistry &);
+void initializeWebAssemblyRefTypeMem2LocalPass(PassRegistry &);
void initializeWebAssemblyAddMissingPrototypesPass(PassRegistry &);
void initializeWebAssemblyArgumentMovePass(PassRegistry &);
void initializeWebAssemblyCFGSortPass(PassRegistry &);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 36f067956e63..7c47790d1e35 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -43,8 +43,6 @@ using namespace llvm;
#define DEBUG_TYPE "wasm-lower"
-extern cl::opt<bool> WasmEmitMultiValue;
-
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
const TargetMachine &TM, const WebAssemblySubtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
@@ -1290,7 +1288,7 @@ bool WebAssemblyTargetLowering::CanLowerReturn(
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext & /*Context*/) const {
// WebAssembly can only handle returning tuples with multivalue enabled
- return (Subtarget->hasMultivalue() && WasmEmitMultiValue) || Outs.size() <= 1;
+ return Subtarget->hasMultivalue() || Outs.size() <= 1;
}
SDValue WebAssemblyTargetLowering::LowerReturn(
@@ -1298,8 +1296,7 @@ SDValue WebAssemblyTargetLowering::LowerReturn(
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const {
- assert(((Subtarget->hasMultivalue() && WasmEmitMultiValue) ||
- Outs.size() <= 1) &&
+ assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
"MVP WebAssembly can only return up to one value");
if (!callingConvSupported(CallConv))
fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
index b969b8370a3e..1e959111a4db 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
@@ -22,8 +22,6 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
-extern cl::opt<bool> WasmEmitMultiValue;
-
WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() = default; // anchor.
MachineFunctionInfo *WebAssemblyFunctionInfo::clone(
@@ -73,8 +71,7 @@ void llvm::computeSignatureVTs(const FunctionType *Ty,
MVT PtrVT = MVT::getIntegerVT(TM.createDataLayout().getPointerSizeInBits());
if (Results.size() > 1 &&
- (!TM.getSubtarget<WebAssemblySubtarget>(ContextFunc).hasMultivalue() ||
- !WasmEmitMultiValue)) {
+ !TM.getSubtarget<WebAssemblySubtarget>(ContextFunc).hasMultivalue()) {
// WebAssembly can't lower returns of multiple values without demoting to
// sret unless multivalue is enabled (see
// WebAssemblyTargetLowering::CanLowerReturn). So replace multiple return
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRefTypeMem2Local.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRefTypeMem2Local.cpp
new file mode 100644
index 000000000000..d3c60ee289df
--- /dev/null
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRefTypeMem2Local.cpp
@@ -0,0 +1,91 @@
+//=== WebAssemblyRefTypeMem2Local.cpp - WebAssembly RefType Mem2Local -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Assign reference type allocas to local addrspace (addrspace(1)) so that
+/// their loads and stores can be lowered to local.gets/local.sets.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Utils/WasmAddressSpaces.h"
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssembly.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-ref-type-mem2local"
+
+namespace {
+class WebAssemblyRefTypeMem2Local final
+ : public FunctionPass,
+ public InstVisitor<WebAssemblyRefTypeMem2Local> {
+ StringRef getPassName() const override {
+ return "WebAssembly Reference Types Memory to Local";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnFunction(Function &F) override;
+ bool Changed = false;
+
+public:
+ static char ID;
+ WebAssemblyRefTypeMem2Local() : FunctionPass(ID) {}
+
+ void visitAllocaInst(AllocaInst &AI);
+};
+} // End anonymous namespace
+
+char WebAssemblyRefTypeMem2Local::ID = 0;
+INITIALIZE_PASS(WebAssemblyRefTypeMem2Local, DEBUG_TYPE,
+ "Assign reference type allocas to local address space", true,
+ false)
+
+FunctionPass *llvm::createWebAssemblyRefTypeMem2Local() {
+ return new WebAssemblyRefTypeMem2Local();
+}
+
+void WebAssemblyRefTypeMem2Local::visitAllocaInst(AllocaInst &AI) {
+ if (WebAssembly::isWebAssemblyReferenceType(AI.getAllocatedType())) {
+ Changed = true;
+ IRBuilder<> IRB(AI.getContext());
+ IRB.SetInsertPoint(&AI);
+ auto *NewAI = IRB.CreateAlloca(AI.getAllocatedType(),
+ WebAssembly::WASM_ADDRESS_SPACE_VAR, nullptr,
+ AI.getName() + ".var");
+
+ // The below is basically equivalent to AI.replaceAllUsesWith(NewAI), but we
+ // cannot use it because it requires the old and new types be the same,
+ // which is not true here because the address spaces are different.
+ if (AI.hasValueHandle())
+ ValueHandleBase::ValueIsRAUWd(&AI, NewAI);
+ if (AI.isUsedByMetadata())
+ ValueAsMetadata::handleRAUW(&AI, NewAI);
+ while (!AI.materialized_use_empty()) {
+ Use &U = *AI.materialized_use_begin();
+ U.set(NewAI);
+ }
+
+ AI.eraseFromParent();
+ }
+}
+
+bool WebAssemblyRefTypeMem2Local::runOnFunction(Function &F) {
+ LLVM_DEBUG(dbgs() << "********** WebAssembly RefType Mem2Local **********\n"
+ "********** Function: "
+ << F.getName() << '\n');
+
+ visit(F);
+ return Changed;
+}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
index 2a84c90c8960..3e2e029695ab 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
@@ -24,8 +24,6 @@
using namespace llvm;
-extern cl::opt<bool> WasmEmitMultiValue;
-
namespace {
enum RuntimeLibcallSignature {
@@ -696,7 +694,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(PtrTy);
break;
case i64_i64_func_f32:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -705,7 +703,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::F32);
break;
case i64_i64_func_f64:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -714,7 +712,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::F64);
break;
case i16_i16_func_i16_i16:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I32);
Rets.push_back(wasm::ValType::I32);
} else {
@@ -724,7 +722,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I32);
break;
case i32_i32_func_i32_i32:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I32);
Rets.push_back(wasm::ValType::I32);
} else {
@@ -734,7 +732,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I32);
break;
case i64_i64_func_i64_i64:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -744,7 +742,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i64_i64:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -756,7 +754,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i64_i64_iPTR:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -769,7 +767,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(PtrTy);
break;
case i64_i64_i64_i64_func_i64_i64_i64_i64:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
@@ -783,7 +781,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i32:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -853,7 +851,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i64_i64_i64_i64:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -867,7 +865,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i32:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -876,7 +874,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I32);
break;
case i64_i64_func_i64:
- if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
+ if (Subtarget.hasMultivalue()) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index b2f7ee970a73..4d4cae110148 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -54,15 +54,6 @@ static cl::opt<bool> WasmDisableFixIrreducibleControlFlowPass(
" irreducible control flow optimization pass"),
cl::init(false));
-// A temporary option to control emission of multivalue until multivalue
-// implementation is stable enough. We currently don't emit multivalue by
-// default even if the feature section allows it.
-// TODO Stabilize multivalue and delete this option
-cl::opt<bool>
- WasmEmitMultiValue("wasm-emit-multivalue", cl::Hidden,
- cl::desc("WebAssembly: Emit multivalue in the backend"),
- cl::init(false));
-
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyTarget() {
// Register the target.
RegisterTargetMachine<WebAssemblyTargetMachine> X(
@@ -77,6 +68,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyTarget() {
initializeLowerGlobalDtorsLegacyPassPass(PR);
initializeFixFunctionBitcastsPass(PR);
initializeOptimizeReturnedPass(PR);
+ initializeWebAssemblyRefTypeMem2LocalPass(PR);
initializeWebAssemblyArgumentMovePass(PR);
initializeWebAssemblySetP2AlignOperandsPass(PR);
initializeWebAssemblyReplacePhysRegsPass(PR);
diff --git a/llvm/lib/Target/X86/X86CompressEVEX.cpp b/llvm/lib/Target/X86/X86CompressEVEX.cpp
index b16ee87487ef..d2aa712772bf 100644
--- a/llvm/lib/Target/X86/X86CompressEVEX.cpp
+++ b/llvm/lib/Target/X86/X86CompressEVEX.cpp
@@ -189,7 +189,7 @@ static bool isRedundantNewDataDest(MachineInstr &MI, const X86Subtarget &ST) {
const MCInstrDesc &Desc = MI.getDesc();
Register Reg0 = MI.getOperand(0).getReg();
const MachineOperand &Op1 = MI.getOperand(1);
- if (!Op1.isReg())
+ if (!Op1.isReg() || X86::getFirstAddrOperandIdx(MI) == 1)
return false;
Register Reg1 = Op1.getReg();
if (Reg1 == Reg0)
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 95c4b02842ac..b9fb3fdb239e 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -61,22 +61,22 @@ public:
}
private:
- void ExpandICallBranchFunnel(MachineBasicBlock *MBB,
+ void expandICallBranchFunnel(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MBBI);
void expandCALL_RVMARKER(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI);
- bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
- bool ExpandMBB(MachineBasicBlock &MBB);
+ bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+ bool expandMBB(MachineBasicBlock &MBB);
/// This function expands pseudos which affects control flow.
/// It is done in separate pass to simplify blocks navigation in main
- /// pass(calling ExpandMBB).
- bool ExpandPseudosWhichAffectControlFlow(MachineFunction &MF);
+ /// pass (calling expandMBB).
+ bool expandPseudosWhichAffectControlFlow(MachineFunction &MF);
/// Expand X86::VASTART_SAVE_XMM_REGS into set of xmm copying instructions,
/// placed into separate block guarded by check for al register(for SystemV
/// abi).
- void ExpandVastartSaveXmmRegs(
+ void expandVastartSaveXmmRegs(
MachineBasicBlock *EntryBlk,
MachineBasicBlock::iterator VAStartPseudoInstr) const;
};
@@ -87,7 +87,7 @@ char X86ExpandPseudo::ID = 0;
INITIALIZE_PASS(X86ExpandPseudo, DEBUG_TYPE, X86_EXPAND_PSEUDO_NAME, false,
false)
-void X86ExpandPseudo::ExpandICallBranchFunnel(
+void X86ExpandPseudo::expandICallBranchFunnel(
MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI) {
MachineBasicBlock *JTMBB = MBB;
MachineInstr *JTInst = &*MBBI;
@@ -259,12 +259,12 @@ void X86ExpandPseudo::expandCALL_RVMARKER(MachineBasicBlock &MBB,
/// If \p MBBI is a pseudo instruction, this method expands
/// it to the corresponding (sequence of) actual instruction(s).
/// \returns true if \p MBBI has been expanded.
-bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
+bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
const DebugLoc &DL = MBBI->getDebugLoc();
- bool HasEGPR = STI->hasEGPR();
+#define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC)
switch (Opcode) {
default:
return false;
@@ -468,12 +468,10 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);
auto MIBLo =
- BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm))
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWkm)))
.addReg(Reg0, RegState::Define | getDeadRegState(DstIsDead));
auto MIBHi =
- BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm))
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWkm)))
.addReg(Reg1, RegState::Define | getDeadRegState(DstIsDead));
for (int i = 0; i < X86::AddrNumOperands; ++i) {
@@ -505,10 +503,10 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
Register Reg0 = TRI->getSubReg(Reg, X86::sub_mask_0);
Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);
- auto MIBLo = BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk));
- auto MIBHi = BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk));
+ auto MIBLo =
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWmk)));
+ auto MIBHi =
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWmk)));
for (int i = 0; i < X86::AddrNumOperands; ++i) {
MIBLo.add(MBBI->getOperand(i));
@@ -554,9 +552,8 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
return true;
}
case TargetOpcode::ICALL_BRANCH_FUNNEL:
- ExpandICallBranchFunnel(&MBB, MBBI);
+ expandICallBranchFunnel(&MBB, MBBI);
return true;
-#define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC)
case X86::PLDTILECFGV: {
MI.setDesc(TII->get(GET_EGPR_IF_ENABLED(X86::LDTILECFG)));
return true;
@@ -634,7 +631,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// | |
// | |
//
-void X86ExpandPseudo::ExpandVastartSaveXmmRegs(
+void X86ExpandPseudo::expandVastartSaveXmmRegs(
MachineBasicBlock *EntryBlk,
MachineBasicBlock::iterator VAStartPseudoInstr) const {
assert(VAStartPseudoInstr->getOpcode() == X86::VASTART_SAVE_XMM_REGS);
@@ -719,27 +716,27 @@ void X86ExpandPseudo::ExpandVastartSaveXmmRegs(
/// Expand all pseudo instructions contained in \p MBB.
/// \returns true if any expansion occurred for \p MBB.
-bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
+bool X86ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
bool Modified = false;
// MBBI may be invalidated by the expansion.
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineBasicBlock::iterator NMBBI = std::next(MBBI);
- Modified |= ExpandMI(MBB, MBBI);
+ Modified |= expandMI(MBB, MBBI);
MBBI = NMBBI;
}
return Modified;
}
-bool X86ExpandPseudo::ExpandPseudosWhichAffectControlFlow(MachineFunction &MF) {
+bool X86ExpandPseudo::expandPseudosWhichAffectControlFlow(MachineFunction &MF) {
// Currently pseudo which affects control flow is only
// X86::VASTART_SAVE_XMM_REGS which is located in Entry block.
// So we do not need to evaluate other blocks.
for (MachineInstr &Instr : MF.front().instrs()) {
if (Instr.getOpcode() == X86::VASTART_SAVE_XMM_REGS) {
- ExpandVastartSaveXmmRegs(&(MF.front()), Instr);
+ expandVastartSaveXmmRegs(&(MF.front()), Instr);
return true;
}
}
@@ -754,10 +751,10 @@ bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
X86FI = MF.getInfo<X86MachineFunctionInfo>();
X86FL = STI->getFrameLowering();
- bool Modified = ExpandPseudosWhichAffectControlFlow(MF);
+ bool Modified = expandPseudosWhichAffectControlFlow(MF);
for (MachineBasicBlock &MBB : MF)
- Modified |= ExpandMBB(MBB);
+ Modified |= expandMBB(MBB);
return Modified;
}
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index be416fb0db06..d914e1b61ab0 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1418,6 +1418,34 @@ bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
return !isWin64Prologue(MF) && MF.needsFrameMoves();
}
+/// Return true if an opcode is part of the REP group of instructions
+static bool isOpcodeRep(unsigned Opcode) {
+ switch (Opcode) {
+ case X86::REPNE_PREFIX:
+ case X86::REP_MOVSB_32:
+ case X86::REP_MOVSB_64:
+ case X86::REP_MOVSD_32:
+ case X86::REP_MOVSD_64:
+ case X86::REP_MOVSQ_32:
+ case X86::REP_MOVSQ_64:
+ case X86::REP_MOVSW_32:
+ case X86::REP_MOVSW_64:
+ case X86::REP_PREFIX:
+ case X86::REP_STOSB_32:
+ case X86::REP_STOSB_64:
+ case X86::REP_STOSD_32:
+ case X86::REP_STOSD_64:
+ case X86::REP_STOSQ_32:
+ case X86::REP_STOSQ_64:
+ case X86::REP_STOSW_32:
+ case X86::REP_STOSW_64:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjust the stack pointer. Adjust the stack pointer to allocate
/// space for local variables. Also emit labels used by the exception handler to
@@ -2194,13 +2222,44 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// flag (DF in EFLAGS register). Clear this flag by creating "cld" instruction
// in each prologue of interrupt handler function.
//
- // FIXME: Create "cld" instruction only in these cases:
+ // Create "cld" instruction only in these cases:
// 1. The interrupt handling function uses any of the "rep" instructions.
// 2. Interrupt handling function calls another function.
+ // 3. The function contains inline asm blocks, as we do not know what they do
//
- if (Fn.getCallingConv() == CallingConv::X86_INTR)
- BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
- .setMIFlag(MachineInstr::FrameSetup);
+ // TODO: We should also emit cld if we detect the use of std, but as of now,
+ // the compiler does not even emit that instruction or even define it, so in
+ // practice, this would only happen with inline asm, which we cover anyway.
+ if (Fn.getCallingConv() == CallingConv::X86_INTR) {
+ bool NeedsCLD = false;
+
+ for (const MachineBasicBlock &B : MF) {
+ for (const MachineInstr &MI : B) {
+ if (MI.isCall()) {
+ NeedsCLD = true;
+ break;
+ }
+
+ if (isOpcodeRep(MI.getOpcode())) {
+ NeedsCLD = true;
+ break;
+ }
+
+ if (MI.isInlineAsm()) {
+ // TODO: Parse asm for rep instructions or call sites?
+ // For now, let's play it safe and emit a cld instruction
+ // just in case.
+ NeedsCLD = true;
+ break;
+ }
+ }
+ }
+
+ if (NeedsCLD) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
// At this point we know if the function has WinCFI or not.
MF.setHasWinCFI(HasWinCFI);
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index c8f80ced3545..5cbd9ab4dc2d 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2732,13 +2732,15 @@ bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
insertDAGNode(*CurDAG, N, Zext);
SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, ShlAmt);
insertDAGNode(*CurDAG, N, NewShl);
+ CurDAG->ReplaceAllUsesWith(N, NewShl);
+ CurDAG->RemoveDeadNode(N.getNode());
// Convert the shift to scale factor.
AM.Scale = 1 << ShAmtV;
- AM.IndexReg = Zext;
-
- CurDAG->ReplaceAllUsesWith(N, NewShl);
- CurDAG->RemoveDeadNode(N.getNode());
+ // Match the index through matchIndexRecursively here; otherwise Zext may
+ // be replaced by other nodes but still be referenced later when calling a
+ // builder method.
+ AM.IndexReg = matchIndexRecursively(Zext, AM, Depth + 1);
return false;
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 0722c402348e..aea046b119d4 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -77,6 +77,37 @@ static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
"alignment set by x86-experimental-pref-loop-alignment."),
cl::Hidden);
+static cl::opt<int> BrMergingBaseCostThresh(
+ "x86-br-merging-base-cost", cl::init(1),
+ cl::desc(
+ "Sets the cost threshold for when multiple conditionals will be merged "
+ "into one branch versus be split in multiple branches. Merging "
+ "conditionals saves branches at the cost of additional instructions. "
+ "This value sets the instruction cost limit, below which conditionals "
+ "will be merged, and above which conditionals will be split."),
+ cl::Hidden);
+
+static cl::opt<int> BrMergingLikelyBias(
+ "x86-br-merging-likely-bias", cl::init(0),
+ cl::desc("Increases 'x86-br-merging-base-cost' in cases that it is likely "
+ "that all conditionals will be executed. For example for merging "
+ "the conditionals (a == b && c > d), if its known that a == b is "
+ "likely, then it is likely that if the conditionals are split "
+ "both sides will be executed, so it may be desirable to increase "
+ "the instruction cost threshold."),
+ cl::Hidden);
+
+static cl::opt<int> BrMergingUnlikelyBias(
+ "x86-br-merging-unlikely-bias", cl::init(1),
+ cl::desc(
+ "Decreases 'x86-br-merging-base-cost' in cases that it is unlikely "
+ "that all conditionals will be executed. For example for merging "
+ "the conditionals (a == b && c > d), if its known that a == b is "
+ "unlikely, then it is unlikely that if the conditionals are split "
+ "both sides will be executed, so it may be desirable to decrease "
+ "the instruction cost threshold."),
+ cl::Hidden);
+
static cl::opt<bool> MulConstantOptimization(
"mul-constant-optimization", cl::init(true),
cl::desc("Replace 'mul x, Const' with more effective instructions like "
@@ -3333,6 +3364,24 @@ unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand(
return ISD::SRL;
}
+TargetLoweringBase::CondMergingParams
+X86TargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
+ const Value *Lhs,
+ const Value *Rhs) const {
+ using namespace llvm::PatternMatch;
+ int BaseCost = BrMergingBaseCostThresh.getValue();
+ // a == b && a == c is a fast pattern on x86.
+ ICmpInst::Predicate Pred;
+ if (BaseCost >= 0 && Opc == Instruction::And &&
+ match(Lhs, m_ICmp(Pred, m_Value(), m_Value())) &&
+ Pred == ICmpInst::ICMP_EQ &&
+ match(Rhs, m_ICmp(Pred, m_Value(), m_Value())) &&
+ Pred == ICmpInst::ICMP_EQ)
+ BaseCost += 1;
+ return {BaseCost, BrMergingLikelyBias.getValue(),
+ BrMergingUnlikelyBias.getValue()};
+}
+
bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const {
return N->getOpcode() != ISD::FP_EXTEND;
}
@@ -5878,13 +5927,16 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
}
}
- // Peek through trunc/aext/zext.
+ // Peek through trunc/aext/zext/bitcast.
// TODO: aext shouldn't require SM_SentinelZero padding.
// TODO: handle shift of scalars.
unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
while (Scl.getOpcode() == ISD::TRUNCATE ||
Scl.getOpcode() == ISD::ANY_EXTEND ||
- Scl.getOpcode() == ISD::ZERO_EXTEND) {
+ Scl.getOpcode() == ISD::ZERO_EXTEND ||
+ (Scl.getOpcode() == ISD::BITCAST &&
+ Scl.getScalarValueSizeInBits() ==
+ Scl.getOperand(0).getScalarValueSizeInBits())) {
Scl = Scl.getOperand(0);
MinBitsPerElt =
std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
@@ -23419,6 +23471,20 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
return DAG.getBitcast(VT, Result);
}
+ // If the i64 elements are sign-extended enough to be representable as i32
+ // then we can compare the lower i32 bits and splat.
+ if (!FlipSigns && !Invert && DAG.ComputeNumSignBits(Op0) > 32 &&
+ DAG.ComputeNumSignBits(Op1) > 32) {
+ Op0 = DAG.getBitcast(MVT::v4i32, Op0);
+ Op1 = DAG.getBitcast(MVT::v4i32, Op1);
+
+ SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
+ static const int MaskLo[] = {0, 0, 2, 2};
+ SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
+
+ return DAG.getBitcast(VT, Result);
+ }
+
// Since SSE has no unsigned integer comparisons, we need to flip the sign
// bits of the inputs before performing those operations. The lower
// compare is always unsigned.
@@ -41259,6 +41325,20 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
KnownZero = LHSZero;
break;
}
+ case X86ISD::PCMPEQ:
+ case X86ISD::PCMPGT: {
+ APInt LHSUndef, LHSZero;
+ APInt RHSUndef, RHSZero;
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
+ Depth + 1))
+ return true;
+ if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
+ Depth + 1))
+ return true;
+ break;
+ }
case X86ISD::KSHIFTL: {
SDValue Src = Op.getOperand(0);
auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
@@ -41399,7 +41479,9 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
break;
}
case X86ISD::CVTSI2P:
- case X86ISD::CVTUI2P: {
+ case X86ISD::CVTUI2P:
+ case X86ISD::CVTPH2PS:
+ case X86ISD::CVTPS2PH: {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
APInt SrcUndef, SrcZero;
@@ -55789,6 +55871,15 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
}
}
+ auto IsExtractFree = [](SDValue V) {
+ V = peekThroughBitcasts(V);
+ if (ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
+ return true;
+ if (ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()))
+ return true;
+ return V.isUndef();
+ };
+
// If we're extracting the lowest subvector and we're the only user,
// we may be able to perform this with a smaller vector width.
unsigned InOpcode = InVec.getOpcode();
@@ -55830,14 +55921,27 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
}
if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
- (VT.is128BitVector() || VT.is256BitVector())) {
+ (SizeInBits == 128 || SizeInBits == 256)) {
SDValue InVecSrc = InVec.getOperand(0);
unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
return DAG.getNode(InOpcode, DL, VT, Ext);
}
+ if ((InOpcode == X86ISD::CMPP || InOpcode == X86ISD::PCMPEQ ||
+ InOpcode == X86ISD::PCMPGT) &&
+ (IsExtractFree(InVec.getOperand(0)) ||
+ IsExtractFree(InVec.getOperand(1))) &&
+ SizeInBits == 128) {
+ SDValue Ext0 =
+ extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
+ SDValue Ext1 =
+ extractSubVector(InVec.getOperand(1), IdxVal, DAG, DL, SizeInBits);
+ if (InOpcode == X86ISD::CMPP)
+ return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, InVec.getOperand(2));
+ return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1);
+ }
if (InOpcode == X86ISD::MOVDDUP &&
- (VT.is128BitVector() || VT.is256BitVector())) {
+ (SizeInBits == 128 || SizeInBits == 256)) {
SDValue Ext0 =
extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
return DAG.getNode(InOpcode, DL, VT, Ext0);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index f93c54781846..fe1943b57608 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1150,6 +1150,10 @@ namespace llvm {
bool preferScalarizeSplat(SDNode *N) const override;
+ CondMergingParams
+ getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs,
+ const Value *Rhs) const override;
+
bool shouldFoldConstantShiftPairToMask(const SDNode *N,
CombineLevel Level) const override;
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 9f1712274bc3..f393f86e64aa 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1493,27 +1493,71 @@ def : Pat<(xor GR32:$src1, -2147483648),
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
-def : Pat<(add GR16:$src1, 128),
- (SUB16ri GR16:$src1, -128)>;
+let Predicates = [NoNDD] in {
+ def : Pat<(add GR16:$src1, 128),
+ (SUB16ri GR16:$src1, -128)>;
+ def : Pat<(add GR32:$src1, 128),
+ (SUB32ri GR32:$src1, -128)>;
+ def : Pat<(add GR64:$src1, 128),
+ (SUB64ri32 GR64:$src1, -128)>;
+
+ def : Pat<(X86add_flag_nocf GR16:$src1, 128),
+ (SUB16ri GR16:$src1, -128)>;
+ def : Pat<(X86add_flag_nocf GR32:$src1, 128),
+ (SUB32ri GR32:$src1, -128)>;
+ def : Pat<(X86add_flag_nocf GR64:$src1, 128),
+ (SUB64ri32 GR64:$src1, -128)>;
+}
+let Predicates = [HasNDD] in {
+ def : Pat<(add GR16:$src1, 128),
+ (SUB16ri_ND GR16:$src1, -128)>;
+ def : Pat<(add GR32:$src1, 128),
+ (SUB32ri_ND GR32:$src1, -128)>;
+ def : Pat<(add GR64:$src1, 128),
+ (SUB64ri32_ND GR64:$src1, -128)>;
+
+ def : Pat<(X86add_flag_nocf GR16:$src1, 128),
+ (SUB16ri_ND GR16:$src1, -128)>;
+ def : Pat<(X86add_flag_nocf GR32:$src1, 128),
+ (SUB32ri_ND GR32:$src1, -128)>;
+ def : Pat<(X86add_flag_nocf GR64:$src1, 128),
+ (SUB64ri32_ND GR64:$src1, -128)>;
+}
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
(SUB16mi addr:$dst, -128)>;
-
-def : Pat<(add GR32:$src1, 128),
- (SUB32ri GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
(SUB32mi addr:$dst, -128)>;
-
-def : Pat<(add GR64:$src1, 128),
- (SUB64ri32 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
(SUB64mi32 addr:$dst, -128)>;
+let Predicates = [HasNDD] in {
+ def : Pat<(add (loadi16 addr:$src), 128),
+ (SUB16mi_ND addr:$src, -128)>;
+ def : Pat<(add (loadi32 addr:$src), 128),
+ (SUB32mi_ND addr:$src, -128)>;
+ def : Pat<(add (loadi64 addr:$src), 128),
+ (SUB64mi32_ND addr:$src, -128)>;
+}
-def : Pat<(X86add_flag_nocf GR16:$src1, 128),
- (SUB16ri GR16:$src1, -128)>;
-def : Pat<(X86add_flag_nocf GR32:$src1, 128),
- (SUB32ri GR32:$src1, -128)>;
-def : Pat<(X86add_flag_nocf GR64:$src1, 128),
- (SUB64ri32 GR64:$src1, -128)>;
+// The same trick applies for 32-bit immediate fields in 64-bit
+// instructions.
+let Predicates = [NoNDD] in {
+ def : Pat<(add GR64:$src1, 0x0000000080000000),
+ (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
+ def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
+ (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
+}
+let Predicates = [HasNDD] in {
+ def : Pat<(add GR64:$src1, 0x0000000080000000),
+ (SUB64ri32_ND GR64:$src1, 0xffffffff80000000)>;
+ def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
+ (SUB64ri32_ND GR64:$src1, 0xffffffff80000000)>;
+}
+def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
+ (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
+let Predicates = [HasNDD] in {
+ def : Pat<(add(loadi64 addr:$src), 0x0000000080000000),
+ (SUB64mi32_ND addr:$src, 0xffffffff80000000)>;
+}
// Depositing value to 8/16 bit subreg:
def : Pat<(or (and GR64:$dst, -256),
@@ -1532,15 +1576,6 @@ def : Pat<(or (and GR32:$dst, -65536),
(i32 (zextloadi16 addr:$src))),
(INSERT_SUBREG (i32 (COPY $dst)), (MOV16rm i16mem:$src), sub_16bit)>;
-// The same trick applies for 32-bit immediate fields in 64-bit
-// instructions.
-def : Pat<(add GR64:$src1, 0x0000000080000000),
- (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
-def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
- (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
-def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
- (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
-
// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index d051047ae465..56293e20567e 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -716,7 +716,7 @@ def INVPCID64 : I<0x82, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
def INVPCID64_EVEX : I<0xF2, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invpcid\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, WIG, Requires<[In64BitMode]>;
} // SchedRW
let Predicates = [HasINVPCID, NoEGPR] in {
diff --git a/llvm/lib/Target/X86/X86InstrUtils.td b/llvm/lib/Target/X86/X86InstrUtils.td
index 05ddcfbf2726..04d9d104ebc4 100644
--- a/llvm/lib/Target/X86/X86InstrUtils.td
+++ b/llvm/lib/Target/X86/X86InstrUtils.td
@@ -967,6 +967,7 @@ class ITy<bits<8> o, Format f, X86TypeInfo t, dag outs, dag ins, string m,
!strconcat(m, "{", t.InstrSuffix, "}\t", args), p>, NoCD8 {
let hasSideEffects = 0;
let hasREX_W = t.HasREX_W;
+ let IgnoresW = !if(!eq(t.VT, i8), 1, 0);
}
// BinOpRR - Instructions that read "reg, reg".
diff --git a/llvm/lib/Target/X86/X86InstrVMX.td b/llvm/lib/Target/X86/X86InstrVMX.td
index 7cc468fe15ad..da2b3d76a130 100644
--- a/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/llvm/lib/Target/X86/X86InstrVMX.td
@@ -24,7 +24,7 @@ def INVEPT64 : I<0x80, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
Requires<[In64BitMode]>;
def INVEPT64_EVEX : I<0xF0, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invept\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, WIG, Requires<[In64BitMode]>;
// 66 0F 38 81
def INVVPID32 : I<0x81, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
@@ -35,7 +35,7 @@ def INVVPID64 : I<0x81, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
Requires<[In64BitMode]>;
def INVVPID64_EVEX : I<0xF1, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invvpid\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, WIG, Requires<[In64BitMode]>;
// 0F 01 C1
def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
diff --git a/llvm/lib/TargetParser/AArch64TargetParser.cpp b/llvm/lib/TargetParser/AArch64TargetParser.cpp
index 6f7b421f4e08..e36832f563ee 100644
--- a/llvm/lib/TargetParser/AArch64TargetParser.cpp
+++ b/llvm/lib/TargetParser/AArch64TargetParser.cpp
@@ -69,7 +69,14 @@ bool AArch64::getExtensionFeatures(
StringRef AArch64::resolveCPUAlias(StringRef Name) {
for (const auto &A : CpuAliases)
- if (A.Alias == Name)
+ if (A.AltName == Name)
+ return A.Name;
+ return Name;
+}
+
+StringRef AArch64::resolveExtAlias(StringRef Name) {
+ for (const auto &A : ExtAliases)
+ if (A.AltName == Name)
return A.Name;
return Name;
}
@@ -91,7 +98,7 @@ void AArch64::fillValidCPUArchList(SmallVectorImpl<StringRef> &Values) {
Values.push_back(C.Name);
for (const auto &Alias : CpuAliases)
- Values.push_back(Alias.Alias);
+ Values.push_back(Alias.AltName);
}
bool AArch64::isX18ReservedByDefault(const Triple &TT) {
@@ -114,6 +121,10 @@ const AArch64::ArchInfo *AArch64::parseArch(StringRef Arch) {
}
std::optional<AArch64::ExtensionInfo> AArch64::parseArchExtension(StringRef ArchExt) {
+ // Resolve aliases first.
+ ArchExt = resolveExtAlias(ArchExt);
+
+ // Then find the Extension name.
for (const auto &A : Extensions) {
if (ArchExt == A.Name)
return A;
diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp b/llvm/lib/TargetParser/RISCVTargetParser.cpp
index 85cdd1289a95..8036df46fb47 100644
--- a/llvm/lib/TargetParser/RISCVTargetParser.cpp
+++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp
@@ -14,6 +14,7 @@
#include "llvm/TargetParser/RISCVTargetParser.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/TargetParser/Triple.h"
namespace llvm {
@@ -95,5 +96,28 @@ void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
}
+// This function is currently used by IREE, so it's not dead code.
+void getFeaturesForCPU(StringRef CPU,
+ SmallVectorImpl<std::string> &EnabledFeatures,
+ bool NeedPlus) {
+ StringRef MarchFromCPU = llvm::RISCV::getMArchFromMcpu(CPU);
+ if (MarchFromCPU == "")
+ return;
+
+ EnabledFeatures.clear();
+ auto RII = RISCVISAInfo::parseArchString(
+ MarchFromCPU, /* EnableExperimentalExtension */ true);
+
+ if (llvm::errorToBool(RII.takeError()))
+ return;
+
+ std::vector<std::string> FeatStrings =
+ (*RII)->toFeatures(/* AddAllExtensions */ false);
+ for (const auto &F : FeatStrings)
+ if (NeedPlus)
+ EnabledFeatures.push_back(F);
+ else
+ EnabledFeatures.push_back(F.substr(1));
+}
} // namespace RISCV
} // namespace llvm
diff --git a/llvm/lib/TextAPI/RecordVisitor.cpp b/llvm/lib/TextAPI/RecordVisitor.cpp
index 3ff6bbd8bbcb..d333b3309226 100644
--- a/llvm/lib/TextAPI/RecordVisitor.cpp
+++ b/llvm/lib/TextAPI/RecordVisitor.cpp
@@ -88,5 +88,5 @@ void SymbolConverter::visitObjCInterface(const ObjCInterfaceRecord &ObjCR) {
}
void SymbolConverter::visitObjCCategory(const ObjCCategoryRecord &Cat) {
- addIVars(Cat.getObjCIVars(), Cat.getName());
+ addIVars(Cat.getObjCIVars(), Cat.getSuperClassName());
}
diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index 49b3f2b085e1..5c7a74dadb46 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -125,7 +125,8 @@ static cl::opt<bool> ComputeDead("compute-dead", cl::init(true), cl::Hidden,
static cl::opt<bool> EnableImportMetadata(
"enable-import-metadata", cl::init(false), cl::Hidden,
- cl::desc("Enable import metadata like 'thinlto_src_module'"));
+ cl::desc("Enable import metadata like 'thinlto_src_module' and "
+ "'thinlto_src_file'"));
/// Summary file to use for function importing when using -function-import from
/// the command line.
@@ -1643,11 +1644,17 @@ Expected<bool> FunctionImporter::importFunctions(
if (Error Err = F.materialize())
return std::move(Err);
if (EnableImportMetadata) {
- // Add 'thinlto_src_module' metadata for statistics and debugging.
+ // Add 'thinlto_src_module' and 'thinlto_src_file' metadata for
+ // statistics and debugging.
F.setMetadata(
"thinlto_src_module",
MDNode::get(DestModule.getContext(),
{MDString::get(DestModule.getContext(),
+ SrcModule->getModuleIdentifier())}));
+ F.setMetadata(
+ "thinlto_src_file",
+ MDNode::get(DestModule.getContext(),
+ {MDString::get(DestModule.getContext(),
SrcModule->getSourceFileName())}));
}
GlobalsToImport.insert(&F);
@@ -1687,11 +1694,17 @@ Expected<bool> FunctionImporter::importFunctions(
<< GO->getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
if (EnableImportMetadata) {
- // Add 'thinlto_src_module' metadata for statistics and debugging.
+ // Add 'thinlto_src_module' and 'thinlto_src_file' metadata for
+ // statistics and debugging.
Fn->setMetadata(
"thinlto_src_module",
MDNode::get(DestModule.getContext(),
{MDString::get(DestModule.getContext(),
+ SrcModule->getModuleIdentifier())}));
+ Fn->setMetadata(
+ "thinlto_src_file",
+ MDNode::get(DestModule.getContext(),
+ {MDString::get(DestModule.getContext(),
SrcModule->getSourceFileName())}));
}
GlobalsToImport.insert(Fn);
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index c20fc942eaf0..55728709cde5 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -961,21 +961,16 @@ namespace {
struct PGOUseEdge : public PGOEdge {
using PGOEdge::PGOEdge;
- bool CountValid = false;
- uint64_t CountValue = 0;
+ std::optional<uint64_t> Count;
// Set edge count value
- void setEdgeCount(uint64_t Value) {
- CountValue = Value;
- CountValid = true;
- }
+ void setEdgeCount(uint64_t Value) { Count = Value; }
// Return the information string for this object.
std::string infoString() const {
- if (!CountValid)
+ if (!Count)
return PGOEdge::infoString();
- return (Twine(PGOEdge::infoString()) + " Count=" + Twine(CountValue))
- .str();
+ return (Twine(PGOEdge::infoString()) + " Count=" + Twine(*Count)).str();
}
};
@@ -983,27 +978,22 @@ using DirectEdges = SmallVector<PGOUseEdge *, 2>;
// This class stores the auxiliary information for each BB.
struct PGOUseBBInfo : public PGOBBInfo {
- uint64_t CountValue = 0;
- bool CountValid;
+ std::optional<uint64_t> Count;
int32_t UnknownCountInEdge = 0;
int32_t UnknownCountOutEdge = 0;
DirectEdges InEdges;
DirectEdges OutEdges;
- PGOUseBBInfo(unsigned IX) : PGOBBInfo(IX), CountValid(false) {}
+ PGOUseBBInfo(unsigned IX) : PGOBBInfo(IX) {}
// Set the profile count value for this BB.
- void setBBInfoCount(uint64_t Value) {
- CountValue = Value;
- CountValid = true;
- }
+ void setBBInfoCount(uint64_t Value) { Count = Value; }
// Return the information string of this object.
std::string infoString() const {
- if (!CountValid)
+ if (!Count)
return PGOBBInfo::infoString();
- return (Twine(PGOBBInfo::infoString()) + " Count=" + Twine(CountValue))
- .str();
+ return (Twine(PGOBBInfo::infoString()) + " Count=" + Twine(*Count)).str();
}
// Add an OutEdge and update the edge count.
@@ -1027,7 +1017,8 @@ static uint64_t sumEdgeCount(const ArrayRef<PGOUseEdge *> Edges) {
for (const auto &E : Edges) {
if (E->Removed)
continue;
- Total += E->CountValue;
+ if (E->Count)
+ Total += *E->Count;
}
return Total;
}
@@ -1216,17 +1207,17 @@ bool PGOUseFunc::setInstrumentedCounts(
// If only one out-edge, the edge profile count should be the same as BB
// profile count.
- if (SrcInfo.CountValid && SrcInfo.OutEdges.size() == 1)
- setEdgeCount(E.get(), SrcInfo.CountValue);
+ if (SrcInfo.Count && SrcInfo.OutEdges.size() == 1)
+ setEdgeCount(E.get(), *SrcInfo.Count);
else {
const BasicBlock *DestBB = E->DestBB;
PGOUseBBInfo &DestInfo = getBBInfo(DestBB);
// If only one in-edge, the edge profile count should be the same as BB
// profile count.
- if (DestInfo.CountValid && DestInfo.InEdges.size() == 1)
- setEdgeCount(E.get(), DestInfo.CountValue);
+ if (DestInfo.Count && DestInfo.InEdges.size() == 1)
+ setEdgeCount(E.get(), *DestInfo.Count);
}
- if (E->CountValid)
+ if (E->Count)
continue;
// E's count should have been set from profile. If not, this meenas E skips
// the instrumentation. We set the count to 0.
@@ -1239,7 +1230,7 @@ bool PGOUseFunc::setInstrumentedCounts(
// unknown edge in Edges vector.
void PGOUseFunc::setEdgeCount(DirectEdges &Edges, uint64_t Value) {
for (auto &E : Edges) {
- if (E->CountValid)
+ if (E->Count)
continue;
E->setEdgeCount(Value);
@@ -1481,38 +1472,36 @@ void PGOUseFunc::populateCounters() {
// For efficient traversal, it's better to start from the end as most
// of the instrumented edges are at the end.
for (auto &BB : reverse(F)) {
- PGOUseBBInfo *Count = findBBInfo(&BB);
- if (Count == nullptr)
+ PGOUseBBInfo *UseBBInfo = findBBInfo(&BB);
+ if (UseBBInfo == nullptr)
continue;
- if (!Count->CountValid) {
- if (Count->UnknownCountOutEdge == 0) {
- Count->CountValue = sumEdgeCount(Count->OutEdges);
- Count->CountValid = true;
+ if (!UseBBInfo->Count) {
+ if (UseBBInfo->UnknownCountOutEdge == 0) {
+ UseBBInfo->Count = sumEdgeCount(UseBBInfo->OutEdges);
Changes = true;
- } else if (Count->UnknownCountInEdge == 0) {
- Count->CountValue = sumEdgeCount(Count->InEdges);
- Count->CountValid = true;
+ } else if (UseBBInfo->UnknownCountInEdge == 0) {
+ UseBBInfo->Count = sumEdgeCount(UseBBInfo->InEdges);
Changes = true;
}
}
- if (Count->CountValid) {
- if (Count->UnknownCountOutEdge == 1) {
+ if (UseBBInfo->Count) {
+ if (UseBBInfo->UnknownCountOutEdge == 1) {
uint64_t Total = 0;
- uint64_t OutSum = sumEdgeCount(Count->OutEdges);
+ uint64_t OutSum = sumEdgeCount(UseBBInfo->OutEdges);
// If the one of the successor block can early terminate (no-return),
// we can end up with situation where out edge sum count is larger as
// the source BB's count is collected by a post-dominated block.
- if (Count->CountValue > OutSum)
- Total = Count->CountValue - OutSum;
- setEdgeCount(Count->OutEdges, Total);
+ if (*UseBBInfo->Count > OutSum)
+ Total = *UseBBInfo->Count - OutSum;
+ setEdgeCount(UseBBInfo->OutEdges, Total);
Changes = true;
}
- if (Count->UnknownCountInEdge == 1) {
+ if (UseBBInfo->UnknownCountInEdge == 1) {
uint64_t Total = 0;
- uint64_t InSum = sumEdgeCount(Count->InEdges);
- if (Count->CountValue > InSum)
- Total = Count->CountValue - InSum;
- setEdgeCount(Count->InEdges, Total);
+ uint64_t InSum = sumEdgeCount(UseBBInfo->InEdges);
+ if (*UseBBInfo->Count > InSum)
+ Total = *UseBBInfo->Count - InSum;
+ setEdgeCount(UseBBInfo->InEdges, Total);
Changes = true;
}
}
@@ -1527,16 +1516,16 @@ void PGOUseFunc::populateCounters() {
auto BI = findBBInfo(&BB);
if (BI == nullptr)
continue;
- assert(BI->CountValid && "BB count is not valid");
+ assert(BI->Count && "BB count is not valid");
}
#endif
- uint64_t FuncEntryCount = getBBInfo(&*F.begin()).CountValue;
+ uint64_t FuncEntryCount = *getBBInfo(&*F.begin()).Count;
uint64_t FuncMaxCount = FuncEntryCount;
for (auto &BB : F) {
auto BI = findBBInfo(&BB);
if (BI == nullptr)
continue;
- FuncMaxCount = std::max(FuncMaxCount, BI->CountValue);
+ FuncMaxCount = std::max(FuncMaxCount, *BI->Count);
}
// Fix the obviously inconsistent entry count.
@@ -1566,11 +1555,11 @@ void PGOUseFunc::setBranchWeights() {
isa<CallBrInst>(TI)))
continue;
- if (getBBInfo(&BB).CountValue == 0)
+ const PGOUseBBInfo &BBCountInfo = getBBInfo(&BB);
+ if (!*BBCountInfo.Count)
continue;
// We have a non-zero Branch BB.
- const PGOUseBBInfo &BBCountInfo = getBBInfo(&BB);
unsigned Size = BBCountInfo.OutEdges.size();
SmallVector<uint64_t, 2> EdgeCounts(Size, 0);
uint64_t MaxCount = 0;
@@ -1581,7 +1570,7 @@ void PGOUseFunc::setBranchWeights() {
if (DestBB == nullptr)
continue;
unsigned SuccNum = GetSuccessorNumber(SrcBB, DestBB);
- uint64_t EdgeCount = E->CountValue;
+ uint64_t EdgeCount = *E->Count;
if (EdgeCount > MaxCount)
MaxCount = EdgeCount;
EdgeCounts[SuccNum] = EdgeCount;
@@ -1622,7 +1611,7 @@ void PGOUseFunc::annotateIrrLoopHeaderWeights() {
if (BFI->isIrrLoopHeader(&BB) || isIndirectBrTarget(&BB)) {
Instruction *TI = BB.getTerminator();
const PGOUseBBInfo &BBCountInfo = getBBInfo(&BB);
- setIrrLoopHeaderMetadata(M, TI, BBCountInfo.CountValue);
+ setIrrLoopHeaderMetadata(M, TI, *BBCountInfo.Count);
}
}
}
@@ -1649,7 +1638,7 @@ void SelectInstVisitor::annotateOneSelectInst(SelectInst &SI) {
uint64_t TotalCount = 0;
auto BI = UseFunc->findBBInfo(SI.getParent());
if (BI != nullptr)
- TotalCount = BI->CountValue;
+ TotalCount = *BI->Count;
// False Count
SCounts[1] = (TotalCount > SCounts[0] ? TotalCount - SCounts[0] : 0);
uint64_t MaxCount = std::max(SCounts[0], SCounts[1]);
@@ -1850,7 +1839,7 @@ static void fixFuncEntryCount(PGOUseFunc &Func, LoopInfo &LI,
if (!Func.findBBInfo(&BBI))
continue;
auto BFICount = NBFI.getBlockProfileCount(&BBI);
- CountValue = Func.getBBInfo(&BBI).CountValue;
+ CountValue = *Func.getBBInfo(&BBI).Count;
BFICountValue = *BFICount;
SumCount.add(APFloat(CountValue * 1.0), APFloat::rmNearestTiesToEven);
SumBFICount.add(APFloat(BFICountValue * 1.0), APFloat::rmNearestTiesToEven);
@@ -1866,7 +1855,7 @@ static void fixFuncEntryCount(PGOUseFunc &Func, LoopInfo &LI,
if (Scale < 1.001 && Scale > 0.999)
return;
- uint64_t FuncEntryCount = Func.getBBInfo(&*F.begin()).CountValue;
+ uint64_t FuncEntryCount = *Func.getBBInfo(&*F.begin()).Count;
uint64_t NewEntryCount = 0.5 + FuncEntryCount * Scale;
if (NewEntryCount == 0)
NewEntryCount = 1;
@@ -1896,8 +1885,7 @@ static void verifyFuncBFI(PGOUseFunc &Func, LoopInfo &LI,
uint64_t CountValue = 0;
uint64_t BFICountValue = 0;
- if (Func.getBBInfo(&BBI).CountValid)
- CountValue = Func.getBBInfo(&BBI).CountValue;
+ CountValue = Func.getBBInfo(&BBI).Count.value_or(CountValue);
BBNum++;
if (CountValue)
@@ -2279,8 +2267,8 @@ template <> struct DOTGraphTraits<PGOUseFunc *> : DefaultDOTGraphTraits {
OS << getSimpleNodeName(Node) << ":\\l";
PGOUseBBInfo *BI = Graph->findBBInfo(Node);
OS << "Count : ";
- if (BI && BI->CountValid)
- OS << BI->CountValue << "\\l";
+ if (BI && BI->Count)
+ OS << *BI->Count << "\\l";
else
OS << "Unknown\\l";
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 9b6a39e98f5c..7e48c28176bd 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -461,7 +461,7 @@ static Decomposition decomposeGEP(GEPOperator &GEP,
// If Op0 is signed non-negative, the GEP is increasing monotonically and
// can be de-composed.
- if (!isKnownNonNegative(Index, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1))
+ if (!isKnownNonNegative(Index, DL))
Preconditions.emplace_back(CmpInst::ICMP_SGE, Index,
ConstantInt::get(Index->getType(), 0));
}
@@ -560,10 +560,10 @@ static Decomposition decompose(Value *V,
return MergeResults(Op0, Op1, IsSigned);
}
if (match(V, m_NSWAdd(m_Value(Op0), m_Value(Op1)))) {
- if (!isKnownNonNegative(Op0, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1))
+ if (!isKnownNonNegative(Op0, DL))
Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
ConstantInt::get(Op0->getType(), 0));
- if (!isKnownNonNegative(Op1, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1))
+ if (!isKnownNonNegative(Op1, DL))
Preconditions.emplace_back(CmpInst::ICMP_SGE, Op1,
ConstantInt::get(Op1->getType(), 0));
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 6ce9eb3656c9..490cb7e528eb 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -905,8 +905,8 @@ static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
for (Operand &Op : Ops) {
if (Op.D == Domain::NonNegative)
continue;
- auto *BO =
- BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg", SDI);
+ auto *BO = BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg",
+ SDI->getIterator());
BO->setDebugLoc(SDI->getDebugLoc());
Op.V = BO;
}
@@ -919,7 +919,8 @@ static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
// If the divident was non-positive, we need to negate the result.
if (Ops[0].D == Domain::NonPositive) {
- Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg", SDI);
+ Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg",
+ SDI->getIterator());
Res->setDebugLoc(SDI->getDebugLoc());
}
@@ -966,8 +967,8 @@ static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
for (Operand &Op : Ops) {
if (Op.D == Domain::NonNegative)
continue;
- auto *BO =
- BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg", SDI);
+ auto *BO = BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg",
+ SDI->getIterator());
BO->setDebugLoc(SDI->getDebugLoc());
Op.V = BO;
}
@@ -981,7 +982,8 @@ static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
// If the operands had two different domains, we need to negate the result.
if (Ops[0].D != Ops[1].D) {
- Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg", SDI);
+ Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg",
+ SDI->getIterator());
Res->setDebugLoc(SDI->getDebugLoc());
}
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 818c7b40d489..61109ed37659 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -270,7 +270,8 @@ static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name,
}
static Instruction *CreateNeg(Value *S1, const Twine &Name,
- Instruction *InsertBefore, Value *FlagsOp) {
+ BasicBlock::iterator InsertBefore,
+ Value *FlagsOp) {
if (S1->getType()->isIntOrIntVectorTy())
return BinaryOperator::CreateNeg(S1, Name, InsertBefore);
@@ -958,7 +959,8 @@ static Value *NegateValue(Value *V, Instruction *BI,
// Insert a 'neg' instruction that subtracts the value from zero to get the
// negation.
- Instruction *NewNeg = CreateNeg(V, V->getName() + ".neg", BI, BI);
+ Instruction *NewNeg =
+ CreateNeg(V, V->getName() + ".neg", BI->getIterator(), BI);
ToRedo.insert(NewNeg);
return NewNeg;
}
@@ -1246,7 +1248,7 @@ Value *ReassociatePass::RemoveFactorFromExpression(Value *V, Value *Factor) {
}
if (NeedsNegate)
- V = CreateNeg(V, "neg", &*InsertPt, BO);
+ V = CreateNeg(V, "neg", InsertPt, BO);
return V;
}
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 6c8785d52c4e..fad70e8bf286 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -293,7 +293,7 @@ calculateFragment(DILocalVariable *Variable,
if (!CurrentFragment) {
if (auto Size = Variable->getSizeInBits()) {
// Treat the current fragment as covering the whole variable.
- CurrentFragment = DIExpression::FragmentInfo(*Size, 0);
+ CurrentFragment = DIExpression::FragmentInfo(*Size, 0);
if (Target == CurrentFragment)
return UseNoFrag;
}
@@ -1213,8 +1213,9 @@ private:
if (!IsOffsetKnown)
return PI.setAborted(&II);
- insertUse(II, Offset, Length ? Length->getLimitedValue()
- : AllocSize - Offset.getLimitedValue(),
+ insertUse(II, Offset,
+ Length ? Length->getLimitedValue()
+ : AllocSize - Offset.getLimitedValue(),
(bool)Length);
}
@@ -1669,7 +1670,7 @@ static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) {
}
// Inject loads into all of the pred blocks.
- DenseMap<BasicBlock*, Value*> InjectedLoads;
+ DenseMap<BasicBlock *, Value *> InjectedLoads;
for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
BasicBlock *Pred = PN.getIncomingBlock(Idx);
Value *InVal = PN.getIncomingValue(Idx);
@@ -1678,7 +1679,7 @@ static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) {
// basic block, as long as the value is the same. So if we already injected
// a load in the predecessor, then we should reuse the same load for all
// duplicated entries.
- if (Value* V = InjectedLoads.lookup(Pred)) {
+ if (Value *V = InjectedLoads.lookup(Pred)) {
NewPN->addIncoming(V, Pred);
continue;
}
@@ -2077,8 +2078,7 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
if (BeginIndex * ElementSize != BeginOffset ||
BeginIndex >= cast<FixedVectorType>(Ty)->getNumElements())
return false;
- uint64_t EndOffset =
- std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
+ uint64_t EndOffset = std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
uint64_t EndIndex = EndOffset / ElementSize;
if (EndIndex * ElementSize != EndOffset ||
EndIndex > cast<FixedVectorType>(Ty)->getNumElements())
@@ -2754,8 +2754,8 @@ public:
Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
IRB.SetInsertPoint(OldUserI);
IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
- IRB.getInserter().SetNamePrefix(
- Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
+ IRB.getInserter().SetNamePrefix(Twine(NewAI.getName()) + "." +
+ Twine(BeginOffset) + ".");
CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
if (VecTy || IntTy)
@@ -2808,7 +2808,7 @@ private:
#else
Twine()
#endif
- );
+ );
}
/// Compute suitable alignment to access this slice of the *new*
@@ -3189,8 +3189,7 @@ private:
const bool CanContinue = [&]() {
if (VecTy || IntTy)
return true;
- if (BeginOffset > NewAllocaBeginOffset ||
- EndOffset < NewAllocaEndOffset)
+ if (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset)
return false;
// Length must be in range for FixedVectorType.
auto *C = cast<ConstantInt>(II.getLength());
@@ -3984,9 +3983,9 @@ private:
if (!Sel)
return false;
- LLVM_DEBUG(dbgs() << " Rewriting gep(select) -> select(gep):"
- << "\n original: " << *Sel
- << "\n " << GEPI);
+ LLVM_DEBUG(dbgs() << " Rewriting gep(select) -> select(gep):\n";
+ dbgs() << " original: " << *Sel << "\n";
+ dbgs() << " " << GEPI << "\n";);
auto GetNewOps = [&](Value *SelOp) {
SmallVector<Value *> NewOps;
@@ -4023,9 +4022,9 @@ private:
Visited.insert(NSelI);
enqueueUsers(*NSelI);
- LLVM_DEBUG(dbgs() << "\n to: " << *NTrue
- << "\n " << *NFalse
- << "\n " << *NSel << '\n');
+ LLVM_DEBUG(dbgs() << " to: " << *NTrue << "\n";
+ dbgs() << " " << *NFalse << "\n";
+ dbgs() << " " << *NSel << "\n";);
return true;
}
@@ -4037,18 +4036,17 @@ private:
PHINode *PHI = cast<PHINode>(GEPI.getPointerOperand());
if (GEPI.getParent() != PHI->getParent() ||
- llvm::any_of(PHI->incoming_values(), [](Value *In)
- { Instruction *I = dyn_cast<Instruction>(In);
- return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) ||
- succ_empty(I->getParent()) ||
- !I->getParent()->isLegalToHoistInto();
- }))
+ llvm::any_of(PHI->incoming_values(), [](Value *In) {
+ Instruction *I = dyn_cast<Instruction>(In);
+ return !I || isa<GetElementPtrInst>(I) || isa<PHINode>(I) ||
+ succ_empty(I->getParent()) ||
+ !I->getParent()->isLegalToHoistInto();
+ }))
return false;
- LLVM_DEBUG(dbgs() << " Rewriting gep(phi) -> phi(gep):"
- << "\n original: " << *PHI
- << "\n " << GEPI
- << "\n to: ");
+ LLVM_DEBUG(dbgs() << " Rewriting gep(phi) -> phi(gep):\n";
+ dbgs() << " original: " << *PHI << "\n";
+ dbgs() << " " << GEPI << "\n";);
SmallVector<Value *, 4> Index(GEPI.indices());
bool IsInBounds = GEPI.isInBounds();
@@ -4078,8 +4076,10 @@ private:
Visited.insert(NewPN);
enqueueUsers(*NewPN);
- LLVM_DEBUG(for (Value *In : NewPN->incoming_values())
- dbgs() << "\n " << *In;
+ LLVM_DEBUG(dbgs() << " to: ";
+ for (Value *In
+ : NewPN->incoming_values()) dbgs()
+ << "\n " << *In;
dbgs() << "\n " << *NewPN << '\n');
return true;
@@ -4089,8 +4089,7 @@ private:
if (foldGEPSelect(GEPI))
return true;
- if (isa<PHINode>(GEPI.getPointerOperand()) &&
- foldGEPPhi(GEPI))
+ if (isa<PHINode>(GEPI.getPointerOperand()) && foldGEPPhi(GEPI))
return true;
enqueueUsers(GEPI);
@@ -4162,17 +4161,17 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
return nullptr;
if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
- Type *ElementTy;
- uint64_t TyNumElements;
- if (auto *AT = dyn_cast<ArrayType>(Ty)) {
- ElementTy = AT->getElementType();
- TyNumElements = AT->getNumElements();
- } else {
- // FIXME: This isn't right for vectors with non-byte-sized or
- // non-power-of-two sized elements.
- auto *VT = cast<FixedVectorType>(Ty);
- ElementTy = VT->getElementType();
- TyNumElements = VT->getNumElements();
+ Type *ElementTy;
+ uint64_t TyNumElements;
+ if (auto *AT = dyn_cast<ArrayType>(Ty)) {
+ ElementTy = AT->getElementType();
+ TyNumElements = AT->getNumElements();
+ } else {
+ // FIXME: This isn't right for vectors with non-byte-sized or
+ // non-power-of-two sized elements.
+ auto *VT = cast<FixedVectorType>(Ty);
+ ElementTy = VT->getElementType();
+ TyNumElements = VT->getNumElements();
}
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedValue();
uint64_t NumSkippedElements = Offset / ElementSize;
@@ -4853,9 +4852,8 @@ AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
++NumNewAllocas;
}
- LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
- << "[" << P.beginOffset() << "," << P.endOffset()
- << ") to: " << *NewAI << "\n");
+ LLVM_DEBUG(dbgs() << "Rewriting alloca partition " << "[" << P.beginOffset()
+ << "," << P.endOffset() << ") to: " << *NewAI << "\n");
// Track the high watermark on the worklist as it is only relevant for
// promoted allocas. We will reset it to this point if the alloca is not in
@@ -5040,8 +5038,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
IsSorted = false;
}
}
- }
- else {
+ } else {
// We only allow whole-alloca splittable loads and stores
// for a large alloca to avoid creating too large BitVector.
for (Slice &S : AS) {
@@ -5069,7 +5066,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
uint64_t Offset;
uint64_t Size;
Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
- : Alloca(AI), Offset(O), Size(S) {}
+ : Alloca(AI), Offset(O), Size(S) {}
};
SmallVector<Fragment, 4> Fragments;
@@ -5083,7 +5080,8 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedValue();
// Don't include any padding.
uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
- Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
+ Fragments.push_back(
+ Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
}
}
++NumPartitions;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index e5deac797572..50a073e89062 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -315,12 +315,6 @@ static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
cl::desc(
"Enable runtime interleaving until load/store ports are saturated"));
-/// Interleave small loops with scalar reductions.
-static cl::opt<bool> InterleaveSmallLoopScalarReduction(
- "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
- cl::desc("Enable interleaving for loops with small iteration counts that "
- "contain scalar reductions to expose ILP."));
-
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
"vectorize-num-stores-pred", cl::init(1), cl::Hidden,
@@ -1510,19 +1504,36 @@ public:
}
/// Returns the TailFoldingStyle that is best for the current loop.
- TailFoldingStyle
- getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
- if (!CanFoldTailByMasking)
- return TailFoldingStyle::None;
+ TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
+ return IVUpdateMayOverflow ? ChosenTailFoldingStyle.first
+ : ChosenTailFoldingStyle.second;
+ }
+
+ /// Selects and saves TailFoldingStyle for 2 options - if IV update may
+ /// overflow or not.
+ void setTailFoldingStyles() {
+ assert(ChosenTailFoldingStyle.first == TailFoldingStyle::None &&
+ ChosenTailFoldingStyle.second == TailFoldingStyle::None &&
+ "Tail folding must not be selected yet.");
+ if (!Legal->prepareToFoldTailByMasking())
+ return;
- if (ForceTailFoldingStyle.getNumOccurrences())
- return ForceTailFoldingStyle;
+ if (ForceTailFoldingStyle.getNumOccurrences()) {
+ ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second =
+ ForceTailFoldingStyle;
+ return;
+ }
- return TTI.getPreferredTailFoldingStyle(IVUpdateMayOverflow);
+ ChosenTailFoldingStyle.first =
+ TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true);
+ ChosenTailFoldingStyle.second =
+ TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false);
}
/// Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailByMasking() const {
+ // TODO: check if it is possible to check for None style independent of
+ // IVUpdateMayOverflow flag in getTailFoldingStyle.
return getTailFoldingStyle() != TailFoldingStyle::None;
}
@@ -1675,8 +1686,10 @@ private:
/// iterations to execute in the scalar loop.
ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
- /// All blocks of loop are to be masked to fold tail of scalar iterations.
- bool CanFoldTailByMasking = false;
+ /// Control finally chosen tail folding style. The first element is used if
+ /// the IV update may overflow, the second element - if it does not.
+ std::pair<TailFoldingStyle, TailFoldingStyle> ChosenTailFoldingStyle =
+ std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None);
/// A map holding scalar costs for different vectorization factors. The
/// presence of a cost for an instruction in the mapping indicates that the
@@ -4633,10 +4646,9 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
// found modulo the vectorization factor is not zero, try to fold the tail
// by masking.
// FIXME: look for a smaller MaxVF that does divide TC rather than masking.
- if (Legal->prepareToFoldTailByMasking()) {
- CanFoldTailByMasking = true;
+ setTailFoldingStyles();
+ if (foldTailByMasking())
return MaxFactors;
- }
// If there was a tail-folding hint/switch, but we can't fold the tail by
// masking, fallback to a vectorization with a scalar epilogue.
@@ -5477,8 +5489,7 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
// If there are scalar reductions and TTI has enabled aggressive
// interleaving for reductions, we will interleave to expose ILP.
- if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
- AggressivelyInterleaveReductions) {
+ if (VF.isScalar() && AggressivelyInterleaveReductions) {
LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
// Interleave no less than SmallIC but not as aggressive as the normal IC
// to satisfy the rare situation when resources are too limited.
@@ -7450,12 +7461,14 @@ LoopVectorizationPlanner::executePlan(
(IsEpilogueVectorization || !ExpandedSCEVs) &&
"expanded SCEVs to reuse can only be used during epilogue vectorization");
- LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
- << '\n');
-
if (!IsEpilogueVectorization)
VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
+ LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
+ << ", UF=" << BestUF << '\n');
+ BestVPlan.setName("Final VPlan");
+ LLVM_DEBUG(BestVPlan.dump());
+
// Perform the actual loop transformation.
VPTransformState State(BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan,
OrigLoop->getHeader()->getContext());
@@ -8990,11 +9003,8 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
BasicBlock *BB = CurrentLinkI->getParent();
VPValue *CondOp = nullptr;
- if (CM.blockNeedsPredicationForAnyReason(BB)) {
- VPBuilder::InsertPointGuard Guard(Builder);
- Builder.setInsertPoint(CurrentLink);
+ if (CM.blockNeedsPredicationForAnyReason(BB))
CondOp = RecipeBuilder.getBlockInMask(BB);
- }
VPReductionRecipe *RedRecipe = new VPReductionRecipe(
RdxDesc, CurrentLinkI, PreviousLink, VecOp, CondOp);
@@ -10073,6 +10083,8 @@ bool LoopVectorizePass::processLoop(Loop *L) {
auto *ExpandedVal = BestEpiPlan.getVPValueOrAddLiveIn(
ExpandedSCEVs.find(ExpandR->getSCEV())->second);
ExpandR->replaceAllUsesWith(ExpandedVal);
+ if (BestEpiPlan.getTripCount() == ExpandR)
+ BestEpiPlan.resetTripCount(ExpandedVal);
ExpandR->eraseFromParent();
}
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index de4e56ff8065..daea3bdce688 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -192,6 +193,10 @@ static cl::opt<bool>
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;
+// Limit of the number of uses for potentially transformed instructions/values,
+// used in checks to avoid compile-time explode.
+static constexpr int UsesLimit = 8;
+
// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
@@ -940,7 +945,6 @@ static bool isUsedOutsideBlock(Value *V) {
if (!I)
return true;
// Limits the number of uses to save compile time.
- constexpr int UsesLimit = 8;
return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
all_of(I->users(), [I](User *U) {
auto *IU = dyn_cast<Instruction>(U);
@@ -1284,8 +1288,7 @@ public:
// Retruns true if the users of V1 and V2 won't need to be extracted.
auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
// Bail out if we have too many uses to save compilation time.
- static constexpr unsigned Limit = 8;
- if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
+ if (V1->hasNUsesOrMore(UsesLimit) || V2->hasNUsesOrMore(UsesLimit))
return false;
auto AllUsersVectorized = [U1, U2, this](Value *V) {
@@ -3997,12 +4000,14 @@ static bool isReverseOrder(ArrayRef<unsigned> Order) {
/// Checks if the given array of loads can be represented as a vectorized,
/// scatter or just simple gather.
-static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
+static LoadsState canVectorizeLoads(const BoUpSLP &R, ArrayRef<Value *> VL,
+ const Value *VL0,
const TargetTransformInfo &TTI,
const DataLayout &DL, ScalarEvolution &SE,
LoopInfo &LI, const TargetLibraryInfo &TLI,
SmallVectorImpl<unsigned> &Order,
- SmallVectorImpl<Value *> &PointerOps) {
+ SmallVectorImpl<Value *> &PointerOps,
+ bool TryRecursiveCheck = true) {
// Check that a vectorized load would load the same memory as a scalar
// load. For example, we don't want to vectorize loads that are smaller
// than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
@@ -4095,24 +4100,105 @@ static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
}
}
}
+ auto CheckForShuffledLoads = [&](Align CommonAlignment) {
+ unsigned Sz = DL.getTypeSizeInBits(ScalarTy);
+ unsigned MinVF = R.getMinVF(Sz);
+ unsigned MaxVF = std::max<unsigned>(bit_floor(VL.size() / 2), MinVF);
+ MaxVF = std::min(R.getMaximumVF(Sz, Instruction::Load), MaxVF);
+ for (unsigned VF = MaxVF; VF >= MinVF; VF /= 2) {
+ unsigned VectorizedCnt = 0;
+ SmallVector<LoadsState> States;
+ for (unsigned Cnt = 0, End = VL.size(); Cnt + VF <= End;
+ Cnt += VF, ++VectorizedCnt) {
+ ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
+ SmallVector<unsigned> Order;
+ SmallVector<Value *> PointerOps;
+ LoadsState LS =
+ canVectorizeLoads(R, Slice, Slice.front(), TTI, DL, SE, LI, TLI,
+ Order, PointerOps, /*TryRecursiveCheck=*/false);
+ // Check that the sorted loads are consecutive.
+ if (LS == LoadsState::Gather)
+ break;
+        // If the reorder is needed - consider as high-cost masked gather for now.
+ if ((LS == LoadsState::Vectorize ||
+ LS == LoadsState::StridedVectorize) &&
+ !Order.empty() && !isReverseOrder(Order))
+ LS = LoadsState::ScatterVectorize;
+ States.push_back(LS);
+ }
+      // Can be vectorized later as a series of loads/insertelements.
+ if (VectorizedCnt == VL.size() / VF) {
+        // Compare masked gather cost and loads + insertsubvector costs.
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
+ InstructionCost MaskedGatherCost = TTI.getGatherScatterOpCost(
+ Instruction::Load, VecTy,
+ cast<LoadInst>(VL0)->getPointerOperand(),
+ /*VariableMask=*/false, CommonAlignment, CostKind);
+ InstructionCost VecLdCost = 0;
+ auto *SubVecTy = FixedVectorType::get(ScalarTy, VF);
+ for (auto [I, LS] : enumerate(States)) {
+ auto *LI0 = cast<LoadInst>(VL[I * VF]);
+ switch (LS) {
+ case LoadsState::Vectorize:
+ VecLdCost += TTI.getMemoryOpCost(
+ Instruction::Load, SubVecTy, LI0->getAlign(),
+ LI0->getPointerAddressSpace(), CostKind,
+ TTI::OperandValueInfo());
+ break;
+ case LoadsState::StridedVectorize:
+ VecLdCost += TTI.getStridedMemoryOpCost(
+ Instruction::Load, SubVecTy, LI0->getPointerOperand(),
+ /*VariableMask=*/false, CommonAlignment, CostKind);
+ break;
+ case LoadsState::ScatterVectorize:
+ VecLdCost += TTI.getGatherScatterOpCost(
+ Instruction::Load, SubVecTy, LI0->getPointerOperand(),
+ /*VariableMask=*/false, CommonAlignment, CostKind);
+ break;
+ case LoadsState::Gather:
+ llvm_unreachable(
+ "Expected only consecutive, strided or masked gather loads.");
+ }
+ VecLdCost +=
+ TTI.getShuffleCost(TTI ::SK_InsertSubvector, VecTy,
+ std::nullopt, CostKind, I * VF, SubVecTy);
+ }
+ // If masked gather cost is higher - better to vectorize, so
+ // consider it as a gather node. It will be better estimated
+ // later.
+ if (MaskedGatherCost > VecLdCost)
+ return true;
+ }
+ }
+ return false;
+ };
// TODO: need to improve analysis of the pointers, if not all of them are
// GEPs or have > 2 operands, we end up with a gather node, which just
// increases the cost.
Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent());
bool ProfitableGatherPointers =
- static_cast<unsigned>(count_if(
- PointerOps,
- [L](Value *V) { return L && L->isLoopInvariant(V); })) <= Sz / 2 &&
- Sz > 2;
+ L && Sz > 2 && count_if(PointerOps, [L](Value *V) {
+ return L->isLoopInvariant(V);
+ }) <= Sz / 2;
if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) {
auto *GEP = dyn_cast<GetElementPtrInst>(P);
return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) ||
- (GEP && GEP->getNumOperands() == 2);
+ (GEP && GEP->getNumOperands() == 2 &&
+ isa<Constant, Instruction>(GEP->getOperand(1)));
})) {
Align CommonAlignment = computeCommonAlignment<LoadInst>(VL);
if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) &&
- !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment))
+ !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment)) {
+ // Check if potential masked gather can be represented as series
+ // of loads + insertsubvectors.
+ if (TryRecursiveCheck && CheckForShuffledLoads(CommonAlignment)) {
+ // If masked gather cost is higher - better to vectorize, so
+ // consider it as a gather node. It will be better estimated
+ // later.
+ return LoadsState::Gather;
+ }
return LoadsState::ScatterVectorize;
+ }
}
}
@@ -4392,24 +4478,16 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
if (!areTwoInsertFromSameBuildVector(
IE1, IE2,
[](InsertElementInst *II) { return II->getOperand(0); }))
- return false;
- std::optional<unsigned> Idx1 = getInsertIndex(IE1);
- std::optional<unsigned> Idx2 = getInsertIndex(IE2);
- if (Idx1 == std::nullopt || Idx2 == std::nullopt)
- return false;
- return *Idx1 < *Idx2;
+ return I1 < I2;
+ return getInsertIndex(IE1) < getInsertIndex(IE2);
}
if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1))
if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) {
if (EE1->getOperand(0) != EE2->getOperand(0))
- return false;
- std::optional<unsigned> Idx1 = getExtractIndex(EE1);
- std::optional<unsigned> Idx2 = getExtractIndex(EE2);
- if (Idx1 == std::nullopt || Idx2 == std::nullopt)
- return false;
- return *Idx1 < *Idx2;
+ return I1 < I2;
+ return getInsertIndex(EE1) < getInsertIndex(EE2);
}
- return false;
+ return I1 < I2;
};
auto IsIdentityOrder = [](const OrdersType &Order) {
for (unsigned Idx : seq<unsigned>(0, Order.size()))
@@ -4432,23 +4510,21 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
return std::nullopt; // No need to reorder.
return std::move(ResOrder);
}
- if (TE.State == TreeEntry::NeedToGather) {
+ if (TE.State == TreeEntry::NeedToGather && !TE.isAltShuffle() &&
+ allSameType(TE.Scalars)) {
// TODO: add analysis of other gather nodes with extractelement
// instructions and other values/instructions, not only undefs.
- if (((TE.getOpcode() == Instruction::ExtractElement &&
- !TE.isAltShuffle()) ||
+ if ((TE.getOpcode() == Instruction::ExtractElement ||
(all_of(TE.Scalars,
[](Value *V) {
return isa<UndefValue, ExtractElementInst>(V);
}) &&
any_of(TE.Scalars,
[](Value *V) { return isa<ExtractElementInst>(V); }))) &&
- all_of(TE.Scalars,
- [](Value *V) {
- auto *EE = dyn_cast<ExtractElementInst>(V);
- return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
- }) &&
- allSameType(TE.Scalars)) {
+ all_of(TE.Scalars, [](Value *V) {
+ auto *EE = dyn_cast<ExtractElementInst>(V);
+ return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
+ })) {
// Check that gather of extractelements can be represented as
// just a shuffle of a single vector.
OrdersType CurrentOrder;
@@ -5158,7 +5234,7 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
OrderedEntries.insert(Data.first);
}
} else {
- reorderOrder(Data.first->ReorderIndices, Mask, /*BottomOrder=*/true);
+ reorderOrder(Data.first->ReorderIndices, Mask);
}
}
}
@@ -5234,8 +5310,7 @@ BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const {
for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) {
Value *V = TE->Scalars[Lane];
// To save compilation time we don't visit if we have too many users.
- static constexpr unsigned UsersLimit = 4;
- if (V->hasNUsesOrMore(UsersLimit))
+ if (V->hasNUsesOrMore(UsesLimit))
break;
// Collect stores per pointer object.
@@ -5560,8 +5635,8 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
// treats loading/storing it as an i8 struct. If we vectorize loads/stores
// from such a struct, we read/write packed bits disagreeing with the
// unvectorized version.
- switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, *TLI, CurrentOrder,
- PointerOps)) {
+ switch (canVectorizeLoads(*this, VL, VL0, *TTI, *DL, *SE, *LI, *TLI,
+ CurrentOrder, PointerOps)) {
case LoadsState::Vectorize:
return TreeEntry::Vectorize;
case LoadsState::ScatterVectorize:
@@ -7342,7 +7417,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
SmallVector<Value *> PointerOps;
OrdersType CurrentOrder;
LoadsState LS =
- canVectorizeLoads(Slice, Slice.front(), TTI, *R.DL, *R.SE,
+ canVectorizeLoads(R, Slice, Slice.front(), TTI, *R.DL, *R.SE,
*R.LI, *R.TLI, CurrentOrder, PointerOps);
switch (LS) {
case LoadsState::Vectorize:
@@ -7605,8 +7680,24 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
transformMaskAfterShuffle(CommonMask, CommonMask);
}
SameNodesEstimated = false;
- Cost += createShuffle(&E1, E2, Mask);
- transformMaskAfterShuffle(CommonMask, Mask);
+ if (!E2 && InVectors.size() == 1) {
+ unsigned VF = E1.getVectorFactor();
+ if (Value *V1 = InVectors.front().dyn_cast<Value *>()) {
+ VF = std::max(VF,
+ cast<FixedVectorType>(V1->getType())->getNumElements());
+ } else {
+ const auto *E = InVectors.front().get<const TreeEntry *>();
+ VF = std::max(VF, E->getVectorFactor());
+ }
+ for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
+ if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
+ CommonMask[Idx] = Mask[Idx] + VF;
+ Cost += createShuffle(InVectors.front(), &E1, CommonMask);
+ transformMaskAfterShuffle(CommonMask, CommonMask);
+ } else {
+ Cost += createShuffle(&E1, E2, Mask);
+ transformMaskAfterShuffle(CommonMask, Mask);
+ }
}
class ShuffleCostBuilder {
@@ -8102,7 +8193,7 @@ const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E,
unsigned Idx) const {
Value *Op = E->getOperand(Idx).front();
if (const TreeEntry *TE = getTreeEntry(Op)) {
- if (find_if(E->UserTreeIndices, [&](const EdgeInfo &EI) {
+ if (find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) {
return EI.EdgeIdx == Idx && EI.UserTE == E;
}) != TE->UserTreeIndices.end())
return TE;
@@ -8974,6 +9065,20 @@ bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
if (isFullyVectorizableTinyTree(ForReduction))
return false;
+ // Check if any of the gather node forms an insertelement buildvector
+ // somewhere.
+ if (any_of(VectorizableTree, [](const std::unique_ptr<TreeEntry> &TE) {
+ return TE->State == TreeEntry::NeedToGather &&
+ all_of(TE->Scalars, [](Value *V) {
+ return isa<ExtractElementInst, UndefValue>(V) ||
+ (!V->hasNUsesOrMore(UsesLimit) &&
+ any_of(V->users(), [](User *U) {
+ return isa<InsertElementInst>(U);
+ }));
+ });
+ }))
+ return false;
+
assert(VectorizableTree.empty()
? ExternalUses.empty()
: true && "We shouldn't have any external users");
@@ -9364,7 +9469,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
VectorCasts
.insert(std::make_pair(ScalarTE, FTy->getElementType()))
.second) {
- unsigned BWSz = It->second.second;
+ unsigned BWSz = It->second.first;
unsigned DstBWSz = DL->getTypeSizeInBits(FTy->getElementType());
unsigned VecOpcode;
if (DstBWSz < BWSz)
@@ -9376,7 +9481,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
InstructionCost C = TTI->getCastInstrCost(
VecOpcode, FTy,
FixedVectorType::get(
- IntegerType::get(FTy->getContext(), It->second.first),
+ IntegerType::get(FTy->getContext(), BWSz),
FTy->getNumElements()),
TTI::CastContextHint::None, CostKind);
LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
@@ -13891,12 +13996,14 @@ bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
if (Idx != Set.size() - 1)
continue;
}
- if (Operands.size() <= 1) {
+ auto E = make_scope_exit([&, &DataVar = Data]() {
Operands.clear();
- Operands.push_back(Stores[Data.first]);
- PrevDist = Data.second;
+ Operands.push_back(Stores[DataVar.first]);
+ PrevDist = DataVar.second;
+ });
+
+ if (Operands.size() <= 1)
continue;
- }
unsigned MaxVecRegSize = R.getMaxVecRegSize();
unsigned EltSize = R.getVectorElementSize(Operands[0]);
@@ -13912,16 +14019,24 @@ bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
unsigned MinVF = TTI->getStoreMinimumVF(
R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy);
- if (MaxVF <= MinVF) {
+ if (MaxVF < MinVF) {
LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF
- << ") <= "
+ << ") < "
<< "MinVF (" << MinVF << ")\n");
+ continue;
}
+ unsigned Sz = 1 + Log2_32(MaxVF) - Log2_32(MinVF);
+ SmallVector<unsigned> CandidateVFs(Sz);
// FIXME: Is division-by-2 the correct step? Should we assert that the
// register size is a power-of-2?
+ unsigned Size = MaxVF;
+ for_each(CandidateVFs, [&](unsigned &VF) {
+ VF = Size;
+ Size /= 2;
+ });
unsigned StartIdx = 0;
- for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
+ for (unsigned Size : CandidateVFs) {
for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size);
assert(
@@ -13955,9 +14070,6 @@ bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
if (StartIdx >= Operands.size())
break;
}
- Operands.clear();
- Operands.push_back(Stores[Data.first]);
- PrevDist = Data.second;
}
};
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 47cebecdb270..16c09a83e777 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2931,6 +2931,14 @@ public:
return TripCount;
}
+ /// Resets the trip count for the VPlan. The caller must make sure all uses of
+ /// the original trip count have been replaced.
+ void resetTripCount(VPValue *NewTripCount) {
+ assert(TripCount && NewTripCount && TripCount->getNumUsers() == 0 &&
+ "TripCount always must be set");
+ TripCount = NewTripCount;
+ }
+
/// The backedge taken count of the original loop.
VPValue *getOrCreateBackedgeTakenCount() {
if (!BackedgeTakenCount)
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
index 30da63b3feec..7cc7cff0e6e8 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
@@ -14,7 +14,7 @@ define void @vector_broadcast() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = shufflevector <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = shufflevector <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = shufflevector <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %9 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %10 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %11 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
@@ -29,7 +29,7 @@ define void @vector_broadcast() {
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = shufflevector <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = shufflevector <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = shufflevector <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %9 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %10 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %11 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
@@ -78,20 +78,20 @@ declare <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x
define void @vector_reverse() {
; CHECK-LABEL: 'vector_reverse'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv16i8 = call <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv32i8 = call <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv16i8 = call <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv32i8 = call <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %reverse_nxv2i16 = call <vscale x 2 x i16> @llvm.experimental.vector.reverse.nxv2i16(<vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %reverse_nxv4i16 = call <vscale x 4 x i16> @llvm.experimental.vector.reverse.nxv4i16(<vscale x 4 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv8i16 = call <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv16i16 = call <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv4i32 = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv8i32 = call <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv2i64 = call <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv4i64 = call <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 69 for instruction: %reverse_nxv8i64 = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 138 for instruction: %reverse_nxv16i64 = call <vscale x 16 x i64> @llvm.experimental.vector.reverse.nxv16i64(<vscale x 16 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %reverse_nxv32i64 = call <vscale x 32 x i64> @llvm.experimental.vector.reverse.nxv32i64(<vscale x 32 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %reverse_nxv16i1 = call <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv8i16 = call <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv16i16 = call <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv4i32 = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv8i32 = call <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv2i64 = call <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv4i64 = call <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 83 for instruction: %reverse_nxv8i64 = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 166 for instruction: %reverse_nxv16i64 = call <vscale x 16 x i64> @llvm.experimental.vector.reverse.nxv16i64(<vscale x 16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 332 for instruction: %reverse_nxv32i64 = call <vscale x 32 x i64> @llvm.experimental.vector.reverse.nxv32i64(<vscale x 32 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %reverse_nxv16i1 = call <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv8i1 = call <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv4i1 = call <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv2i1 = call <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1> undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
index fc4a6b17d3f8..46bf3152ac5b 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
@@ -45,9 +45,9 @@ define void @broadcast_scalable() #0{
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %38 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %39 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %40 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 41 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; SIZE-LABEL: 'broadcast_scalable'
@@ -92,9 +92,9 @@ define void @broadcast_scalable() #0{
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %38 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %39 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %40 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
%zero = shufflevector <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll
index 146909cc93df..e80dbe31683f 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll
@@ -20,21 +20,21 @@ define void @reverse() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i16 = shufflevector <16 x i16> undef, <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16 = shufflevector <16 x i16> undef, <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f64 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f64 = shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; SIZE-LABEL: 'reverse'
diff --git a/llvm/test/Assembler/debug-info.ll b/llvm/test/Assembler/debug-info.ll
index 419623a2cb7d..06144b261373 100644
--- a/llvm/test/Assembler/debug-info.ll
+++ b/llvm/test/Assembler/debug-info.ll
@@ -1,8 +1,8 @@
; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
; RUN: verify-uselistorder %s
-; CHECK: !named = !{!0, !0, !1, !2, !3, !4, !5, !6, !7, !8, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39}
-!named = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42}
+; CHECK: !named = !{!0, !0, !1, !2, !3, !4, !5, !6, !7, !8, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43}
+!named = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43, !44, !45, !46}
; CHECK: !0 = !DISubrange(count: 3, lowerBound: 0)
; CHECK-NEXT: !1 = !DISubrange(count: 3, lowerBound: 4)
@@ -99,3 +99,15 @@
; CHECK-NEXT: !39 = !DIBasicType(name: "u64.le", size: 64, align: 1, encoding: DW_ATE_unsigned, flags: DIFlagLittleEndian)
!41 = !DIBasicType(name: "u64.be", size: 64, align: 1, encoding: DW_ATE_unsigned, flags: DIFlagBigEndian)
!42 = !DIBasicType(name: "u64.le", size: 64, align: 1, encoding: DW_ATE_unsigned, flags: DIFlagLittleEndian)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: false, ptrAuthAuthenticatesNullValues: false)
+!43 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: false)
+!44 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: false, ptrAuthAuthenticatesNullValues: true)
+!45 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthAuthenticatesNullValues: true)
+
+; CHECK: !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !13, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: true)
+!46 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !15, ptrAuthKey: 2, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1234, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: true)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir
new file mode 100644
index 000000000000..2ce5c693f3db
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir
@@ -0,0 +1,1244 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+# REQUIRES: asserts
+
+
+---
+name: valid_and_eq_0_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: valid_and_eq_0_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(s1) = G_ICMP intpred(eq), [[OR]](s32), %zero
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_eq_1_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_eq_1_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_eq_0_eq_1_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_eq_1_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %one
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %one:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_ne_0_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_eq_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_ne_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: valid_or_ne_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: valid_or_ne_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK-NEXT: %or:_(s1) = G_ICMP intpred(ne), [[OR]](s32), %zero
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_ne_1_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_ne_1_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_ne_0_ne_1_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_ne_1_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %one
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %one:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_eq_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_ne_0_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+
+---
+name: valid_and_eq_0_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_and_eq_0_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(s1) = G_ICMP intpred(eq), [[OR]](s64), %zero
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_1_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_1_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_eq_1_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_eq_1_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %one
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %one:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: valid_or_ne_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_or_ne_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK-NEXT: %or:_(s1) = G_ICMP intpred(ne), [[OR]](s64), %zero
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_1_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_1_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_ne_1_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_ne_1_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %one
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %one:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_eq_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: valid_and_eq_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_and_eq_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_ICMP intpred(eq), [[OR]](<2 x s32>), %zero
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_non_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_non_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %non_zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_eq_non_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_eq_non_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %non_zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: valid_or_ne_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_or_ne_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_ICMP intpred(ne), [[OR]](<2 x s32>), %zero
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_non_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_non_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %non_zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_ne_non_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_ne_non_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %non_zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_eq_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_eq_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_p0_src
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_p0_src
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(p0) = COPY $x0
+ ; CHECK-NEXT: %y:_(p0) = COPY $x1
+ ; CHECK-NEXT: %zero:_(p0) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(p0), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(p0), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(p0) = COPY $x0
+ %y:_(p0) = COPY $x1
+ %zero:_(p0) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(p0), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(p0), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_p0_src_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+
+ ; CHECK-LABEL: name: invalid_p0_src_vec
+ ; CHECK: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x p0>) = COPY $q0
+ ; CHECK-NEXT: %y:_(<2 x p0>) = COPY $q1
+ ; CHECK-NEXT: %scalar0:_(p0) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %zero:_(<2 x p0>) = G_BUILD_VECTOR %scalar0(p0), %scalar0(p0)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x p0>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x p0>), %zero
+ ; CHECK-NEXT: %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s64>) = G_ZEXT %or(<2 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %zext(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %x:_(<2 x p0>) = COPY $q0
+ %y:_(<2 x p0>) = COPY $q1
+ %scalar0:_(p0) = G_CONSTANT i64 0
+ %zero:_(<2 x p0>) = G_BUILD_VECTOR %scalar0(p0), %scalar0(p0)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x p0>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x p0>), %zero:_
+ %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s64>) = G_ZEXT %or:_(<2 x s1>)
+ $q0 = COPY %zext
+ RET_ReallyLR implicit $q0
+
+...
+---
+name: invalid_diff_src_ty
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $x1
+
+ ; CHECK-LABEL: name: invalid_diff_src_ty
+ ; CHECK: liveins: $w0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero_s32:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero_s64:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero_s32
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero_s64
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s64) = COPY $x1
+ %zero_s32:_(s32) = G_CONSTANT i32 0
+ %zero_s64:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero_s32:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero_s64:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_diff_src_ty_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $q1
+
+ ; CHECK-LABEL: name: invalid_diff_src_ty_vec
+ ; CHECK: liveins: $x0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: %scalar0s32:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar0s64:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %zero_s32:_(<2 x s32>) = G_BUILD_VECTOR %scalar0s32(s32), %scalar0s32(s32)
+ ; CHECK-NEXT: %zero_s64:_(<2 x s64>) = G_BUILD_VECTOR %scalar0s64(s64), %scalar0s64(s64)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero_s32
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s64>), %zero_s64
+ ; CHECK-NEXT: %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %or(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s64>) = COPY $q1
+ %scalar0s32:_(s32) = G_CONSTANT i32 0
+ %scalar0s64:_(s64) = G_CONSTANT i64 0
+ %zero_s32:_(<2 x s32>) = G_BUILD_VECTOR %scalar0s32(s32), %scalar0s32(s32)
+ %zero_s64:_(<2 x s64>) = G_BUILD_VECTOR %scalar0s64(s64), %scalar0s64(s64)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero_s32:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s64>), %zero_s64:_
+ %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %or:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
index ef8e46653640..42f6570047fc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
@@ -26,7 +26,7 @@ define void @asm_simple_register_clobber() {
define i64 @asm_register_early_clobber() {
; CHECK-LABEL: name: asm_register_early_clobber
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: INLINEASM &"mov $0, 7; mov $1, 7", 1 /* sideeffect attdialect */, 2752523 /* regdef-ec:GPR64common */, def early-clobber %0, 2752523 /* regdef-ec:GPR64common */, def early-clobber %1, !0
+ ; CHECK-NEXT: INLINEASM &"mov $0, 7; mov $1, 7", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef-ec:GPR64common */, def early-clobber %0, {{[0-9]+}} /* regdef-ec:GPR64common */, def early-clobber %1, !0
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY %0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY %1
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
@@ -54,7 +54,7 @@ entry:
define i32 @test_single_register_output() nounwind ssp {
; CHECK-LABEL: name: test_single_register_output
; CHECK: bb.1.entry:
- ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0
+ ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -66,7 +66,7 @@ entry:
define i64 @test_single_register_output_s64() nounwind ssp {
; CHECK-LABEL: name: test_single_register_output_s64
; CHECK: bb.1.entry:
- ; CHECK-NEXT: INLINEASM &"mov $0, 7", 0 /* attdialect */, 2752522 /* regdef:GPR64common */, def %0
+ ; CHECK-NEXT: INLINEASM &"mov $0, 7", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR64common */, def %0
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY %0
; CHECK-NEXT: $x0 = COPY [[COPY]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
@@ -79,7 +79,7 @@ entry:
define float @test_multiple_register_outputs_same() #0 {
; CHECK-LABEL: name: test_multiple_register_outputs_same
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0, 1703946 /* regdef:GPR32common */, def %1
+ ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, {{[0-9]+}} /* regdef:GPR32common */, def %1
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]]
@@ -96,7 +96,7 @@ define float @test_multiple_register_outputs_same() #0 {
define double @test_multiple_register_outputs_mixed() #0 {
; CHECK-LABEL: name: test_multiple_register_outputs_mixed
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0, 2555914 /* regdef:FPR64 */, def %1
+ ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, {{[0-9]+}} /* regdef:FPR64 */, def %1
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY %1
; CHECK-NEXT: $d0 = COPY [[COPY1]](s64)
@@ -125,7 +125,7 @@ define zeroext i8 @test_register_output_trunc(ptr %src) nounwind {
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 32", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %1
+ ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 32", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
@@ -155,7 +155,7 @@ define void @test_input_register_imm() {
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 42
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY [[C]](s64)
- ; CHECK-NEXT: INLINEASM &"mov x0, $0", 1 /* sideeffect attdialect */, 2752521 /* reguse:GPR64common */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"mov x0, $0", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:GPR64common */, [[COPY]]
; CHECK-NEXT: RET_ReallyLR
call void asm sideeffect "mov x0, $0", "r"(i64 42)
ret void
@@ -190,7 +190,7 @@ define zeroext i8 @test_input_register(ptr %src) nounwind {
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY [[COPY]](p0)
- ; CHECK-NEXT: INLINEASM &"ldtrb ${0:w}, [$1]", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %1, 2752521 /* reguse:GPR64common */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"ldtrb ${0:w}, [$1]", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1, {{[0-9]+}} /* reguse:GPR64common */, [[COPY1]]
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
@@ -207,7 +207,7 @@ define i32 @test_memory_constraint(ptr %a) nounwind {
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK-NEXT: INLINEASM &"ldr $0, $1", 8 /* mayload attdialect */, 1703946 /* regdef:GPR32common */, def %1, 262158 /* mem:m */, [[COPY]](p0)
+ ; CHECK-NEXT: INLINEASM &"ldr $0, $1", 8 /* mayload attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1, 262158 /* mem:m */, [[COPY]](p0)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -221,7 +221,7 @@ define i16 @test_anyext_input() {
; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32common = COPY [[ANYEXT]](s32)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 1703946 /* regdef:GPR32common */, def %0, 1703945 /* reguse:GPR32common */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, {{[0-9]+}} /* reguse:GPR32common */, [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
@@ -237,7 +237,7 @@ define i16 @test_anyext_input_with_matching_constraint() {
; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32common = COPY [[ANYEXT]](s32)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 1703946 /* regdef:GPR32common */, def %0, 2147483657 /* reguse tiedto:$0 */, [[COPY]](tied-def 3)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, 2147483657 /* reguse tiedto:$0 */, [[COPY]](tied-def 3)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
index 59eb80ae6146..fbffb50bcbc8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
@@ -71,7 +71,7 @@ define void @test2() #0 personality ptr @__gcc_personality_v0 {
; CHECK-NEXT: G_INVOKE_REGION_START
; CHECK-NEXT: EH_LABEL <mcsymbol >
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY [[DEF]](p0)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 2752521 /* reguse:GPR64common */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:GPR64common */, [[COPY]]
; CHECK-NEXT: EH_LABEL <mcsymbol >
; CHECK-NEXT: G_BR %bb.2
; CHECK-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
index 8ed7059d2e75..58299696e78f 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
@@ -5,7 +5,7 @@ entry:
; CHECK: %0:ppr = COPY $p0
; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
; CHECK: %1:pnr_p8to15 = COPY %0
-; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, 458761 /* reguse:PNR_p8to15 */, %1
+; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_p8to15 */, %1
; CHECK: RET_ReallyLR
%predcnt.addr = alloca target("aarch64.svcount"), align 2
store target("aarch64.svcount") %predcnt, ptr %predcnt.addr, align 2
@@ -19,7 +19,7 @@ entry:
; CHECK: %0:ppr = COPY $p0
; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
; CHECK: %1:pnr = COPY %0
-; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, 262153 /* reguse:PNR */, %1
+; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR */, %1
; CHECK: RET_ReallyLR
%predcnt.addr = alloca target("aarch64.svcount"), align 2
store target("aarch64.svcount") %predcnt, ptr %predcnt.addr, align 2
@@ -33,7 +33,7 @@ entry:
; CHECK: %0:ppr = COPY $p0
; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
; CHECK: %1:pnr_3b = COPY %0
-; CHECK: INLINEASM &"fadd z0.h, $0/m, z0.h, #0.5", 1 /* sideeffect attdialect */, 393225 /* reguse:PNR_3b */, %1
+; CHECK: INLINEASM &"fadd z0.h, $0/m, z0.h, #0.5", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_3b */, %1
; CHECK: RET_ReallyLR
%predcnt.addr = alloca target("aarch64.svcount"), align 2
store target("aarch64.svcount") %predcnt, ptr %predcnt.addr, align 2
diff --git a/llvm/test/CodeGen/AArch64/abs.ll b/llvm/test/CodeGen/AArch64/abs.ll
index 40ba2c12fa15..f2cad6631dc2 100644
--- a/llvm/test/CodeGen/AArch64/abs.ll
+++ b/llvm/test/CodeGen/AArch64/abs.ll
@@ -2,9 +2,6 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-; CHECK-GI: warning: Instruction selection used fallback path for abs_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for abs_v2i16
-
; ===== Legal Scalars =====
define i8 @abs_i8(i8 %a){
diff --git a/llvm/test/CodeGen/AArch64/aes.ll b/llvm/test/CodeGen/AArch64/aes.ll
new file mode 100644
index 000000000000..386114f4a0d7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aes.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc %s -o - -mtriple=aarch64 -mattr=+aes | FileCheck %s
+
+declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
+declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
+
+define <16 x i8> @aese(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aese:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aese v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %r
+}
+
+define <16 x i8> @aese_c(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aese_c:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aese v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %b, <16 x i8> %a)
+ ret <16 x i8> %r
+}
+
+define <16 x i8> @aesd(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aesd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aesd v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %r
+}
+
+define <16 x i8> @aesd_c(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aesd_c:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aesd v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %b, <16 x i8> %a)
+ ret <16 x i8> %r
+}
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
index 7244ac949ab8..9a4e01a29ecb 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
@@ -14,12 +14,12 @@ define void @array_1D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
@@ -81,18 +81,18 @@ define void @array_2D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #5, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #4, mul vl]
-; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #5, mul vl]
-; CHECK-NEXT: st1d { z3.d }, p0, [sp, #4, mul vl]
-; CHECK-NEXT: st1d { z5.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT: st1d { z4.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp, #5, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: st1d { z3.d }, p0, [sp, #3, mul vl]
+; CHECK-NEXT: st1d { z5.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z4.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp]
; CHECK-NEXT: addvl sp, sp, #6
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
index f03a6f018d34..7292d52aaf47 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
@@ -13,12 +13,12 @@ define void @test(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/and-mask-removal.ll b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
index 17ff01597016..a8a59f159126 100644
--- a/llvm/test/CodeGen/AArch64/and-mask-removal.ll
+++ b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
@@ -65,9 +65,8 @@ if.end: ; preds = %if.then, %entry
define zeroext i1 @test8_0(i8 zeroext %x) align 2 {
; CHECK-LABEL: test8_0:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: add w8, w0, #74
-; CHECK-NEXT: and w8, w8, #0xff
-; CHECK-NEXT: cmp w8, #236
+; CHECK-NEXT: sub w8, w0, #182
+; CHECK-NEXT: cmn w8, #20
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
entry:
@@ -508,16 +507,17 @@ define i64 @pr58109(i8 signext %0) {
define i64 @pr58109b(i8 signext %0, i64 %a, i64 %b) {
; CHECK-SD-LABEL: pr58109b:
; CHECK-SD: ; %bb.0:
-; CHECK-SD-NEXT: add w8, w0, #1
-; CHECK-SD-NEXT: tst w8, #0xfe
-; CHECK-SD-NEXT: csel x0, x1, x2, eq
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: sub w8, w8, #255
+; CHECK-SD-NEXT: cmn w8, #254
+; CHECK-SD-NEXT: csel x0, x1, x2, lo
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: pr58109b:
; CHECK-GI: ; %bb.0:
-; CHECK-GI-NEXT: add w8, w0, #1
-; CHECK-GI-NEXT: and w8, w8, #0xff
-; CHECK-GI-NEXT: cmp w8, #2
+; CHECK-GI-NEXT: mov w8, #-255 ; =0xffffff01
+; CHECK-GI-NEXT: add w8, w8, w0, uxtb
+; CHECK-GI-NEXT: cmn w8, #254
; CHECK-GI-NEXT: csel x0, x1, x2, lo
; CHECK-GI-NEXT: ret
%2 = add i8 %0, 1
diff --git a/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll b/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll
index 3b7b5dd3fa7a..fbe89e70e4d8 100644
--- a/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll
+++ b/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll
@@ -18,7 +18,7 @@ define i32 @test0() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %5
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -31,7 +31,7 @@ define i32 @test0() {
; CHECK-NEXT: bb.2.direct:
; CHECK-NEXT: successors: %bb.4(0x80000000), %bb.3(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %7, 13 /* imm */, %bb.3
+ ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %7, 13 /* imm */, %bb.3
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY %7
; CHECK-NEXT: B %bb.4
; CHECK-NEXT: {{ $}}
@@ -107,7 +107,7 @@ define i32 @dont_split1() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %1, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %1
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
@@ -168,7 +168,7 @@ define i32 @dont_split3() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, 13 /* imm */, %bb.2
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.x:
@@ -194,7 +194,7 @@ define i32 @split_me0() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -244,7 +244,7 @@ define i32 @split_me1(i1 %z) {
; CHECK-NEXT: bb.1.w:
; CHECK-NEXT: successors: %bb.3(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY %5
; CHECK-NEXT: B %bb.3
; CHECK-NEXT: {{ $}}
@@ -297,7 +297,7 @@ define i32 @split_me2(i1 %z) {
; CHECK-NEXT: bb.1.w:
; CHECK-NEXT: successors: %bb.3(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %6, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %6, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY %6
; CHECK-NEXT: B %bb.3
; CHECK-NEXT: {{ $}}
@@ -340,7 +340,7 @@ define i32 @dont_split4() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
@@ -379,7 +379,7 @@ define i32 @dont_split5() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -410,7 +410,7 @@ define i32 @split_me3() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -456,7 +456,7 @@ define i32 @dont_split6(i32 %0) {
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr32all = PHI [[COPY]], %bb.0, %2, %bb.2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32common = COPY [[PHI]]
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %4, 2147483657 /* reguse tiedto:$0 */, [[COPY1]](tied-def 3), 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %4, 2147483657 /* reguse tiedto:$0 */, [[COPY1]](tied-def 3), 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY %4
; CHECK-NEXT: B %bb.3
; CHECK-NEXT: {{ $}}
@@ -491,7 +491,7 @@ define i32 @split_me4() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -522,7 +522,7 @@ define i32 @split_me5() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
index d44d45ea03b6..aca2816225e3 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
@@ -193,7 +193,7 @@ body: |
liveins: $z0, $z1, $p0, $p1, $w0
renamable $p2 = COPY killed $p0
- renamable $p0 = PTRUE_S 31
+ renamable $p0 = PTRUE_S 31, implicit $vg
ST1W_IMM killed renamable $z0, renamable $p0, %stack.0.z0.addr, 0 :: (store unknown-size into %ir.z0.addr, align 16)
ST1W_IMM killed renamable $z1, renamable $p0, %stack.1.z1.addr, 0 :: (store unknown-size into %ir.z1.addr, align 16)
STR_PXI killed renamable $p2, %stack.2.p0.addr, 0 :: (store unknown-size into %ir.p0.addr, align 2)
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
index 75917ef32ae2..0ea180b20730 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
@@ -111,7 +111,7 @@ body: |
STRXui killed renamable $x1, %stack.1, 0, debug-location !8
DBG_VALUE %stack.1, $noreg, !11, !DIExpression(DW_OP_constu, 16, DW_OP_plus, DW_OP_deref), debug-location !8
- renamable $p2 = PTRUE_S 31, debug-location !DILocation(line: 4, column: 1, scope: !5)
+ renamable $p2 = PTRUE_S 31, implicit $vg, debug-location !DILocation(line: 4, column: 1, scope: !5)
ST1W_IMM renamable $z0, renamable $p2, %stack.2, 0, debug-location !DILocation(line: 5, column: 1, scope: !5)
DBG_VALUE %stack.2, $noreg, !12, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 5, column: 1, scope: !5)
ST1W_IMM renamable $z1, killed renamable $p2, %stack.3, 0, debug-location !DILocation(line: 6, column: 1, scope: !5)
diff --git a/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir b/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir
index 483dbd2f14d5..92fb053b0db7 100644
--- a/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir
+++ b/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir
@@ -91,10 +91,10 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @c
; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[LOADgot]], 0 :: (dereferenceable load (s64) from @c)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 2359306 /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %2, 2147483657 /* reguse tiedto:$0 */, [[LDRDui]](tied-def 3)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %2, 2147483657 /* reguse tiedto:$0 */, [[LDRDui]](tied-def 3)
; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY %2
; CHECK-NEXT: [[LDRDui1:%[0-9]+]]:fpr64 = LDRDui [[LOADgot]], 0 :: (dereferenceable load (s64) from @c)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 2359306 /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %4, 2147483657 /* reguse tiedto:$0 */, [[LDRDui1]](tied-def 3)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %4, 2147483657 /* reguse tiedto:$0 */, [[LDRDui1]](tied-def 3)
; CHECK-NEXT: [[FNEGDr:%[0-9]+]]:fpr64 = FNEGDr %2
; CHECK-NEXT: nofpexcept FCMPDrr %4, killed [[FNEGDr]], implicit-def $nzcv, implicit $fpcr
; CHECK-NEXT: Bcc 1, %bb.2, implicit $nzcv
diff --git a/llvm/test/CodeGen/AArch64/live-debugvalues-sve.mir b/llvm/test/CodeGen/AArch64/live-debugvalues-sve.mir
index 8903ca2b865b..612453ab53f4 100644
--- a/llvm/test/CodeGen/AArch64/live-debugvalues-sve.mir
+++ b/llvm/test/CodeGen/AArch64/live-debugvalues-sve.mir
@@ -145,7 +145,7 @@ body: |
liveins: $z1
ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp, debug-location !34
- renamable $p0 = PTRUE_S 31, debug-location !34
+ renamable $p0 = PTRUE_S 31, implicit $vg, debug-location !34
$x0 = ADDXri %stack.0, 0, 0, debug-location !34
ST1W_IMM renamable $z1, killed renamable $p0, %stack.0, 0, debug-location !34 :: (store unknown-size into %stack.0, align 16)
$z0 = COPY renamable $z1, debug-location !34
@@ -157,7 +157,7 @@ body: |
$z7 = COPY renamable $z1, debug-location !34
BL @bar, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7, implicit $x0, implicit-def $sp, implicit-def $z0, implicit-def $z1, debug-location !34
ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp, debug-location !34
- renamable $p0 = PTRUE_S 31, debug-location !34
+ renamable $p0 = PTRUE_S 31, implicit $vg, debug-location !34
$z3 = IMPLICIT_DEF
renamable $z1 = LD1W_IMM renamable $p0, %stack.0, 0, debug-location !34 :: (load unknown-size from %stack.0, align 16)
ST1W_IMM renamable $z3, killed renamable $p0, %stack.0, 0 :: (store unknown-size into %stack.0, align 16)
diff --git a/llvm/test/CodeGen/AArch64/load.ll b/llvm/test/CodeGen/AArch64/load.ll
new file mode 100644
index 000000000000..7f4540d915ab
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/load.ll
@@ -0,0 +1,318 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; ===== Legal Scalars =====
+
+define i8 @load_i8(ptr %ptr){
+; CHECK-LABEL: load_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldrb w0, [x0]
+; CHECK-NEXT: ret
+ %a = load i8 , ptr %ptr
+ ret i8 %a
+}
+
+define i16 @load_i16(ptr %ptr){
+; CHECK-LABEL: load_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldrh w0, [x0]
+; CHECK-NEXT: ret
+ %a = load i16 , ptr %ptr
+ ret i16 %a
+}
+
+define i32 @load_i32(ptr %ptr){
+; CHECK-LABEL: load_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %a = load i32 , ptr %ptr
+ ret i32 %a
+}
+
+define i64 @load_i64(ptr %ptr){
+; CHECK-LABEL: load_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr x0, [x0]
+; CHECK-NEXT: ret
+ %a = load i64 , ptr %ptr
+ ret i64 %a
+}
+
+; ===== Legal Vector Types =====
+
+define <8 x i8> @load_v8i8(ptr %ptr){
+; CHECK-LABEL: load_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr d0, [x0]
+; CHECK-NEXT: ret
+ %a = load <8 x i8>, ptr %ptr
+ ret <8 x i8> %a
+}
+
+define <16 x i8> @load_v16i8(ptr %ptr){
+; CHECK-LABEL: load_v16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ret
+ %a = load <16 x i8>, ptr %ptr
+ ret <16 x i8> %a
+}
+
+define <4 x i16> @load_v4i16(ptr %ptr){
+; CHECK-LABEL: load_v4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr d0, [x0]
+; CHECK-NEXT: ret
+ %a = load <4 x i16>, ptr %ptr
+ ret <4 x i16> %a
+}
+
+define <8 x i16> @load_v8i16(ptr %ptr){
+; CHECK-LABEL: load_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ret
+ %a = load <8 x i16>, ptr %ptr
+ ret <8 x i16> %a
+}
+
+define <2 x i32> @load_v2i32(ptr %ptr){
+; CHECK-LABEL: load_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr d0, [x0]
+; CHECK-NEXT: ret
+ %a = load <2 x i32>, ptr %ptr
+ ret <2 x i32> %a
+}
+
+define <4 x i32> @load_v4i32(ptr %ptr){
+; CHECK-LABEL: load_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ret
+ %a = load <4 x i32>, ptr %ptr
+ ret <4 x i32> %a
+}
+
+define <2 x i64> @load_v2i64(ptr %ptr){
+; CHECK-LABEL: load_v2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ret
+ %a = load <2 x i64>, ptr %ptr
+ ret <2 x i64> %a
+}
+
+; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
+
+define <2 x i8> @load_v2i8(ptr %ptr, <2 x i8> %b){
+; CHECK-SD-LABEL: load_v2i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ld1 { v0.b }[0], [x0]
+; CHECK-SD-NEXT: add x8, x0, #1
+; CHECK-SD-NEXT: ld1 { v0.b }[4], [x8]
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: load_v2i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr b0, [x0]
+; CHECK-GI-NEXT: ldr b1, [x0, #1]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
+ %a = load <2 x i8>, ptr %ptr
+ ret <2 x i8> %a
+}
+
+define i32 @load_v4i8(ptr %ptr, <4 x i8> %b){
+; CHECK-LABEL: load_v4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %ptr
+ %c = bitcast <4 x i8> %a to i32
+ ret i32 %c
+}
+
+define <32 x i8> @load_v32i8(ptr %ptr){
+; CHECK-LABEL: load_v32i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: ret
+ %a = load <32 x i8>, ptr %ptr
+ ret <32 x i8> %a
+}
+
+define <2 x i16> @load_v2i16(ptr %ptr){
+; CHECK-SD-LABEL: load_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ld1 { v0.h }[0], [x0]
+; CHECK-SD-NEXT: add x8, x0, #2
+; CHECK-SD-NEXT: ld1 { v0.h }[2], [x8]
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: load_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr h0, [x0]
+; CHECK-GI-NEXT: ldr h1, [x0, #2]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
+ %a = load <2 x i16>, ptr %ptr
+ ret <2 x i16> %a
+}
+
+define <16 x i16> @load_v16i16(ptr %ptr){
+; CHECK-LABEL: load_v16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: ret
+ %a = load <16 x i16>, ptr %ptr
+ ret <16 x i16> %a
+}
+
+define <1 x i32> @load_v1i32(ptr %ptr){
+; CHECK-LABEL: load_v1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr s0, [x0]
+; CHECK-NEXT: ret
+ %a = load <1 x i32>, ptr %ptr
+ ret <1 x i32> %a
+}
+
+define <8 x i32> @load_v8i32(ptr %ptr){
+; CHECK-LABEL: load_v8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: ret
+ %a = load <8 x i32>, ptr %ptr
+ ret <8 x i32> %a
+}
+
+define <4 x i64> @load_v4i64(ptr %ptr){
+; CHECK-LABEL: load_v4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: ret
+ %a = load <4 x i64>, ptr %ptr
+ ret <4 x i64> %a
+}
+
+; ===== Vectors with Non-Pow 2 Widths =====
+
+define <3 x i8> @load_v3i8(ptr %ptr){
+; CHECK-SD-LABEL: load_v3i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr s0, [x0]
+; CHECK-SD-NEXT: umov w0, v0.b[0]
+; CHECK-SD-NEXT: umov w1, v0.b[1]
+; CHECK-SD-NEXT: umov w2, v0.b[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: load_v3i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldrb w8, [x0]
+; CHECK-GI-NEXT: ldrb w1, [x0, #1]
+; CHECK-GI-NEXT: ldrb w2, [x0, #2]
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
+ %a = load <3 x i8>, ptr %ptr
+ ret <3 x i8> %a
+}
+
+define <7 x i8> @load_v7i8(ptr %ptr){
+; CHECK-SD-LABEL: load_v7i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: load_v7i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr b0, [x0]
+; CHECK-GI-NEXT: ldr b1, [x0, #1]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: ldr b1, [x0, #2]
+; CHECK-GI-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-NEXT: ldr b1, [x0, #3]
+; CHECK-GI-NEXT: mov v0.b[3], v1.b[0]
+; CHECK-GI-NEXT: ldr b1, [x0, #4]
+; CHECK-GI-NEXT: mov v0.b[4], v1.b[0]
+; CHECK-GI-NEXT: ldr b1, [x0, #5]
+; CHECK-GI-NEXT: mov v0.b[5], v1.b[0]
+; CHECK-GI-NEXT: ldr b1, [x0, #6]
+; CHECK-GI-NEXT: mov v0.b[6], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[7], v0.b[0]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
+ %a = load <7 x i8>, ptr %ptr
+ ret <7 x i8> %a
+}
+
+define <3 x i16> @load_v3i16(ptr %ptr){
+; CHECK-SD-LABEL: load_v3i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: load_v3i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr h0, [x0]
+; CHECK-GI-NEXT: ldr h1, [x0, #2]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ldr h1, [x0, #4]
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], v0.h[0]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
+ %a = load <3 x i16>, ptr %ptr
+ ret <3 x i16> %a
+}
+
+define <7 x i16> @load_v7i16(ptr %ptr){
+; CHECK-SD-LABEL: load_v7i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr q0, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: load_v7i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr h0, [x0]
+; CHECK-GI-NEXT: ldr h1, [x0, #2]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ldr h1, [x0, #4]
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: ldr h1, [x0, #6]
+; CHECK-GI-NEXT: mov v0.h[3], v1.h[0]
+; CHECK-GI-NEXT: ldr h1, [x0, #8]
+; CHECK-GI-NEXT: mov v0.h[4], v1.h[0]
+; CHECK-GI-NEXT: ldr h1, [x0, #10]
+; CHECK-GI-NEXT: mov v0.h[5], v1.h[0]
+; CHECK-GI-NEXT: ldr h1, [x0, #12]
+; CHECK-GI-NEXT: mov v0.h[6], v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[7], v0.h[0]
+; CHECK-GI-NEXT: ret
+ %a = load <7 x i16>, ptr %ptr
+ ret <7 x i16> %a
+}
+
+define <3 x i32> @load_v3i32(ptr %ptr){
+; CHECK-SD-LABEL: load_v3i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr q0, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: load_v3i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldp s0, s1, [x0]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: ldr s1, [x0, #8]
+; CHECK-GI-NEXT: mov v0.s[2], v1.s[0]
+; CHECK-GI-NEXT: mov v0.s[3], v0.s[0]
+; CHECK-GI-NEXT: ret
+ %a = load <3 x i32>, ptr %ptr
+ ret <3 x i32> %a
+}
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
index bf166954d80c..dc6fa9128e93 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -206,7 +206,7 @@ entry:
%aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
%in2 = load <16 x i8>, ptr %p2, align 16
%aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
- %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
+ %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc1, <16 x i8> %in2) #2
store <16 x i8> %aesmc1, ptr %x3, align 16
%in3 = load <16 x i8>, ptr %p3, align 16
%aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2
diff --git a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
index 041b2dc6af12..65148344096c 100644
--- a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
+++ b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
@@ -487,7 +487,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY $x0
; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[DEF]]
- ; CHECK-NEXT: INLINEASM &"ldr ${0:s}, $1", 8 /* mayload attdialect */, 2359306 /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %1, 262158 /* mem:m */, killed [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"ldr ${0:s}, $1", 8 /* mayload attdialect */, {{[0-9]+}} /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %1, 262158 /* mem:m */, killed [[COPY1]]
; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
index 46b714d8e5fb..bb9546af8bb7 100644
--- a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
@@ -21,9 +21,7 @@ define noundef i1 @logger(i32 noundef %logLevel, ptr %ea, ptr %pll) {
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB1_2: // %land.rhs
; CHECK-NEXT: ldr x8, [x1]
-; CHECK-NEXT: ldrb w8, [x8]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ldrb w0, [x8]
; CHECK-NEXT: ret
entry:
%0 = load i32, ptr %pll, align 4
diff --git a/llvm/test/CodeGen/AArch64/shufflevector.ll b/llvm/test/CodeGen/AArch64/shufflevector.ll
new file mode 100644
index 000000000000..df59eb8e629f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/shufflevector.ll
@@ -0,0 +1,565 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; CHECK-GI: warning: Instruction selection used fallback path for shufflevector_v2i1
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v4i8
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v32i8
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v2i16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v16i16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v2i1_zeroes
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v4i8_zeroes
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v32i8_zeroes
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v2i16_zeroes
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v16i16_zeroes
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v3i8
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shufflevector_v3i8_zeroes
+
+; ===== Legal Vector Types =====
+
+define <8 x i8> @shufflevector_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-SD-LABEL: shufflevector_v8i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: adrp x8, .LCPI0_0
+; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: ldr d1, [x8, :lo12:.LCPI0_0]
+; CHECK-SD-NEXT: tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v8i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: adrp x8, .LCPI0_0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI0_0]
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 8, i32 10, i32 12, i32 15>
+ ret <8 x i8> %c
+}
+
+define <16 x i8> @shufflevector_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SD-LABEL: shufflevector_v16i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: adrp x8, .LCPI1_0
+; CHECK-SD-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI1_0]
+; CHECK-SD-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-SD-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v16i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI1_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI1_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 8, i32 10, i32 12, i32 15, i32 2, i32 4, i32 6, i32 8, i32 25, i32 30, i32 31, i32 31>
+ ret <16 x i8> %c
+}
+
+define <4 x i16> @shufflevector_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: shufflevector_v4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uzp2 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: ret
+ %c = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %c
+}
+
+define <8 x i16> @shufflevector_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SD-LABEL: shufflevector_v8i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: adrp x8, .LCPI3_0
+; CHECK-SD-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI3_0]
+; CHECK-SD-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-SD-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v8i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI3_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI3_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 8, i32 10, i32 12, i32 15>
+ ret <8 x i16> %c
+}
+
+define <2 x i32> @shufflevector_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: shufflevector_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip2 v0.2s, v0.2s, v1.2s
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %c
+}
+
+define <4 x i32> @shufflevector_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: shufflevector_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uzp2 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: ret
+ %c = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %c
+}
+
+define <2 x i64> @shufflevector_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: shufflevector_v2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip2 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %c
+}
+
+; ===== Legal Vector Types with Zero Masks =====
+
+define <8 x i8> @shufflevector_v8i8_zeroes(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: shufflevector_v8i8_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v0.8b, v0.b[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i8> %c
+}
+
+define <16 x i8> @shufflevector_v16i8_zeroes(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: shufflevector_v16i8_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.16b, v0.b[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <16 x i8> %c
+}
+
+define <4 x i16> @shufflevector_v4i16_zeroes(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: shufflevector_v4i16_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v0.4h, v0.h[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ ret <4 x i16> %c
+}
+
+define <8 x i16> @shufflevector_v8i16_zeroes(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: shufflevector_v8i16_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.8h, v0.h[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %c
+}
+
+define <2 x i32> @shufflevector_v2i32_zeroes(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: shufflevector_v2i32_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v0.2s, v0.s[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x i32> %c
+}
+
+define <4 x i32> @shufflevector_v4i32_zeroes(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: shufflevector_v4i32_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ ret <4 x i32> %c
+}
+
+define <2 x i64> @shufflevector_v2i64_zeroes(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: shufflevector_v2i64_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.2d, v0.d[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x i64> %c
+}
+
+; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
+
+define <2 x i1> @shufflevector_v2i1(<2 x i1> %a, <2 x i1> %b){
+; CHECK-LABEL: shufflevector_v2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: mov v0.s[1], v1.s[1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i1> %a, <2 x i1> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i1> %c
+}
+
+define i32 @shufflevector_v4i8(<4 x i8> %a, <4 x i8> %b){
+; CHECK-LABEL: shufflevector_v4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #6
+; CHECK-NEXT: zip1 v1.4h, v1.4h, v0.4h
+; CHECK-NEXT: ext v0.8b, v0.8b, v1.8b, #4
+; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+ %c = shufflevector <4 x i8> %a, <4 x i8> %b, <4 x i32> <i32 1, i32 2, i32 4, i32 7>
+ %d = bitcast <4 x i8> %c to i32
+ ret i32 %d
+}
+
+define <32 x i8> @shufflevector_v32i8(<32 x i8> %a, <32 x i8> %b){
+; CHECK-LABEL: shufflevector_v32i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $q1_q2
+; CHECK-NEXT: adrp x8, .LCPI16_0
+; CHECK-NEXT: adrp x9, .LCPI16_1
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI16_0]
+; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI16_1]
+; CHECK-NEXT: tbl v0.16b, { v1.16b, v2.16b }, v3.16b
+; CHECK-NEXT: tbl v1.16b, { v1.16b, v2.16b }, v4.16b
+; CHECK-NEXT: ret
+ %c = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 4, i32 32, i32 32, i32 32, i32 5, i32 32, i32 32, i32 32, i32 6, i32 32, i32 32, i32 32, i32 7, i32 32, i32 32, i32 32>
+ ret <32 x i8> %c
+}
+
+define i32 @shufflevector_v2i16(<2 x i16> %a, <2 x i16> %b){
+; CHECK-LABEL: shufflevector_v2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: ext v0.8b, v0.8b, v1.8b, #4
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: strh w9, [sp, #12]
+; CHECK-NEXT: strh w8, [sp, #14]
+; CHECK-NEXT: ldr w0, [sp, #12]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i16> %a, <2 x i16> %b, <2 x i32> <i32 1, i32 2>
+ %d = bitcast <2 x i16> %c to i32
+ ret i32 %d
+}
+
+define <16 x i16> @shufflevector_v16i16(<16 x i16> %a, <16 x i16> %b){
+; CHECK-LABEL: shufflevector_v16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $q1_q2
+; CHECK-NEXT: adrp x8, .LCPI18_0
+; CHECK-NEXT: adrp x9, .LCPI18_1
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI18_0]
+; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI18_1]
+; CHECK-NEXT: tbl v0.16b, { v1.16b, v2.16b }, v3.16b
+; CHECK-NEXT: tbl v1.16b, { v1.16b, v2.16b }, v4.16b
+; CHECK-NEXT: ret
+  %c = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 2, i32 16, i32 16, i32 16, i32 3, i32 16, i32 16, i32 16>
+ ret <16 x i16> %c
+}
+
+define <1 x i32> @shufflevector_v1i32(<1 x i32> %a, <1 x i32> %b) {
+; CHECK-LABEL: shufflevector_v1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
+ %c = shufflevector <1 x i32> %a, <1 x i32> %b, <1 x i32> <i32 1>
+ ret <1 x i32> %c
+}
+
+define <8 x i32> @shufflevector_v8i32(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-SD-LABEL: shufflevector_v8i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: uzp1 v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: uzp2 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: mov v2.s[3], v3.s[3]
+; CHECK-SD-NEXT: mov v1.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v8i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI20_0
+; CHECK-GI-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
+; CHECK-GI-NEXT: uzp2 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI20_0]
+; CHECK-GI-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
+; CHECK-GI-NEXT: tbl v1.16b, { v2.16b, v3.16b }, v4.16b
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 8, i32 10, i32 12, i32 15>
+ ret <8 x i32> %c
+}
+
+define <4 x i64> @shufflevector_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-SD-LABEL: shufflevector_v4i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: zip2 v2.2d, v2.2d, v3.2d
+; CHECK-SD-NEXT: zip2 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT: mov v1.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v4i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: zip2 v0.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT: zip2 v1.2d, v2.2d, v3.2d
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i64> %c
+}
+
+; ===== Smaller/Larger Width Vectors with Zero Masks =====
+
+define <2 x i1> @shufflevector_v2i1_zeroes(<2 x i1> %a, <2 x i1> %b){
+; CHECK-LABEL: shufflevector_v2i1_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v0.2s, v0.s[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i1> %a, <2 x i1> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x i1> %c
+}
+
+define i32 @shufflevector_v4i8_zeroes(<4 x i8> %a, <4 x i8> %b){
+; CHECK-LABEL: shufflevector_v4i8_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v0.4h, v0.h[0]
+; CHECK-NEXT: xtn v0.8b, v0.8h
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+ %c = shufflevector <4 x i8> %a, <4 x i8> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %d = bitcast <4 x i8> %c to i32
+ ret i32 %d
+}
+
+define <32 x i8> @shufflevector_v32i8_zeroes(<32 x i8> %a, <32 x i8> %b){
+; CHECK-LABEL: shufflevector_v32i8_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.16b, v0.b[0]
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ret
+ %c = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <32 x i8> %c
+}
+
+define i32 @shufflevector_v2i16_zeroes(<2 x i16> %a, <2 x i16> %b){
+; CHECK-LABEL: shufflevector_v2i16_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v1.2s, v0.s[0]
+; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: strh w9, [sp, #12]
+; CHECK-NEXT: mov w8, v1.s[1]
+; CHECK-NEXT: strh w8, [sp, #14]
+; CHECK-NEXT: ldr w0, [sp, #12]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
+ %c = shufflevector <2 x i16> %a, <2 x i16> %b, <2 x i32> <i32 0, i32 0>
+ %d = bitcast <2 x i16> %c to i32
+ ret i32 %d
+}
+
+define <16 x i16> @shufflevector_v16i16_zeroes(<16 x i16> %a, <16 x i16> %b){
+; CHECK-LABEL: shufflevector_v16i16_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.8h, v0.h[0]
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ret
+ %c = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <16 x i16> %c
+}
+
+define <1 x i32> @shufflevector_v1i32_zeroes(<1 x i32> %a, <1 x i32> %b) {
+; CHECK-LABEL: shufflevector_v1i32_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %c = shufflevector <1 x i32> %a, <1 x i32> %b, <1 x i32> <i32 0>
+ ret <1 x i32> %c
+}
+
+define <8 x i32> @shufflevector_v8i32_zeroes(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: shufflevector_v8i32_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ret
+ %c = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i32> %c
+}
+
+define <4 x i64> @shufflevector_v4i64_zeroes(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: shufflevector_v4i64_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.2d, v0.d[0]
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: ret
+ %c = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ ret <4 x i64> %c
+}
+
+; ===== Vectors with Non-Pow 2 Widths =====
+
+define <3 x i8> @shufflevector_v3i8(<3 x i8> %a, <3 x i8> %b) {
+; CHECK-LABEL: shufflevector_v3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: mov w1, w2
+; CHECK-NEXT: mov w2, w4
+; CHECK-NEXT: ret
+ %c = shufflevector <3 x i8> %a, <3 x i8> %b, <3 x i32> <i32 1, i32 2, i32 4>
+ ret <3 x i8> %c
+}
+
+define <7 x i8> @shufflevector_v7i8(<7 x i8> %a, <7 x i8> %b) {
+; CHECK-SD-LABEL: shufflevector_v7i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: adrp x8, .LCPI31_0
+; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: ldr d1, [x8, :lo12:.LCPI31_0]
+; CHECK-SD-NEXT: tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v7i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: adrp x8, .LCPI31_0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI31_0]
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <7 x i8> %a, <7 x i8> %b, <7 x i32> <i32 1, i32 3, i32 5, i32 7, i32 8, i32 10, i32 12>
+ ret <7 x i8> %c
+}
+
+define <3 x i16> @shufflevector_v3i16(<3 x i16> %a, <3 x i16> %b) {
+; CHECK-SD-LABEL: shufflevector_v3i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: zip1 v1.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: zip2 v0.4h, v1.4h, v0.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v3i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: adrp x8, .LCPI32_0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI32_0]
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <3 x i16> %a, <3 x i16> %b, <3 x i32> <i32 1, i32 2, i32 4>
+ ret <3 x i16> %c
+}
+
+define <7 x i16> @shufflevector_v7i16(<7 x i16> %a, <7 x i16> %b) {
+; CHECK-SD-LABEL: shufflevector_v7i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: adrp x8, .LCPI33_0
+; CHECK-SD-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-SD-NEXT: ldr q2, [x8, :lo12:.LCPI33_0]
+; CHECK-SD-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-SD-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v7i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI33_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI33_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <7 x i16> %a, <7 x i16> %b, <7 x i32> <i32 1, i32 3, i32 5, i32 7, i32 8, i32 10, i32 12>
+ ret <7 x i16> %c
+}
+
+define <3 x i32> @shufflevector_v3i32(<3 x i32> %a, <3 x i32> %b) {
+; CHECK-SD-LABEL: shufflevector_v3i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: zip1 v1.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: zip2 v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shufflevector_v3i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI34_0
+; CHECK-GI-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI34_0]
+; CHECK-GI-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-GI-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-GI-NEXT: ret
+ %c = shufflevector <3 x i32> %a, <3 x i32> %b, <3 x i32> <i32 1, i32 2, i32 4>
+ ret <3 x i32> %c
+}
+
+; ===== Vectors with Non-Pow 2 Widths with Zero Masks =====
+
+define <3 x i8> @shufflevector_v3i8_zeroes(<3 x i8> %a, <3 x i8> %b) {
+; CHECK-LABEL: shufflevector_v3i8_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w1, w0
+; CHECK-NEXT: mov w2, w0
+; CHECK-NEXT: ret
+ %c = shufflevector <3 x i8> %a, <3 x i8> %b, <3 x i32> <i32 0, i32 0, i32 0>
+ ret <3 x i8> %c
+}
+
+define <7 x i8> @shufflevector_v7i8_zeroes(<7 x i8> %a, <7 x i8> %b) {
+; CHECK-LABEL: shufflevector_v7i8_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v0.8b, v0.b[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <7 x i8> %a, <7 x i8> %b, <7 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <7 x i8> %c
+}
+
+define <3 x i16> @shufflevector_v3i16_zeroes(<3 x i16> %a, <3 x i16> %b) {
+; CHECK-LABEL: shufflevector_v3i16_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: dup v0.4h, v0.h[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <3 x i16> %a, <3 x i16> %b, <3 x i32> <i32 0, i32 0, i32 0>
+ ret <3 x i16> %c
+}
+
+define <7 x i16> @shufflevector_v7i16_zeroes(<7 x i16> %a, <7 x i16> %b) {
+; CHECK-LABEL: shufflevector_v7i16_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.8h, v0.h[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <7 x i16> %a, <7 x i16> %b, <7 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <7 x i16> %c
+}
+
+define <3 x i32> @shufflevector_v3i32_zeroes(<3 x i32> %a, <3 x i32> %b) {
+; CHECK-LABEL: shufflevector_v3i32_zeroes:
+; CHECK: // %bb.0:
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+ %c = shufflevector <3 x i32> %a, <3 x i32> %b, <3 x i32> <i32 0, i32 0, i32 0>
+ ret <3 x i32> %c
+}
diff --git a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
index ab42e6463fee..bb4df6d8935b 100644
--- a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -396,7 +396,7 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: add_ulecmp_bad_i16_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: mov w0, #1 // =0x1
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
%tmp1 = icmp ule i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
diff --git a/llvm/test/CodeGen/AArch64/sme-disable-gisel-fisel.ll b/llvm/test/CodeGen/AArch64/sme-disable-gisel-fisel.ll
index 2a78012045ff..cd348be5d771 100644
--- a/llvm/test/CodeGen/AArch64/sme-disable-gisel-fisel.ll
+++ b/llvm/test/CodeGen/AArch64/sme-disable-gisel-fisel.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -fast-isel=true -global-isel=false -fast-isel-abort=0 -mtriple=aarch64-linux-gnu -mattr=+sme < %s \
+; RUN: llc -fast-isel=true -global-isel=false -fast-isel-abort=0 -mtriple=aarch64-linux-gnu -mattr=+sme2 < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-FISEL
-; RUN: llc -fast-isel=false -global-isel=true -global-isel-abort=0 -mtriple=aarch64-linux-gnu -mattr=+sme < %s \
+; RUN: llc -fast-isel=false -global-isel=true -global-isel-abort=0 -mtriple=aarch64-linux-gnu -mattr=+sme2 < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-GISEL
@@ -447,3 +447,64 @@ define float @frem_call_sm_compat(float %a, float %b) "aarch64_pstate_sm_compati
%res = frem float %a, %b
ret float %res
}
+
+;
+; Check ZT0 State
+;
+
+declare double @zt0_shared_callee(double) "aarch64_inout_zt0"
+
+define double @zt0_new_caller_to_zt0_shared_callee(double %x) nounwind noinline optnone "aarch64_new_zt0" {
+; CHECK-COMMON-LABEL: zt0_new_caller_to_zt0_shared_callee:
+; CHECK-COMMON: // %bb.0: // %prelude
+; CHECK-COMMON-NEXT: sub sp, sp, #80
+; CHECK-COMMON-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-COMMON-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-COMMON-NEXT: cbz x8, .LBB13_2
+; CHECK-COMMON-NEXT: b .LBB13_1
+; CHECK-COMMON-NEXT: .LBB13_1: // %save.za
+; CHECK-COMMON-NEXT: mov x8, sp
+; CHECK-COMMON-NEXT: str zt0, [x8]
+; CHECK-COMMON-NEXT: bl __arm_tpidr2_save
+; CHECK-COMMON-NEXT: ldr zt0, [x8]
+; CHECK-COMMON-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-COMMON-NEXT: b .LBB13_2
+; CHECK-COMMON-NEXT: .LBB13_2: // %entry
+; CHECK-COMMON-NEXT: smstart za
+; CHECK-COMMON-NEXT: zero { zt0 }
+; CHECK-COMMON-NEXT: bl zt0_shared_callee
+; CHECK-COMMON-NEXT: mov x8, #4631107791820423168 // =0x4045000000000000
+; CHECK-COMMON-NEXT: fmov d1, x8
+; CHECK-COMMON-NEXT: fadd d0, d0, d1
+; CHECK-COMMON-NEXT: smstop za
+; CHECK-COMMON-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
+; CHECK-COMMON-NEXT: add sp, sp, #80
+; CHECK-COMMON-NEXT: ret
+entry:
+ %call = call double @zt0_shared_callee(double %x)
+ %add = fadd double %call, 4.200000e+01
+ ret double %add;
+}
+
+define double @zt0_shared_caller_to_normal_callee(double %x) nounwind noinline optnone "aarch64_inout_zt0" {
+; CHECK-COMMON-LABEL: zt0_shared_caller_to_normal_callee:
+; CHECK-COMMON: // %bb.0: // %entry
+; CHECK-COMMON-NEXT: sub sp, sp, #80
+; CHECK-COMMON-NEXT: stp x30, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-COMMON-NEXT: mov x19, sp
+; CHECK-COMMON-NEXT: str zt0, [x19]
+; CHECK-COMMON-NEXT: smstop za
+; CHECK-COMMON-NEXT: bl normal_callee
+; CHECK-COMMON-NEXT: smstart za
+; CHECK-COMMON-NEXT: ldr zt0, [x19]
+; CHECK-COMMON-NEXT: mov x8, #4631107791820423168 // =0x4045000000000000
+; CHECK-COMMON-NEXT: fmov d1, x8
+; CHECK-COMMON-NEXT: fadd d0, d0, d1
+; CHECK-COMMON-NEXT: ldp x30, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-COMMON-NEXT: add sp, sp, #80
+; CHECK-COMMON-NEXT: ret
+entry:
+ %call = call double @normal_callee(double %x)
+ %add = fadd double %call, 4.200000e+01
+ ret double %add;
+}
diff --git a/llvm/test/CodeGen/AArch64/store.ll b/llvm/test/CodeGen/AArch64/store.ll
new file mode 100644
index 000000000000..bf22d79a4df9
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/store.ll
@@ -0,0 +1,342 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; ===== Legal Scalars =====
+define void @store_i8(i8 %a, ptr %ptr){
+; CHECK-LABEL: store_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: strb w0, [x1]
+; CHECK-NEXT: ret
+ store i8 %a, ptr %ptr
+ ret void
+}
+
+define void @store_i16(i16 %a, ptr %ptr){
+; CHECK-LABEL: store_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: strh w0, [x1]
+; CHECK-NEXT: ret
+ store i16 %a, ptr %ptr
+ ret void
+}
+
+define void @store_i32(i32 %a, ptr %ptr){
+; CHECK-LABEL: store_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ store i32 %a, ptr %ptr
+ ret void
+}
+
+define void @store_i64(i64 %a, ptr %ptr){
+; CHECK-LABEL: store_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x0, [x1]
+; CHECK-NEXT: ret
+ store i64 %a, ptr %ptr
+ ret void
+}
+
+; ===== Legal Vector Types =====
+
+define void @store_v8i8(<8 x i8> %a, ptr %ptr){
+; CHECK-LABEL: store_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+ store <8 x i8> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v16i8(<16 x i8> %a, ptr %ptr){
+; CHECK-LABEL: store_v16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+ store <16 x i8> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v4i16(<4 x i16> %a, ptr %ptr){
+; CHECK-LABEL: store_v4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+ store <4 x i16> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v8i16(<8 x i16> %a, ptr %ptr){
+; CHECK-LABEL: store_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+ store <8 x i16> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v2i32(<2 x i32> %a, ptr %ptr){
+; CHECK-LABEL: store_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+ store <2 x i32> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v4i32(<4 x i32> %a, ptr %ptr){
+; CHECK-LABEL: store_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+ store <4 x i32> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v2i64(<2 x i64> %a, ptr %ptr){
+; CHECK-LABEL: store_v2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+ store <2 x i64> %a, ptr %ptr
+ ret void
+}
+
+; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
+
+define void @store_v2i8(<2 x i8> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v2i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: strb w9, [x0]
+; CHECK-SD-NEXT: strb w8, [x0, #1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v2i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: str b0, [x0]
+; CHECK-GI-NEXT: str b1, [x0, #1]
+; CHECK-GI-NEXT: ret
+ store <2 x i8> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v4i8(i32 %a, ptr %ptr) {
+; CHECK-LABEL: store_v4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str w0, [x1]
+; CHECK-NEXT: ret
+ %c = bitcast i32 %a to <4 x i8>
+ store <4 x i8> %c, ptr %ptr
+ ret void
+}
+
+define void @store_v32i8(<32 x i8> %a, ptr %ptr){
+; CHECK-LABEL: store_v32i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: ret
+ store <32 x i8> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v2i16(<2 x i16> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: strh w9, [x0]
+; CHECK-SD-NEXT: strh w8, [x0, #2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: str h0, [x0]
+; CHECK-GI-NEXT: str h1, [x0, #2]
+; CHECK-GI-NEXT: ret
+ store <2 x i16> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v16i16(<16 x i16> %a, ptr %ptr){
+; CHECK-LABEL: store_v16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: ret
+ store <16 x i16> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v1i32(<1 x i32> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v1i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: str s0, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v1i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str s0, [x0]
+; CHECK-GI-NEXT: ret
+ store <1 x i32> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v8i32(<8 x i32> %a, ptr %ptr){
+; CHECK-LABEL: store_v8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: ret
+ store <8 x i32> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v4i64(<4 x i64> %a, ptr %ptr){
+; CHECK-LABEL: store_v4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: ret
+ store <4 x i64> %a, ptr %ptr
+ ret void
+}
+
+; ===== Vectors with Non-Pow 2 Widths =====
+
+define void @store_v3i8(<3 x i8> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v3i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w1
+; CHECK-SD-NEXT: mov v0.h[2], w2
+; CHECK-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-SD-NEXT: str s0, [sp, #12]
+; CHECK-SD-NEXT: ldrh w8, [sp, #12]
+; CHECK-SD-NEXT: strb w2, [x3, #2]
+; CHECK-SD-NEXT: strh w8, [x3]
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v3i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: strb w0, [x3]
+; CHECK-GI-NEXT: strb w1, [x3, #1]
+; CHECK-GI-NEXT: strb w2, [x3, #2]
+; CHECK-GI-NEXT: ret
+ store <3 x i8> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v7i8(<7 x i8> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v7i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add x8, x0, #6
+; CHECK-SD-NEXT: add x9, x0, #4
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: str s0, [x0]
+; CHECK-SD-NEXT: st1 { v0.b }[6], [x8]
+; CHECK-SD-NEXT: st1 { v0.h }[2], [x9]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v7i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #1
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: add x9, x0, #2
+; CHECK-GI-NEXT: st1 { v0.b }[0], [x0]
+; CHECK-GI-NEXT: st1 { v0.b }[1], [x8]
+; CHECK-GI-NEXT: add x8, x0, #3
+; CHECK-GI-NEXT: st1 { v0.b }[3], [x8]
+; CHECK-GI-NEXT: add x8, x0, #4
+; CHECK-GI-NEXT: st1 { v0.b }[4], [x8]
+; CHECK-GI-NEXT: add x8, x0, #5
+; CHECK-GI-NEXT: st1 { v0.b }[5], [x8]
+; CHECK-GI-NEXT: add x8, x0, #6
+; CHECK-GI-NEXT: st1 { v0.b }[2], [x9]
+; CHECK-GI-NEXT: st1 { v0.b }[6], [x8]
+; CHECK-GI-NEXT: ret
+ store <7 x i8> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v3i16(<3 x i16> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v3i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add x8, x0, #4
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: str s0, [x0]
+; CHECK-SD-NEXT: st1 { v0.h }[2], [x8]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v3i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #2
+; CHECK-GI-NEXT: add x9, x0, #4
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: str h0, [x0]
+; CHECK-GI-NEXT: st1 { v0.h }[1], [x8]
+; CHECK-GI-NEXT: st1 { v0.h }[2], [x9]
+; CHECK-GI-NEXT: ret
+ store <3 x i16> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v7i16(<7 x i16> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v7i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add x8, x0, #12
+; CHECK-SD-NEXT: add x9, x0, #8
+; CHECK-SD-NEXT: str d0, [x0]
+; CHECK-SD-NEXT: st1 { v0.h }[6], [x8]
+; CHECK-SD-NEXT: st1 { v0.s }[2], [x9]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v7i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #2
+; CHECK-GI-NEXT: add x9, x0, #4
+; CHECK-GI-NEXT: str h0, [x0]
+; CHECK-GI-NEXT: st1 { v0.h }[1], [x8]
+; CHECK-GI-NEXT: add x8, x0, #6
+; CHECK-GI-NEXT: st1 { v0.h }[3], [x8]
+; CHECK-GI-NEXT: add x8, x0, #8
+; CHECK-GI-NEXT: st1 { v0.h }[4], [x8]
+; CHECK-GI-NEXT: add x8, x0, #10
+; CHECK-GI-NEXT: st1 { v0.h }[5], [x8]
+; CHECK-GI-NEXT: add x8, x0, #12
+; CHECK-GI-NEXT: st1 { v0.h }[2], [x9]
+; CHECK-GI-NEXT: st1 { v0.h }[6], [x8]
+; CHECK-GI-NEXT: ret
+ store <7 x i16> %a, ptr %ptr
+ ret void
+}
+
+define void @store_v3i32(<3 x i32> %a, ptr %ptr){
+; CHECK-SD-LABEL: store_v3i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add x8, x0, #8
+; CHECK-SD-NEXT: str d0, [x0]
+; CHECK-SD-NEXT: st1 { v0.s }[2], [x8]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: store_v3i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #4
+; CHECK-GI-NEXT: add x9, x0, #8
+; CHECK-GI-NEXT: str s0, [x0]
+; CHECK-GI-NEXT: st1 { v0.s }[1], [x8]
+; CHECK-GI-NEXT: st1 { v0.s }[2], [x9]
+; CHECK-GI-NEXT: ret
+ store <3 x i32> %a, ptr %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll
index 82bf756f8228..c7c102f5d567 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s
@@ -150,6 +150,46 @@ define <vscale x 16 x i1> @chained_reinterpret() {
ret <vscale x 16 x i1> %out
}
+define <vscale x 16 x i1> @reinterpret_scalar_bool_h(i1 %x){
+; CHECK-LABEL: reinterpret_scalar_bool_h:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: sbfx x8, x0, #0, #1
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ret
+ %.splatinsert = insertelement <vscale x 8 x i1> poison, i1 %x, i64 0
+ %.splat = shufflevector <vscale x 8 x i1> %.splatinsert, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %.splat)
+ ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @reinterpret_scalar_bool_s(i1 %x){
+; CHECK-LABEL: reinterpret_scalar_bool_s:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: sbfx x8, x0, #0, #1
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ret
+ %.splatinsert = insertelement <vscale x 4 x i1> poison, i1 %x, i64 0
+ %.splat = shufflevector <vscale x 4 x i1> %.splatinsert, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %.splat)
+ ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @reinterpret_scalar_bool_q(i1 %x){
+; CHECK-LABEL: reinterpret_scalar_bool_q:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT: sbfx x8, x0, #0, #1
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ret
+ %.splatinsert = insertelement <vscale x 2 x i1> poison, i1 %x, i64 0
+ %.splat = shufflevector <vscale x 2 x i1> %.splatinsert, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %.splat)
+ ret <vscale x 16 x i1> %out
+}
+
+
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
diff --git a/llvm/test/CodeGen/AArch64/sve-localstackalloc.mir b/llvm/test/CodeGen/AArch64/sve-localstackalloc.mir
index 3fbb7889c8b7..6063c8dfc792 100644
--- a/llvm/test/CodeGen/AArch64/sve-localstackalloc.mir
+++ b/llvm/test/CodeGen/AArch64/sve-localstackalloc.mir
@@ -48,7 +48,7 @@ body: |
%2:gpr32 = COPY $w0
%1:zpr = COPY $z1
%0:zpr = COPY $z0
- %5:ppr_3b = PTRUE_B 31
+ %5:ppr_3b = PTRUE_B 31, implicit $vg
%6:gpr64sp = ADDXri %stack.0, 0, 0
ST1B_IMM %1, %5, %6, 1 :: (store unknown-size, align 16)
ST1B_IMM %0, %5, %stack.0, 0 :: (store unknown-size into %stack.0, align 16)
diff --git a/llvm/test/CodeGen/AArch64/sve-pfalse-machine-cse.mir b/llvm/test/CodeGen/AArch64/sve-pfalse-machine-cse.mir
index b76fe7821b6c..8395a7619fbb 100644
--- a/llvm/test/CodeGen/AArch64/sve-pfalse-machine-cse.mir
+++ b/llvm/test/CodeGen/AArch64/sve-pfalse-machine-cse.mir
@@ -11,15 +11,15 @@ body: |
; CHECK: liveins: $p0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:ppr = COPY $p0
- ; CHECK-NEXT: [[PFALSE:%[0-9]+]]:ppr = PFALSE
+ ; CHECK-NEXT: [[PFALSE:%[0-9]+]]:ppr = PFALSE implicit $vg
; CHECK-NEXT: [[UZP1_PPP_B:%[0-9]+]]:ppr = UZP1_PPP_B [[COPY]], [[PFALSE]]
; CHECK-NEXT: [[UZP1_PPP_B1:%[0-9]+]]:ppr = UZP1_PPP_B killed [[UZP1_PPP_B]], [[PFALSE]]
; CHECK-NEXT: $p0 = COPY [[UZP1_PPP_B1]]
; CHECK-NEXT: RET_ReallyLR implicit $p0
%0:ppr = COPY $p0
- %2:ppr = PFALSE
+ %2:ppr = PFALSE implicit $vg
%3:ppr = UZP1_PPP_B %0, %2
- %4:ppr = PFALSE
+ %4:ppr = PFALSE implicit $vg
%5:ppr = UZP1_PPP_B killed %3, %4
$p0 = COPY %5
RET_ReallyLR implicit $p0
diff --git a/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir b/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir
index df0e50de4d1a..ae70f91a4ec6 100644
--- a/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir
+++ b/llvm/test/CodeGen/AArch64/sve-pseudos-expand-undef.mir
@@ -26,7 +26,7 @@ body: |
name: expand_mls_to_msb
body: |
bb.0:
- renamable $p0 = PTRUE_B 31
+ renamable $p0 = PTRUE_B 31, implicit $vg
renamable $z0 = MLS_ZPZZZ_B_UNDEF killed renamable $p0, killed renamable $z2, killed renamable $z0, killed renamable $z1
RET_ReallyLR implicit $z0
...
@@ -36,7 +36,7 @@ body: |
name: expand_mla_to_mad
body: |
bb.0:
- renamable $p0 = PTRUE_B 31
+ renamable $p0 = PTRUE_B 31, implicit $vg
renamable $z0 = MLA_ZPZZZ_B_UNDEF killed renamable $p0, killed renamable $z2, killed renamable $z0, killed renamable $z1
RET_ReallyLR implicit $z0
...
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmpeq.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmpeq.mir
index 81318aa5c2a5..5169113697dc 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmpeq.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmpeq.mir
@@ -174,7 +174,7 @@ body: |
%1:zpr = COPY $z0
%0:ppr_3b = COPY $p0
%2:ppr = CMPEQ_PPzZI_B %0, %1, 0, implicit-def dead $nzcv
- %3:ppr = PTRUE_B 31
+ %3:ppr = PTRUE_B 31, implicit $vg
PTEST_PP killed %3, killed %2, implicit-def $nzcv
%4:gpr32 = COPY $wzr
%5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
@@ -409,14 +409,14 @@ body: |
; CHECK-LABEL: name: cmpeq_imm_nxv16i8_ptest_not_all_active
; CHECK: %2:ppr = CMPEQ_PPzZI_B %0, %1, 0, implicit-def dead $nzcv
- ; CHECK-NEXT: %3:ppr = PTRUE_B 0
+ ; CHECK-NEXT: %3:ppr = PTRUE_B 0, implicit $vg
; CHECK-NEXT: PTEST_PP killed %3, killed %2, implicit-def $nzcv
; CHECK-NEXT: %4:gpr32 = COPY $wzr
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:zpr = COPY $z0
%0:ppr_3b = COPY $p0
%2:ppr = CMPEQ_PPzZI_B %0, %1, 0, implicit-def dead $nzcv
- %3:ppr = PTRUE_B 0
+ %3:ppr = PTRUE_B 0, implicit $vg
PTEST_PP killed %3, killed %2, implicit-def $nzcv
%4:gpr32 = COPY $wzr
%5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
@@ -446,14 +446,14 @@ body: |
; CHECK-LABEL: name: cmpeq_imm_nxv16i8_ptest_of_halfs
; CHECK: %2:ppr = CMPEQ_PPzZI_B %0, %1, 0, implicit-def dead $nzcv
- ; CHECK-NEXT: %3:ppr = PTRUE_H 31
+ ; CHECK-NEXT: %3:ppr = PTRUE_H 31, implicit $vg
; CHECK-NEXT: PTEST_PP killed %3, killed %2, implicit-def $nzcv
; CHECK-NEXT: %4:gpr32 = COPY $wzr
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:zpr = COPY $z0
%0:ppr_3b = COPY $p0
%2:ppr = CMPEQ_PPzZI_B %0, %1, 0, implicit-def dead $nzcv
- %3:ppr = PTRUE_H 31
+ %3:ppr = PTRUE_H 31, implicit $vg
PTEST_PP killed %3, killed %2, implicit-def $nzcv
%4:gpr32 = COPY $wzr
%5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilege.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilege.mir
index 8f7467d99154..c1d9dfff7344 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilege.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilege.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEGE_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+    %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEGE_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEGE_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEGE_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEGE_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEGE_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEGE_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEGE_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 0
+ %2:ppr = PTRUE_B 0, implicit $vg
%3:ppr = WHILEGE_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILEGE_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILEGE_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILEGE_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilegt.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilegt.mir
index 217d984560e3..c6df21f85db7 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilegt.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilegt.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEGT_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEGT_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEGT_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEGT_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEGT_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEGT_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEGT_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEGT_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 1
+ %2:ppr = PTRUE_H 1, implicit $vg
%3:ppr = WHILEGT_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEGT_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILEGT_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILEGT_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehi.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehi.mir
index 8d6f466c6b73..7d8aed3c325a 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehi.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehi.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEHI_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEHI_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEHI_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEHI_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEHI_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEHI_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEHI_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEHI_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 29
+ %2:ppr = PTRUE_S 29, implicit $vg
%3:ppr = WHILEHI_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEHI_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILEHI_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILEHI_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehs.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehs.mir
index da76a30f843b..f4dbfbc3db1c 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehs.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilehs.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEHS_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEHS_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEHS_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEHS_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEHS_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEHS_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEHS_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEHS_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 30
+ %2:ppr = PTRUE_D 30, implicit $vg
%3:ppr = WHILEHS_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEHS_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILEHS_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILEHS_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilele.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilele.mir
index 32954d593c1d..dc2265490cb5 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilele.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilele.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELE_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELE_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELE_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELE_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELE_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELE_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELE_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELE_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 7
+ %2:ppr = PTRUE_B 7, implicit $vg
%3:ppr = WHILELE_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILELE_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILELE_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILELE_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelo.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelo.mir
index cca0ab8ef210..4d66e3e57da8 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelo.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelo.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELO_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELO_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELO_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELO_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELO_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELO_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELO_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELO_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 6
+ %2:ppr = PTRUE_H 6, implicit $vg
%3:ppr = WHILELO_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELO_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILELO_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILELO_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilels.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilels.mir
index 4bae3a1986f4..ea02f8c70ef8 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilels.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilels.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELS_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELS_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELS_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELS_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELS_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELS_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELS_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELS_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 5
+ %2:ppr = PTRUE_S 5, implicit $vg
%3:ppr = WHILELS_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELS_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILELS_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILELS_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelt.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelt.mir
index 3c6a9e21b4c6..d08781f203e3 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelt.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilelt.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELT_PWW_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -63,7 +63,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELT_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -98,7 +98,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELT_PWW_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -133,7 +133,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILELT_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -168,7 +168,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELT_PWW_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -203,7 +203,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILELT_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -238,7 +238,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELT_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -273,7 +273,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILELT_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -313,7 +313,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_D 4
+ %2:ppr = PTRUE_D 4, implicit $vg
%3:ppr = WHILELT_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -353,7 +353,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILELT_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -393,7 +393,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILELT_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -433,7 +433,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr32 = COPY $w1
%0:gpr32 = COPY $w0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILELT_PWW_D %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilerw.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilerw.mir
index 27cdf593df77..d800009b9537 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilerw.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilerw.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILERW_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -65,7 +65,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILERW_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -100,7 +100,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILERW_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -135,7 +135,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILERW_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -175,7 +175,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 0
+ %2:ppr = PTRUE_B 0, implicit $vg
%3:ppr = WHILERW_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -215,7 +215,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILERW_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -255,7 +255,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILERW_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -295,7 +295,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILERW_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilewr.mir b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilewr.mir
index 3b49b1ec2c80..9f8b7c3197ec 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilewr.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-whilewr.mir
@@ -30,7 +30,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 31
+ %2:ppr = PTRUE_B 31, implicit $vg
%3:ppr = WHILEWR_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -65,7 +65,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%4:ppr = WHILEWR_PXX_H %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -100,7 +100,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%4:ppr = WHILEWR_PXX_S %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -135,7 +135,7 @@ body: |
; CHECK-NOT: PTEST
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%4:ppr = WHILEWR_PXX_D %0, %1, implicit-def dead $nzcv
PTEST_PP %2, %4, implicit-def $nzcv
%6:gpr32 = COPY $wzr
@@ -175,7 +175,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_B 0
+ %2:ppr = PTRUE_B 0, implicit $vg
%3:ppr = WHILEWR_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -215,7 +215,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_H 31
+ %2:ppr = PTRUE_H 31, implicit $vg
%3:ppr = WHILEWR_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -255,7 +255,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_S 31
+ %2:ppr = PTRUE_S 31, implicit $vg
%3:ppr = WHILEWR_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
@@ -295,7 +295,7 @@ body: |
; CHECK-NEXT: %5:gpr32 = CSINCWr %4, $wzr, 0, implicit $nzcv
%1:gpr64 = COPY $x1
%0:gpr64 = COPY $x0
- %2:ppr = PTRUE_D 31
+ %2:ppr = PTRUE_D 31, implicit $vg
%3:ppr = WHILEWR_PXX_B %0, %1, implicit-def dead $nzcv
PTEST_PP killed %2, killed %3, implicit-def $nzcv
%4:gpr32 = COPY $wzr
diff --git a/llvm/test/CodeGen/AArch64/sve2p1_copy_pnr.mir b/llvm/test/CodeGen/AArch64/sve2p1_copy_pnr.mir
index d6a87a42a79e..5e5db2ac4e20 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1_copy_pnr.mir
+++ b/llvm/test/CodeGen/AArch64/sve2p1_copy_pnr.mir
@@ -13,10 +13,10 @@ machineFunctionInfo:
body: |
bb.0:
; CHECK-LABEL: name: pnr_to_ppr
- ; CHECK: renamable $pn8 = PTRUE_C_D
+ ; CHECK: renamable $pn8 = PTRUE_C_D implicit $vg
; CHECK-NEXT: $p0 = ORR_PPzPP $p8, $p8, killed $p8
; CHECK-NEXT: RET_ReallyLR implicit killed $p0
- renamable $pn8 = PTRUE_C_D
+ renamable $pn8 = PTRUE_C_D implicit $vg
$p0 = COPY killed renamable $pn8
RET_ReallyLR implicit killed $p0
@@ -34,10 +34,10 @@ machineFunctionInfo:
body: |
bb.0:
; CHECK-LABEL: name: ppr_to_pnr
- ; CHECK: renamable $p8 = PTRUE_H 31
+ ; CHECK: renamable $p8 = PTRUE_H 31, implicit $vg
; CHECK-NEXT: $p0 = ORR_PPzPP $p8, $p8, killed $p8, implicit-def $pn0
; CHECK-NEXT: RET_ReallyLR implicit killed $pn0
- renamable $p8 = PTRUE_H 31
+ renamable $p8 = PTRUE_H 31, implicit $vg
$pn0 = COPY killed renamable $p8
RET_ReallyLR implicit killed $pn0
@@ -55,10 +55,10 @@ machineFunctionInfo:
body: |
bb.0:
; CHECK-LABEL: name: pnr_to_pnr
- ; CHECK: renamable $pn8 = PTRUE_C_H
+ ; CHECK: renamable $pn8 = PTRUE_C_H implicit $vg
; CHECK-NEXT: $p0 = ORR_PPzPP $p8, $p8, killed $p8, implicit-def $pn0
; CHECK-NEXT: RET_ReallyLR implicit killed $pn0
- renamable $pn8 = PTRUE_C_H
+ renamable $pn8 = PTRUE_C_H implicit $vg
$pn0 = COPY killed renamable $pn8
RET_ReallyLR implicit killed $pn0
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
index ccfbf456693d..39edc03ced44 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
@@ -246,9 +246,8 @@ define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
; CHECK-LABEL: safe_sub_var_imm:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldrb w8, [x0]
-; CHECK-NEXT: add w8, w8, #8
-; CHECK-NEXT: and w8, w8, #0xff
-; CHECK-NEXT: cmp w8, #252
+; CHECK-NEXT: sub w8, w8, #248
+; CHECK-NEXT: cmn w8, #4
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index 0f70c1996d6e..d4d5cb18bbd3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -amdgpu-global-isel-risky-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
; Divergent phis that don't require lowering using lane mask merging
@@ -147,32 +147,28 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa
; GFX10-LABEL: divergent_i1_phi_used_inside_loop_bigger_loop_body:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e32 vcc_lo, 1.0, v1
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0x3e8
-; GFX10-NEXT: v_mov_b32_e32 v8, s5
+; GFX10-NEXT: v_mov_b32_e32 v8, s4
; GFX10-NEXT: ; implicit-def: $sgpr6
-; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc_lo
; GFX10-NEXT: s_branch .LBB3_2
; GFX10-NEXT: .LBB3_1: ; %loop_body
; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX10-NEXT: v_cvt_f32_u32_e32 v9, v8
-; GFX10-NEXT: s_xor_b32 s4, s4, -1
+; GFX10-NEXT: s_xor_b32 s5, s5, -1
; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v8
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v9, v0
-; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo
-; GFX10-NEXT: s_and_b32 s4, exec_lo, s4
-; GFX10-NEXT: s_or_b32 s6, s6, s4
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_and_b32 s7, exec_lo, s5
+; GFX10-NEXT: s_or_b32 s6, s6, s7
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execz .LBB3_6
; GFX10-NEXT: .LBB3_2: ; %loop_start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_and_b32_e32 v9, 1, v9
; GFX10-NEXT: v_cmp_ge_i32_e32 vcc_lo, 0x3e8, v8
; GFX10-NEXT: s_mov_b32 s7, 1
-; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v9
; GFX10-NEXT: s_cbranch_vccz .LBB3_4
; GFX10-NEXT: ; %bb.3: ; %else
; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1
@@ -189,7 +185,7 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa
; GFX10-NEXT: flat_store_dword v[4:5], v1
; GFX10-NEXT: s_branch .LBB3_1
; GFX10-NEXT: .LBB3_6: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6
; GFX10-NEXT: flat_store_dword v[2:3], v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
index 5549c89dc402..9b0bd2752b82 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
@@ -33,6 +33,7 @@ body: |
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.2
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -46,7 +47,8 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.4(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = G_PHI %14(s1), %bb.3, [[ICMP]](s1), %bb.0
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY6]](s1), %bb.0, %20(s1), %bb.3
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
@@ -54,12 +56,13 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C3]]
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[PHI]](s1), [[C5]], [[C4]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY7]](s1), [[C5]], [[C4]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -126,9 +129,10 @@ body: |
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr0
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[COPY4]](s1)
; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.2
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -137,17 +141,17 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C2]]
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY5]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY6]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[ICMP]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY6]](s1), [[C4]], [[C3]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY7]](s1), [[C4]], [[C3]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -292,19 +296,21 @@ body: |
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY1]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[FCMP]](s1)
; GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %39(s1), %bb.5
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %15(s32), %bb.5, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.5
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = G_PHI [[FCMP]](s1), %bb.0, %19(s1), %bb.5
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %42(s1), %bb.5
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY8]](s1), %bb.0, %39(s1), %bb.5
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI %15(s32), %bb.5, [[C]](s32), %bb.0
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.5
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1000
- ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[PHI2]](s32), [[C3]]
+ ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[PHI3]](s32), [[C3]]
; GFX10-NEXT: G_BRCOND [[ICMP]](s1), %bb.4
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
@@ -336,26 +342,27 @@ body: |
; GFX10-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C8:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[PHI3]], [[C8]]
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[XOR1]](s1)
- ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI2]](s32)
+ ; GFX10-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[COPY10]], [[C8]]
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[XOR1]](s1)
+ ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI3]](s32)
; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C9]]
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI1]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI3]], [[C9]]
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI2]](s32)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[XOR1]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.5
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
; GFX10-NEXT: [[C10:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C11:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY10]](s1), [[C11]], [[C10]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY13]](s1), [[C11]], [[C10]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
@@ -475,6 +482,7 @@ body: |
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[AND1]](s32)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[TRUNC1]], [[C5]]
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_32(s1) = COPY [[C5]](s1)
; GFX10-NEXT: G_BRCOND [[XOR]](s1), %bb.2
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -487,9 +495,10 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %30(s32), %bb.4, [[DEF]](s32), %bb.0
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = G_PHI %32(s1), %bb.4, [[C5]](s1), %bb.0
- ; GFX10-NEXT: G_BRCOND [[PHI1]](s1), %bb.5
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY3]](s1), %bb.0, %58(s1), %bb.4
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %30(s32), %bb.4, [[DEF]](s32), %bb.0
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: G_BRCOND [[COPY4]](s1), %bb.5
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
@@ -517,6 +526,7 @@ body: |
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[PHI5]](s32), [[AMDGPU_BUFFER_LOAD]]
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP2]]
; GFX10-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C10]](s1)
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
@@ -527,7 +537,7 @@ body: |
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[C11]]
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
- ; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[PHI]](s32), %bb.2, [[OR2]](s32), %bb.5
+ ; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[PHI1]](s32), %bb.2, [[OR2]](s32), %bb.5
; GFX10-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[LOAD]](<8 x s32>)
; GFX10-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY1]]
; GFX10-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
index e9df20f9688e..49c232661c6d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -amdgpu-global-isel-risky-select -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
; This file contains various tests that have divergent i1s used outside of
; the loop. These are lane masks is sgpr and need to have correct value in
@@ -137,28 +137,24 @@ define void @divergent_i1_xor_used_outside_loop(float %val, float %pre.cond.val,
; GFX10-LABEL: divergent_i1_xor_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e32 vcc_lo, 1.0, v1
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, s4
; GFX10-NEXT: ; implicit-def: $sgpr6
-; GFX10-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc_lo
; GFX10-NEXT: .LBB2_1: ; %loop
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
-; GFX10-NEXT: v_cvt_f32_u32_e32 v5, v1
+; GFX10-NEXT: v_cvt_f32_u32_e32 v4, v1
+; GFX10-NEXT: s_xor_b32 s5, s5, -1
; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v1
-; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX10-NEXT: v_cmp_gt_f32_e64 s4, v5, v0
-; GFX10-NEXT: s_xor_b32 s7, vcc_lo, -1
-; GFX10-NEXT: s_or_b32 s5, s4, s5
-; GFX10-NEXT: v_mov_b32_e32 v4, s7
-; GFX10-NEXT: s_andn2_b32 s4, s6, exec_lo
-; GFX10-NEXT: s_and_b32 s6, exec_lo, s7
-; GFX10-NEXT: s_or_b32 s6, s4, s6
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v4, v0
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo
+; GFX10-NEXT: s_and_b32 s7, exec_lo, s5
+; GFX10-NEXT: s_or_b32 s6, s6, s7
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_1
; GFX10-NEXT: ; %bb.2: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6
; GFX10-NEXT: flat_store_dword v[2:3], v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
@@ -197,7 +193,7 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts,
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: s_mov_b32 s6, 1
+; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX10-NEXT: s_cbranch_execz .LBB3_6
; GFX10-NEXT: ; %bb.1: ; %loop.start.preheader
@@ -332,7 +328,7 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s7
; GFX10-NEXT: v_cmp_ne_u32_e64 s4, v1, v4
-; GFX10-NEXT: s_mov_b32 s7, 1
+; GFX10-NEXT: s_mov_b32 s7, -1
; GFX10-NEXT: ; implicit-def: $vgpr5
; GFX10-NEXT: s_and_saveexec_b32 s8, s4
; GFX10-NEXT: s_cbranch_execz .LBB4_1
@@ -410,7 +406,7 @@ define amdgpu_ps void @divergent_i1_freeze_used_outside_loop(i32 %n, ptr addrspa
; GFX10-LABEL: divergent_i1_freeze_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_mov_b32 s0, 0
-; GFX10-NEXT: s_mov_b32 s3, 1
+; GFX10-NEXT: s_mov_b32 s3, -1
; GFX10-NEXT: v_mov_b32_e32 v5, s0
; GFX10-NEXT: ; implicit-def: $sgpr1
; GFX10-NEXT: ; implicit-def: $sgpr2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
index ace9bec6e1c2..206c0adb6c0c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
@@ -175,14 +175,15 @@ body: |
; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.1
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[COPY14]](s1)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY14]](s1), [[C7]], [[C6]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY15]](s1), [[C7]], [[C6]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV1]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
@@ -255,37 +256,40 @@ body: |
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY1]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[FCMP]](s1)
; GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %24(s1), %bb.1
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %9(s32), %bb.1, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %11(s32), %bb.1
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = G_PHI [[FCMP]](s1), %bb.0, %13(s1), %bb.1
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %27(s1), %bb.1
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, %24(s1), %bb.1
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI %9(s32), %bb.1, [[C]](s32), %bb.0
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %11(s32), %bb.1
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[PHI3]], [[C2]]
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
- ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI2]](s32)
+ ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY6]], [[C2]]
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
+ ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI3]](s32)
; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C3]]
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI1]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY5]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI3]], [[C3]]
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI2]](s32)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY7]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.1
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI4]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY6]](s1), [[C5]], [[C4]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY9]](s1), [[C5]], [[C4]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
@@ -349,7 +353,8 @@ body: |
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C1]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C1]](s1)
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY5]](s1)
; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -365,26 +370,26 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[C1]](s1), %bb.0, %39(s1), %bb.8
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY5]](s1), %bb.0, %40(s1), %bb.8
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY6]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY7]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.7(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF3]](s1), %bb.1, %72(s1), %bb.7
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.1, %61(s1), %bb.7
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.1, %48(s1), %bb.7
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF3]](s1), %bb.1, %73(s1), %bb.7
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.1, %62(s1), %bb.7
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.1, %49(s1), %bb.7
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[C2]](s32), %bb.1, %17(s32), %bb.7
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI %19(s32), %bb.7, [[C2]](s32), %bb.1
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[PHI5]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C4]](s32)
@@ -392,14 +397,14 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD]](s32), [[C5]]
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY12]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -407,16 +412,16 @@ body: |
; GFX10-NEXT: successors: %bb.7(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[C6]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[C6]](s1)
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI5]], [[C7]]
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[PHI5]](s32), [[COPY]]
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY14]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.7
; GFX10-NEXT: {{ $}}
@@ -436,15 +441,15 @@ body: |
; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_1]](s1), %bb.3, [[S_OR_B32_3]](s1), %bb.4
; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.3, [[S_OR_B32_2]](s1), %bb.4
; GFX10-NEXT: [[PHI8:%[0-9]+]]:_(s32) = G_PHI [[ADD]](s32), %bb.4, [[DEF]](s32), %bb.3
- ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[PHI6]](s1)
- ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
+ ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[PHI6]](s1)
+ ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF2]](s32)
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY17]], [[C9]]
- ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY16]](s1), [[PHI4]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY18]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY18]], [[C9]]
+ ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY17]](s1), [[PHI4]](s32)
+ ; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY19]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_4:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_4]](s1), [[S_AND_B32_4]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.3, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.8
@@ -453,11 +458,11 @@ body: |
; GFX10-NEXT: successors: %bb.2(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI9:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.7
- ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_4]](s1)
- ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY19]](s1)
+ ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_4]](s1)
+ ; GFX10-NEXT: [[COPY21:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY20]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI9]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY20]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY21]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_5]](s1), [[S_AND_B32_5]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.2
bb.0:
@@ -574,7 +579,7 @@ body: |
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[DEF1]](s1), %bb.0, %38(s1), %bb.6
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[DEF1]](s1), %bb.0, %39(s1), %bb.6
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %11(s32), %bb.6, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %13(s32), %bb.6
; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
@@ -600,9 +605,10 @@ body: |
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[PHI2]]
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[COPY8]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
@@ -610,21 +616,21 @@ body: |
; GFX10-NEXT: successors: %bb.6(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C4]]
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
; GFX10-NEXT: successors: %bb.7(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[C2]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[COPY8]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[ADD]](s32), %bb.5, [[DEF]](s32), %bb.4
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY10]](s1), [[PHI1]](s32)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY11]](s1), [[PHI1]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY7]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
@@ -636,9 +642,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.6
; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[PHI2]](s32), %bb.6
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_1]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_1]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
- ; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY11]](s1), %bb.9, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY12]](s1), %bb.9, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.8:
@@ -751,26 +757,27 @@ body: |
; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY3]](s32), [[COPY4]](s32)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C1]](s1)
; GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF1:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, %53(s1), %bb.3
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %42(s1), %bb.3
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[C1]](s1), %bb.0, %32(s1), %bb.3
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, %54(s1), %bb.3
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %43(s1), %bb.3
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY5]](s1), %bb.0, %33(s1), %bb.3
; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI %10(s32), %bb.3, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %12(s32), %bb.3
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI2]](s1)
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY8]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI2]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[COPY8]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
- ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY7]](s1), %bb.3, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY8]](s1), %bb.3, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
@@ -783,10 +790,10 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = IMPLICIT_DEF
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
@@ -794,32 +801,32 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, [[S_OR_B32_1]](s1), %bb.2
; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[PHI2]](s1), %bb.1, [[DEF2]](s1), %bb.2
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE [[COPY11]]
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[FREEZE]](s1)
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[FREEZE]](s1)
+ ; GFX10-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE [[COPY12]]
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[FREEZE]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[FREEZE]](s1)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI4]], [[C4]]
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[PHI4]](s32), [[COPY]]
; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[ICMP1]](s1), [[PHI3]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
; GFX10-NEXT: [[PHI7:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.3
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_3]](s1)
+ ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI7]](s32)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY15]](s1), [[C6]], [[C5]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY16]](s1), [[C6]], [[C5]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV1]](p0) :: (store (s32))
; GFX10-NEXT: S_ENDPGM 0
bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
index 609fff51863a..1698f84eea51 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -amdgpu-global-isel-risky-select -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
; Simples case, if - then, that requires lane mask merging,
; %phi lane mask will hold %val_A at %A. Lanes that are active in %B
@@ -43,13 +43,12 @@ define amdgpu_ps void @divergent_i1_phi_if_else(ptr addrspace(1) %out, i32 %tid,
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_and_b32 s0, 1, s0
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
-; GFX10-NEXT: v_cmp_ne_u32_e64 s2, 0, s0
-; GFX10-NEXT: ; implicit-def: $sgpr0
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX10-NEXT: s_xor_b32 s1, exec_lo, s1
; GFX10-NEXT: ; %bb.1: ; %B
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 2, v2
-; GFX10-NEXT: s_andn2_b32 s0, s2, exec_lo
+; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
@@ -211,7 +210,7 @@ define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %
; GFX10-NEXT: ; in Loop: Header=BB3_3 Depth=1
; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v4, v7
; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v5, v8, vcc_lo
-; GFX10-NEXT: s_mov_b32 s4, 1
+; GFX10-NEXT: s_mov_b32 s4, -1
; GFX10-NEXT: global_load_dword v9, v[9:10], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v9
@@ -308,7 +307,7 @@ define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %
; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1
; GFX10-NEXT: v_add_co_u32 v11, vcc_lo, v4, v9
; GFX10-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, v5, v10, vcc_lo
-; GFX10-NEXT: s_mov_b32 s4, 1
+; GFX10-NEXT: s_mov_b32 s4, -1
; GFX10-NEXT: global_load_dword v11, v[11:12], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11
@@ -318,7 +317,7 @@ define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %
; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1
; GFX10-NEXT: v_add_co_u32 v11, vcc_lo, v6, v9
; GFX10-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, v7, v10, vcc_lo
-; GFX10-NEXT: s_mov_b32 s5, 1
+; GFX10-NEXT: s_mov_b32 s5, -1
; GFX10-NEXT: global_load_dword v11, v[11:12], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
index df5505e1b28b..8197b072c740 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
@@ -18,9 +18,10 @@ body: |
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[COPY4]](s1)
; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -29,18 +30,18 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C2]]
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY5]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY6]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[ICMP]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY6]](s1), [[C4]], [[C3]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY7]](s1), [[C4]], [[C3]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -91,18 +92,20 @@ body: |
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s1) = G_IMPLICIT_DEF
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[DEF]](s1)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[DEF]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[COPY4]](s1)
; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP]](s1), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %19(s1), %bb.3
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[COPY5]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, %20(s1), %bb.3
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[COPY6]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
; GFX10-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_ELSE [[SI_IF]](s32), %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
@@ -111,9 +114,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C1]]
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP1]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY7]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP1]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -122,19 +125,19 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C2]]
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY8]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY5]](s1), %bb.1, [[S_OR_B32_]](s1), %bb.2
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY7]](s1), %bb.1, [[S_OR_B32_]](s1), %bb.2
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_ELSE]](s32)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY9]](s1), [[C3]], [[C4]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY11]](s1), [[C3]], [[C4]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -368,13 +371,14 @@ body: |
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.5(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C5]](s32)
; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV2]], [[SHL1]](s64)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD1]](s32), [[C6]]
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[COPY9]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -383,9 +387,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, %47(s1), %bb.5
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %32(s32), %bb.5, [[DEF]](s32), %bb.1
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY10]](s1), [[PHI1]](s32)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY11]](s1), [[PHI1]](s32)
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
@@ -402,21 +406,21 @@ body: |
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C8]]
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[C9]]
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY12]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
; GFX10-NEXT: successors: %bb.3(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[C4]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
+ ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[COPY9]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[COPY12]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[COPY13]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.3
; GFX10-NEXT: {{ $}}
@@ -560,13 +564,14 @@ body: |
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.5(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C5]](s32)
; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV2]], [[SHL1]](s64)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD1]](s32), [[C6]]
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[COPY11]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -575,9 +580,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, %60(s1), %bb.5
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %35(s32), %bb.5, [[DEF]](s32), %bb.1
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY12]](s1), [[PHI1]](s32)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY13]](s1), [[PHI1]](s32)
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.8
; GFX10-NEXT: {{ $}}
@@ -585,26 +590,27 @@ body: |
; GFX10-NEXT: successors: %bb.6(0x40000000), %bb.7(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[C7]](s1)
; GFX10-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C8]](s32)
; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV3]], [[SHL2]](s64)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD2]](s32), [[C9]]
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[C7]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[COPY14]](s1)
; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP2]](s1), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
; GFX10-NEXT: successors: %bb.3(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[C4]](s1), %bb.2, %71(s1), %bb.7
+ ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[COPY11]](s1), %bb.2, %72(s1), %bb.7
; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI %46(s32), %bb.7, [[DEF]](s32), %bb.2
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[COPY14]](s1)
+ ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[COPY16]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY17]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.3
; GFX10-NEXT: {{ $}}
@@ -621,21 +627,21 @@ body: |
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C11]]
; GFX10-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; GFX10-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[C12]]
- ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP3]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP3]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY15]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY18]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.7:
; GFX10-NEXT: successors: %bb.5(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[C7]](s1), %bb.4, [[S_OR_B32_2]](s1), %bb.6
+ ; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[COPY14]](s1), %bb.4, [[S_OR_B32_2]](s1), %bb.6
; GFX10-NEXT: [[PHI8:%[0-9]+]]:_(s32) = G_PHI [[ADD1]](s32), %bb.6, [[DEF]](s32), %bb.4
- ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
- ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[COPY17]](s1)
+ ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
+ ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[COPY19]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF2]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY11]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY18]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY20]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
@@ -970,6 +976,7 @@ body: |
; GFX10-NEXT: [[DEF1:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF3:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: G_BR %bb.7
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
@@ -982,19 +989,19 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.7(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI %67(s1), %bb.6, %70(s1), %bb.7
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI %67(s1), %bb.6, %71(s1), %bb.7
; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI %49(s1), %bb.6, %48(s1), %bb.7
; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI %35(s1), %bb.6, %34(s1), %bb.7
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY9]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY10]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %15(s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY8]](s1), %17(s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY9]](s1), %17(s32)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_]](s1)
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -1011,28 +1018,28 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INTRINSIC_CONVERGENT]](s32)
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY5]](s32), [[COPY]]
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C2]](s1)
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C2]](s1)
; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C2]]
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP2]], [[XOR]]
; GFX10-NEXT: [[INTRINSIC_CONVERGENT2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[OR]](s1), %25(s32)
; GFX10-NEXT: [[DEF4:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF5:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %63(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY12]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY11]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT2]](s32), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT2]](s32), %bb.4
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI4]](s32)
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY14]](s1), [[COPY3]], [[COPY2]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY15]](s1), [[COPY3]], [[COPY2]]
; GFX10-NEXT: [[INTRINSIC_CONVERGENT3:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[SELECT]](s32)
; GFX10-NEXT: $sgpr0 = COPY [[INTRINSIC_CONVERGENT3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
@@ -1042,14 +1049,14 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT1]](s32), %bb.3
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
+ ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %42(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY17]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %56(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_4:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_4]](s1), [[S_AND_B32_4]](s1), implicit-def $scc
; GFX10-NEXT: [[DEF6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = IMPLICIT_DEF
; GFX10-NEXT: G_BR %bb.2
@@ -1057,27 +1064,27 @@ body: |
; GFX10-NEXT: bb.7:
; GFX10-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[ICMP]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.2, [[S_OR_B32_2]](s1), %bb.4
+ ; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY7]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.2, [[S_OR_B32_2]](s1), %bb.4
; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[DEF3]](s1), %bb.0, [[PHI7]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
; GFX10-NEXT: [[PHI8:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.0, [[PHI1]](s1), %bb.2, [[DEF5]](s1), %bb.4
; GFX10-NEXT: [[PHI9:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, [[PHI2]](s1), %bb.2, [[DEF4]](s1), %bb.4
; GFX10-NEXT: [[PHI10:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT2]](s32), %bb.4, [[PHI10]](s32), %bb.2, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI11:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.4, [[INTRINSIC_CONVERGENT]](s32), %bb.2, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
- ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
- ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[PHI8]](s1)
- ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[PHI9]](s1)
+ ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
+ ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
+ ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[PHI8]](s1)
+ ; GFX10-NEXT: [[COPY21:%[0-9]+]]:sreg_32(s1) = COPY [[PHI9]](s1)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY21:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY20]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[COPY22:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY21]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_5:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY6]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_5:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_5]](s1), [[S_AND_B32_5]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY22:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_5]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_6:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY19]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_6:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY21]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY23:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_5]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_6:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY20]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_6:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY22]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_6:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_6]](s1), [[S_AND_B32_6]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY23:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_6]](s1)
- ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY17]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[COPY24:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_6]](s1)
+ ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY18]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.1
bb.0:
successors: %bb.7(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
index 312c6a3822ce..1855ede0483d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -amdgpu-global-isel-risky-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
define void @temporal_divergent_i1_phi(float %val, ptr %addr) {
; GFX10-LABEL: temporal_divergent_i1_phi:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
index b21e6a729dbc..1934958ea8f3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -amdgpu-global-isel-risky-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
define void @temporal_divergent_i32(float %val, ptr %addr) {
; GFX10-LABEL: temporal_divergent_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
index 921bdb5015c7..63e7339d829e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
@@ -256,6 +256,7 @@ define void @store_load_vindex_foo(i32 %idx) {
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b32 v0, v1, s32 scope:SCOPE_SYS
@@ -607,6 +608,7 @@ define void @store_load_vindex_small_offset_foo(i32 %idx) {
; GFX12-NEXT: scratch_load_b32 v3, off, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, s32 offset:256 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b32 v0, v1, s32 offset:256 scope:SCOPE_SYS
@@ -921,6 +923,7 @@ define void @store_load_vindex_large_offset_foo(i32 %idx) {
; GFX12-NEXT: scratch_load_b32 v3, off, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, s32 offset:16384 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b32 v0, v1, s32 offset:16384 scope:SCOPE_SYS
@@ -1089,6 +1092,7 @@ define void @store_load_large_imm_offset_foo() {
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 off, v0, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 off, v1, s32 offset:16000 scope:SCOPE_SYS
@@ -1242,6 +1246,7 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, 15
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b64 v0, v[1:2], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b64 v[0:1], v0, off scope:SCOPE_SYS
@@ -1306,6 +1311,7 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, 15
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b64 v0, v[1:2], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b64 v[0:1], v0, off scope:SCOPE_SYS
@@ -1389,6 +1395,7 @@ define void @store_load_v3i32_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_mov_b32 s0, 1
; GFX12-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v2, s1
; GFX12-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b96 v0, v[1:3], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b96 v[0:2], v0, off scope:SCOPE_SYS
@@ -1478,6 +1485,7 @@ define void @store_load_v4i32_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_mov_b32 s0, 1
; GFX12-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v3, s2
; GFX12-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b128 v0, v[1:4], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b128 v[0:3], v0, off scope:SCOPE_SYS
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
index c7d45f062d0d..4bb9eb807e15 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -amdgpu-global-isel-risky-select -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GCN
+# RUN: llc -mtriple=amdgcn -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GCN
---
name: g_phi_s32_ss_sbranch
@@ -322,60 +322,6 @@ body: |
...
---
-name: g_phi_vcc_s1_sbranch
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-machineFunctionInfo: {}
-body: |
- ; GCN-LABEL: name: g_phi_vcc_s1_sbranch
- ; GCN: bb.0:
- ; GCN-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $sgpr2
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GCN-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GCN-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 [[COPY]], [[S_MOV_B32_]], implicit $exec
- ; GCN-NEXT: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $scc
- ; GCN-NEXT: $scc = COPY [[COPY3]]
- ; GCN-NEXT: S_CBRANCH_SCC1 %bb.1, implicit $scc
- ; GCN-NEXT: S_BRANCH %bb.2
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: bb.1:
- ; GCN-NEXT: successors: %bb.2(0x80000000)
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 [[COPY1]], [[S_MOV_B32_]], implicit $exec
- ; GCN-NEXT: S_BRANCH %bb.2
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: bb.2:
- ; GCN-NEXT: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[V_CMP_EQ_U32_e64_]], %bb.0, [[V_CMP_EQ_U32_e64_1]], %bb.1
- ; GCN-NEXT: S_SETPC_B64 undef $sgpr30_sgpr31, implicit [[PHI]]
- bb.0:
- liveins: $vgpr0, $vgpr1, $sgpr2
-
- %0:vgpr(s32) = COPY $vgpr0
- %1:vgpr(s32) = COPY $vgpr1
- %2:sgpr(s32) = COPY $sgpr2
- %3:sgpr(s32) = G_CONSTANT i32 0
- %4:vcc(s1) = G_ICMP intpred(eq), %0, %3
- %5:sgpr(s32) = G_ICMP intpred(eq), %2(s32), %3
- G_BRCOND %5, %bb.1
- G_BR %bb.2
-
- bb.1:
- %6:vcc(s1) = G_ICMP intpred(eq), %1, %3
- G_BR %bb.2
-
- bb.2:
- %7:vcc(s1) = G_PHI %4, %bb.0, %6, %bb.1
- S_SETPC_B64 undef $sgpr30_sgpr31, implicit %7
-
-...
-
----
name: phi_s32_ss_sbranch
legalized: true
regBankSelected: true
diff --git a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
index ce1ce649c227..15ebdd70ae88 100644
--- a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
+++ b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
@@ -2078,4 +2078,1218 @@ entry:
ret void
}
+define amdgpu_cs void @amdgpu_cs_i1(i1 %arg0) {
+; SI-LABEL: amdgpu_cs_i1:
+; SI: ; %bb.0:
+; SI-NEXT: v_and_b32_e32 v0, 1, v0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_i1:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, 1, v0
+; VI-NEXT: flat_store_byte v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: global_store_b8 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store i1 %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_v8i1(<8 x i1> %arg0) {
+; SI-LABEL: amdgpu_cs_v8i1:
+; SI: ; %bb.0:
+; SI-NEXT: v_lshlrev_b32_e32 v7, 3, v7
+; SI-NEXT: v_and_b32_e32 v6, 1, v6
+; SI-NEXT: v_lshlrev_b32_e32 v5, 1, v5
+; SI-NEXT: v_and_b32_e32 v4, 1, v4
+; SI-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; SI-NEXT: v_and_b32_e32 v2, 1, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v1
+; SI-NEXT: v_and_b32_e32 v0, 1, v0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v6
+; SI-NEXT: v_or_b32_e32 v4, v4, v5
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v2
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_or_b32_e32 v1, v7, v6
+; SI-NEXT: v_and_b32_e32 v4, 3, v4
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: v_and_b32_e32 v0, 3, v0
+; SI-NEXT: v_or_b32_e32 v1, v4, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 4, v1
+; SI-NEXT: v_and_b32_e32 v0, 15, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_v8i1:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v6, 1, v6
+; VI-NEXT: v_lshlrev_b16_e32 v5, 1, v5
+; VI-NEXT: v_and_b32_e32 v4, 1, v4
+; VI-NEXT: v_and_b32_e32 v2, 1, v2
+; VI-NEXT: v_lshlrev_b16_e32 v1, 1, v1
+; VI-NEXT: v_and_b32_e32 v0, 1, v0
+; VI-NEXT: v_lshlrev_b16_e32 v7, 3, v7
+; VI-NEXT: v_lshlrev_b16_e32 v6, 2, v6
+; VI-NEXT: v_or_b32_e32 v4, v4, v5
+; VI-NEXT: v_lshlrev_b16_e32 v3, 3, v3
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v6, v7, v6
+; VI-NEXT: v_and_b32_e32 v4, 3, v4
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v0, 3, v0
+; VI-NEXT: v_or_b32_e32 v4, v4, v6
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: v_lshlrev_b16_e32 v4, 4, v4
+; VI-NEXT: v_and_b32_e32 v0, 15, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v4
+; VI-NEXT: flat_store_byte v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_v8i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX11-NEXT: v_lshlrev_b16 v5, 1, v5
+; GFX11-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX11-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX11-NEXT: v_lshlrev_b16 v1, 1, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: v_lshlrev_b16 v7, 3, v7
+; GFX11-NEXT: v_lshlrev_b16 v6, 2, v6
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-NEXT: v_lshlrev_b16 v3, 3, v3
+; GFX11-NEXT: v_lshlrev_b16 v2, 2, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_or_b32_e32 v1, v7, v6
+; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or_b32_e32 v1, v4, v1
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b16 v1, 4, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 15, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: global_store_b8 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store <8 x i1> %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_v16i1(<16 x i1> %arg0) {
+; SI-LABEL: amdgpu_cs_v16i1:
+; SI: ; %bb.0:
+; SI-NEXT: v_lshlrev_b32_e32 v15, 3, v15
+; SI-NEXT: v_and_b32_e32 v14, 1, v14
+; SI-NEXT: v_lshlrev_b32_e32 v13, 1, v13
+; SI-NEXT: v_and_b32_e32 v12, 1, v12
+; SI-NEXT: v_lshlrev_b32_e32 v11, 3, v11
+; SI-NEXT: v_and_b32_e32 v10, 1, v10
+; SI-NEXT: v_lshlrev_b32_e32 v9, 1, v9
+; SI-NEXT: v_and_b32_e32 v8, 1, v8
+; SI-NEXT: v_lshlrev_b32_e32 v7, 3, v7
+; SI-NEXT: v_and_b32_e32 v6, 1, v6
+; SI-NEXT: v_lshlrev_b32_e32 v5, 1, v5
+; SI-NEXT: v_and_b32_e32 v4, 1, v4
+; SI-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; SI-NEXT: v_and_b32_e32 v2, 1, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v1
+; SI-NEXT: v_and_b32_e32 v0, 1, v0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_lshlrev_b32_e32 v14, 2, v14
+; SI-NEXT: v_or_b32_e32 v12, v12, v13
+; SI-NEXT: v_lshlrev_b32_e32 v10, 2, v10
+; SI-NEXT: v_or_b32_e32 v8, v8, v9
+; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v6
+; SI-NEXT: v_or_b32_e32 v4, v4, v5
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v2
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_or_b32_e32 v1, v15, v14
+; SI-NEXT: v_and_b32_e32 v5, 3, v12
+; SI-NEXT: v_or_b32_e32 v9, v11, v10
+; SI-NEXT: v_and_b32_e32 v8, 3, v8
+; SI-NEXT: v_or_b32_e32 v6, v7, v6
+; SI-NEXT: v_and_b32_e32 v4, 3, v4
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: v_and_b32_e32 v0, 3, v0
+; SI-NEXT: v_or_b32_e32 v1, v5, v1
+; SI-NEXT: v_or_b32_e32 v3, v8, v9
+; SI-NEXT: v_or_b32_e32 v4, v4, v6
+; SI-NEXT: v_or_b32_e32 v0, v0, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 12, v1
+; SI-NEXT: v_and_b32_e32 v2, 15, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 4, v4
+; SI-NEXT: v_and_b32_e32 v0, 15, v0
+; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_v16i1:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v14, 1, v14
+; VI-NEXT: v_lshlrev_b16_e32 v13, 1, v13
+; VI-NEXT: v_and_b32_e32 v12, 1, v12
+; VI-NEXT: v_and_b32_e32 v10, 1, v10
+; VI-NEXT: v_lshlrev_b16_e32 v9, 1, v9
+; VI-NEXT: v_and_b32_e32 v8, 1, v8
+; VI-NEXT: v_and_b32_e32 v6, 1, v6
+; VI-NEXT: v_lshlrev_b16_e32 v5, 1, v5
+; VI-NEXT: v_and_b32_e32 v4, 1, v4
+; VI-NEXT: v_and_b32_e32 v2, 1, v2
+; VI-NEXT: v_lshlrev_b16_e32 v1, 1, v1
+; VI-NEXT: v_and_b32_e32 v0, 1, v0
+; VI-NEXT: v_lshlrev_b16_e32 v15, 3, v15
+; VI-NEXT: v_lshlrev_b16_e32 v14, 2, v14
+; VI-NEXT: v_or_b32_e32 v12, v12, v13
+; VI-NEXT: v_lshlrev_b16_e32 v11, 3, v11
+; VI-NEXT: v_lshlrev_b16_e32 v10, 2, v10
+; VI-NEXT: v_or_b32_e32 v8, v8, v9
+; VI-NEXT: v_lshlrev_b16_e32 v7, 3, v7
+; VI-NEXT: v_lshlrev_b16_e32 v6, 2, v6
+; VI-NEXT: v_or_b32_e32 v4, v4, v5
+; VI-NEXT: v_lshlrev_b16_e32 v3, 3, v3
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v14, v15, v14
+; VI-NEXT: v_and_b32_e32 v12, 3, v12
+; VI-NEXT: v_or_b32_e32 v10, v11, v10
+; VI-NEXT: v_and_b32_e32 v8, 3, v8
+; VI-NEXT: v_or_b32_e32 v6, v7, v6
+; VI-NEXT: v_and_b32_e32 v4, 3, v4
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v0, 3, v0
+; VI-NEXT: v_or_b32_e32 v12, v12, v14
+; VI-NEXT: v_or_b32_e32 v8, v8, v10
+; VI-NEXT: v_mov_b32_e32 v9, 15
+; VI-NEXT: v_or_b32_e32 v4, v4, v6
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: v_lshlrev_b16_e32 v12, 12, v12
+; VI-NEXT: v_and_b32_sdwa v8, v8, v9 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b16_e32 v4, 4, v4
+; VI-NEXT: v_and_b32_e32 v0, 15, v0
+; VI-NEXT: v_or_b32_e32 v8, v12, v8
+; VI-NEXT: v_or_b32_e32 v0, v0, v4
+; VI-NEXT: v_or_b32_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: flat_store_short v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_v16i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX11-NEXT: v_lshlrev_b16 v9, 1, v9
+; GFX11-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX11-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX11-NEXT: v_lshlrev_b16 v5, 1, v5
+; GFX11-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX11-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX11-NEXT: v_lshlrev_b16 v1, 1, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX11-NEXT: v_lshlrev_b16 v13, 1, v13
+; GFX11-NEXT: v_and_b32_e32 v12, 1, v12
+; GFX11-NEXT: v_lshlrev_b16 v11, 3, v11
+; GFX11-NEXT: v_lshlrev_b16 v10, 2, v10
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX11-NEXT: v_lshlrev_b16 v7, 3, v7
+; GFX11-NEXT: v_lshlrev_b16 v6, 2, v6
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-NEXT: v_lshlrev_b16 v3, 3, v3
+; GFX11-NEXT: v_lshlrev_b16 v2, 2, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_lshlrev_b16 v15, 3, v15
+; GFX11-NEXT: v_lshlrev_b16 v14, 2, v14
+; GFX11-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX11-NEXT: v_or_b32_e32 v10, v11, v10
+; GFX11-NEXT: v_and_b32_e32 v1, 3, v8
+; GFX11-NEXT: v_or_b32_e32 v5, v7, v6
+; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-NEXT: v_or_b32_e32 v3, v15, v14
+; GFX11-NEXT: v_and_b32_e32 v6, 3, v12
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v10
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v2, v6, v3
+; GFX11-NEXT: v_and_b32_e32 v1, 15, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b16 v3, 4, v4
+; GFX11-NEXT: v_and_b32_e32 v0, 15, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b16 v2, 12, v2
+; GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX11-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: global_store_b16 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store <16 x i1> %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_v32i1(<32 x i1> %arg0) {
+; SI-LABEL: amdgpu_cs_v32i1:
+; SI: ; %bb.0:
+; SI-NEXT: v_lshlrev_b32_e32 v29, 1, v29
+; SI-NEXT: v_and_b32_e32 v28, 1, v28
+; SI-NEXT: v_lshlrev_b32_e32 v25, 1, v25
+; SI-NEXT: v_and_b32_e32 v24, 1, v24
+; SI-NEXT: v_lshlrev_b32_e32 v21, 1, v21
+; SI-NEXT: v_and_b32_e32 v20, 1, v20
+; SI-NEXT: v_lshlrev_b32_e32 v17, 1, v17
+; SI-NEXT: v_and_b32_e32 v16, 1, v16
+; SI-NEXT: v_lshlrev_b32_e32 v13, 1, v13
+; SI-NEXT: v_and_b32_e32 v12, 1, v12
+; SI-NEXT: v_lshlrev_b32_e32 v9, 1, v9
+; SI-NEXT: v_and_b32_e32 v8, 1, v8
+; SI-NEXT: v_lshlrev_b32_e32 v5, 1, v5
+; SI-NEXT: v_and_b32_e32 v4, 1, v4
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v1
+; SI-NEXT: v_and_b32_e32 v0, 1, v0
+; SI-NEXT: v_lshlrev_b32_e32 v31, 3, v31
+; SI-NEXT: v_and_b32_e32 v30, 1, v30
+; SI-NEXT: v_lshlrev_b32_e32 v27, 3, v27
+; SI-NEXT: v_and_b32_e32 v26, 1, v26
+; SI-NEXT: v_lshlrev_b32_e32 v23, 3, v23
+; SI-NEXT: v_and_b32_e32 v22, 1, v22
+; SI-NEXT: v_lshlrev_b32_e32 v19, 3, v19
+; SI-NEXT: v_and_b32_e32 v18, 1, v18
+; SI-NEXT: v_lshlrev_b32_e32 v15, 3, v15
+; SI-NEXT: v_and_b32_e32 v14, 1, v14
+; SI-NEXT: v_lshlrev_b32_e32 v11, 3, v11
+; SI-NEXT: v_and_b32_e32 v10, 1, v10
+; SI-NEXT: v_lshlrev_b32_e32 v7, 3, v7
+; SI-NEXT: v_and_b32_e32 v6, 1, v6
+; SI-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; SI-NEXT: v_and_b32_e32 v2, 1, v2
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_or_b32_e32 v28, v28, v29
+; SI-NEXT: v_or_b32_e32 v24, v24, v25
+; SI-NEXT: v_or_b32_e32 v20, v20, v21
+; SI-NEXT: v_or_b32_e32 v16, v16, v17
+; SI-NEXT: v_or_b32_e32 v12, v12, v13
+; SI-NEXT: v_or_b32_e32 v8, v8, v9
+; SI-NEXT: v_or_b32_e32 v4, v4, v5
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v30
+; SI-NEXT: v_lshlrev_b32_e32 v5, 2, v26
+; SI-NEXT: v_lshlrev_b32_e32 v9, 2, v22
+; SI-NEXT: v_lshlrev_b32_e32 v13, 2, v18
+; SI-NEXT: v_lshlrev_b32_e32 v14, 2, v14
+; SI-NEXT: v_lshlrev_b32_e32 v10, 2, v10
+; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v6
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v2
+; SI-NEXT: v_or_b32_e32 v1, v31, v1
+; SI-NEXT: v_or_b32_e32 v5, v27, v5
+; SI-NEXT: v_or_b32_e32 v9, v23, v9
+; SI-NEXT: v_and_b32_e32 v17, 3, v28
+; SI-NEXT: v_and_b32_e32 v18, 3, v24
+; SI-NEXT: v_and_b32_e32 v20, 3, v20
+; SI-NEXT: v_or_b32_e32 v13, v19, v13
+; SI-NEXT: v_and_b32_e32 v16, 3, v16
+; SI-NEXT: v_or_b32_e32 v14, v15, v14
+; SI-NEXT: v_and_b32_e32 v12, 3, v12
+; SI-NEXT: v_or_b32_e32 v10, v11, v10
+; SI-NEXT: v_and_b32_e32 v8, 3, v8
+; SI-NEXT: v_or_b32_e32 v6, v7, v6
+; SI-NEXT: v_and_b32_e32 v4, 3, v4
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: v_and_b32_e32 v0, 3, v0
+; SI-NEXT: v_or_b32_e32 v1, v17, v1
+; SI-NEXT: v_or_b32_e32 v3, v18, v5
+; SI-NEXT: v_or_b32_e32 v5, v20, v9
+; SI-NEXT: v_or_b32_e32 v7, v16, v13
+; SI-NEXT: v_or_b32_e32 v9, v12, v14
+; SI-NEXT: v_or_b32_e32 v8, v8, v10
+; SI-NEXT: v_or_b32_e32 v4, v4, v6
+; SI-NEXT: v_or_b32_e32 v0, v0, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 12, v1
+; SI-NEXT: v_and_b32_e32 v2, 15, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 4, v5
+; SI-NEXT: v_and_b32_e32 v5, 15, v7
+; SI-NEXT: v_lshlrev_b32_e32 v6, 12, v9
+; SI-NEXT: v_and_b32_e32 v7, 15, v8
+; SI-NEXT: v_lshlrev_b32_e32 v4, 4, v4
+; SI-NEXT: v_and_b32_e32 v0, 15, v0
+; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; SI-NEXT: v_or_b32_e32 v3, v5, v3
+; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
+; SI-NEXT: v_or_b32_e32 v0, v0, v4
+; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v3
+; SI-NEXT: v_or_b32_e32 v3, v6, v5
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_v32i1:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v6, 1, v6
+; VI-NEXT: v_lshlrev_b16_e32 v5, 1, v5
+; VI-NEXT: v_and_b32_e32 v4, 1, v4
+; VI-NEXT: v_and_b32_e32 v2, 1, v2
+; VI-NEXT: v_lshlrev_b16_e32 v1, 1, v1
+; VI-NEXT: v_and_b32_e32 v0, 1, v0
+; VI-NEXT: v_lshlrev_b16_e32 v7, 3, v7
+; VI-NEXT: v_lshlrev_b16_e32 v6, 2, v6
+; VI-NEXT: v_or_b32_e32 v4, v4, v5
+; VI-NEXT: v_lshlrev_b16_e32 v3, 3, v3
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v6, v7, v6
+; VI-NEXT: v_and_b32_e32 v4, 3, v4
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v0, 3, v0
+; VI-NEXT: v_or_b32_e32 v4, v4, v6
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: v_lshlrev_b16_e32 v1, 4, v4
+; VI-NEXT: v_and_b32_e32 v0, 15, v0
+; VI-NEXT: v_and_b32_e32 v2, 1, v30
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshlrev_b16_e32 v1, 3, v31
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_lshlrev_b16_e32 v2, 1, v29
+; VI-NEXT: v_and_b32_e32 v3, 1, v28
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v2, 3, v2
+; VI-NEXT: v_and_b32_e32 v3, 1, v26
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_lshlrev_b16_e32 v2, 3, v27
+; VI-NEXT: v_lshlrev_b16_e32 v3, 2, v3
+; VI-NEXT: v_and_b32_e32 v10, 1, v10
+; VI-NEXT: v_lshlrev_b16_e32 v9, 1, v9
+; VI-NEXT: v_and_b32_e32 v8, 1, v8
+; VI-NEXT: v_or_b32_e32 v2, v2, v3
+; VI-NEXT: v_lshlrev_b16_e32 v3, 1, v25
+; VI-NEXT: v_and_b32_e32 v4, 1, v24
+; VI-NEXT: v_lshlrev_b16_e32 v11, 3, v11
+; VI-NEXT: v_lshlrev_b16_e32 v10, 2, v10
+; VI-NEXT: v_or_b32_e32 v8, v8, v9
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_or_b32_e32 v10, v11, v10
+; VI-NEXT: v_and_b32_e32 v8, 3, v8
+; VI-NEXT: v_and_b32_e32 v3, 3, v3
+; VI-NEXT: v_or_b32_e32 v8, v8, v10
+; VI-NEXT: v_mov_b32_e32 v10, 15
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_lshlrev_b16_e32 v1, 12, v1
+; VI-NEXT: v_and_b32_sdwa v2, v2, v10 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_and_b32_e32 v3, 1, v22
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_lshlrev_b16_e32 v2, 3, v23
+; VI-NEXT: v_lshlrev_b16_e32 v3, 2, v3
+; VI-NEXT: v_or_b32_e32 v2, v2, v3
+; VI-NEXT: v_lshlrev_b16_e32 v3, 1, v21
+; VI-NEXT: v_and_b32_e32 v4, 1, v20
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_and_b32_e32 v3, 3, v3
+; VI-NEXT: v_and_b32_e32 v4, 1, v18
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_lshlrev_b16_e32 v3, 3, v19
+; VI-NEXT: v_lshlrev_b16_e32 v4, 2, v4
+; VI-NEXT: v_and_b32_e32 v14, 1, v14
+; VI-NEXT: v_lshlrev_b16_e32 v13, 1, v13
+; VI-NEXT: v_and_b32_e32 v12, 1, v12
+; VI-NEXT: v_or_b32_e32 v3, v3, v4
+; VI-NEXT: v_lshlrev_b16_e32 v4, 1, v17
+; VI-NEXT: v_and_b32_e32 v5, 1, v16
+; VI-NEXT: v_lshlrev_b16_e32 v15, 3, v15
+; VI-NEXT: v_lshlrev_b16_e32 v14, 2, v14
+; VI-NEXT: v_or_b32_e32 v12, v12, v13
+; VI-NEXT: v_or_b32_e32 v4, v5, v4
+; VI-NEXT: v_or_b32_e32 v14, v15, v14
+; VI-NEXT: v_and_b32_e32 v12, 3, v12
+; VI-NEXT: v_and_b32_e32 v4, 3, v4
+; VI-NEXT: v_or_b32_e32 v12, v12, v14
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_lshlrev_b16_e32 v9, 12, v12
+; VI-NEXT: v_and_b32_sdwa v8, v8, v10 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b16_e32 v2, 4, v2
+; VI-NEXT: v_and_b32_e32 v3, 15, v3
+; VI-NEXT: v_or_b32_e32 v8, v9, v8
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_or_b32_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: flat_store_dword v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_v32i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX11-NEXT: v_lshlrev_b16 v9, 1, v9
+; GFX11-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX11-NEXT: v_lshlrev_b16 v11, 3, v11
+; GFX11-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX11-NEXT: v_lshlrev_b16 v10, 2, v10
+; GFX11-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX11-NEXT: v_lshlrev_b16 v1, 1, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX11-NEXT: v_lshlrev_b16 v13, 1, v13
+; GFX11-NEXT: v_and_b32_e32 v12, 1, v12
+; GFX11-NEXT: v_lshlrev_b16 v5, 1, v5
+; GFX11-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX11-NEXT: v_or_b32_e32 v9, v11, v10
+; GFX11-NEXT: v_and_b32_e32 v8, 3, v8
+; GFX11-NEXT: v_lshlrev_b16 v7, 3, v7
+; GFX11-NEXT: v_lshlrev_b16 v6, 2, v6
+; GFX11-NEXT: v_lshlrev_b16 v3, 3, v3
+; GFX11-NEXT: v_lshlrev_b16 v2, 2, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_lshlrev_b16 v15, 3, v15
+; GFX11-NEXT: v_lshlrev_b16 v14, 2, v14
+; GFX11-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-NEXT: v_or_b32_e32 v5, v7, v6
+; GFX11-NEXT: v_or_b32_e32 v6, v8, v9
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX11-NEXT: v_or_b32_e32 v13, v15, v14
+; GFX11-NEXT: v_and_b32_e32 v12, 3, v12
+; GFX11-NEXT: v_and_b32_e32 v3, 15, v6
+; GFX11-NEXT: v_lshlrev_b16 v6, 1, v29
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v2, 1, v26
+; GFX11-NEXT: v_and_b32_e32 v7, 1, v28
+; GFX11-NEXT: v_lshlrev_b16 v9, 1, v25
+; GFX11-NEXT: v_and_b32_e32 v10, 1, v24
+; GFX11-NEXT: v_and_b32_e32 v4, 3, v4
+; GFX11-NEXT: v_or_b32_e32 v1, v12, v13
+; GFX11-NEXT: v_lshlrev_b16 v8, 3, v27
+; GFX11-NEXT: v_lshlrev_b16 v2, 2, v2
+; GFX11-NEXT: v_or_b32_e32 v6, v7, v6
+; GFX11-NEXT: v_or_b32_e32 v7, v10, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 1, v22
+; GFX11-NEXT: v_lshlrev_b16 v10, 1, v21
+; GFX11-NEXT: v_and_b32_e32 v12, 1, v20
+; GFX11-NEXT: v_and_b32_e32 v13, 1, v18
+; GFX11-NEXT: v_lshlrev_b16 v14, 1, v17
+; GFX11-NEXT: v_and_b32_e32 v15, 1, v16
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-NEXT: v_and_b32_e32 v5, 1, v30
+; GFX11-NEXT: v_or_b32_e32 v2, v8, v2
+; GFX11-NEXT: v_lshlrev_b16 v8, 3, v23
+; GFX11-NEXT: v_lshlrev_b16 v9, 2, v9
+; GFX11-NEXT: v_or_b32_e32 v10, v12, v10
+; GFX11-NEXT: v_lshlrev_b16 v12, 3, v19
+; GFX11-NEXT: v_lshlrev_b16 v13, 2, v13
+; GFX11-NEXT: v_or_b32_e32 v14, v15, v14
+; GFX11-NEXT: v_lshlrev_b16 v11, 3, v31
+; GFX11-NEXT: v_lshlrev_b16 v5, 2, v5
+; GFX11-NEXT: v_and_b32_e32 v7, 3, v7
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 3, v10
+; GFX11-NEXT: v_or_b32_e32 v10, v12, v13
+; GFX11-NEXT: v_and_b32_e32 v12, 3, v14
+; GFX11-NEXT: v_or_b32_e32 v5, v11, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 3, v6
+; GFX11-NEXT: v_or_b32_e32 v2, v7, v2
+; GFX11-NEXT: v_or_b32_e32 v7, v9, v8
+; GFX11-NEXT: v_or_b32_e32 v8, v12, v10
+; GFX11-NEXT: v_lshlrev_b16 v4, 4, v4
+; GFX11-NEXT: v_and_b32_e32 v0, 15, v0
+; GFX11-NEXT: v_or_b32_e32 v5, v6, v5
+; GFX11-NEXT: v_and_b32_e32 v2, 15, v2
+; GFX11-NEXT: v_lshlrev_b16 v6, 4, v7
+; GFX11-NEXT: v_and_b32_e32 v7, 15, v8
+; GFX11-NEXT: v_lshlrev_b16 v1, 12, v1
+; GFX11-NEXT: v_lshlrev_b16 v3, 8, v3
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_lshlrev_b16 v4, 12, v5
+; GFX11-NEXT: v_lshlrev_b16 v2, 8, v2
+; GFX11-NEXT: v_or_b32_e32 v5, v7, v6
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_or_b32_e32 v1, v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: global_store_b32 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store <32 x i1> %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_inreg_i1(i1 inreg %arg0) {
+; SI-LABEL: amdgpu_cs_inreg_i1:
+; SI: ; %bb.0:
+; SI-NEXT: s_and_b32 s0, s0, 1
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_inreg_i1:
+; VI: ; %bb.0:
+; VI-NEXT: s_and_b32 s0, s0, 1
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: flat_store_byte v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_inreg_i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_and_b32 s0, s0, 1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: global_store_b8 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store i1 %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_inreg_v8i1(<8 x i1> inreg %arg0) {
+; SI-LABEL: amdgpu_cs_inreg_v8i1:
+; SI: ; %bb.0:
+; SI-NEXT: s_lshl_b32 s7, s7, 3
+; SI-NEXT: s_and_b32 s6, s6, 1
+; SI-NEXT: s_lshl_b32 s5, s5, 1
+; SI-NEXT: s_and_b32 s4, s4, 1
+; SI-NEXT: s_lshl_b32 s8, s3, 3
+; SI-NEXT: s_and_b32 s2, s2, 1
+; SI-NEXT: s_lshl_b32 s1, s1, 1
+; SI-NEXT: s_and_b32 s0, s0, 1
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_lshl_b32 s6, s6, 2
+; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: s_lshl_b32 s2, s2, 2
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_or_b32 s1, s7, s6
+; SI-NEXT: s_and_b32 s4, s4, 3
+; SI-NEXT: s_or_b32 s2, s8, s2
+; SI-NEXT: s_and_b32 s0, s0, 3
+; SI-NEXT: s_or_b32 s1, s4, s1
+; SI-NEXT: s_or_b32 s0, s0, s2
+; SI-NEXT: s_lshl_b32 s1, s1, 4
+; SI-NEXT: s_and_b32 s0, s0, 15
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_inreg_v8i1:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e64 v1, s6, 1
+; VI-NEXT: v_lshlrev_b16_e64 v0, 3, s7
+; VI-NEXT: v_lshlrev_b16_e32 v1, 2, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshlrev_b16_e64 v1, 1, s5
+; VI-NEXT: v_and_b32_e64 v2, s4, 1
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_and_b32_e32 v1, 3, v1
+; VI-NEXT: v_and_b32_e64 v2, s2, 1
+; VI-NEXT: v_or_b32_e32 v0, v1, v0
+; VI-NEXT: v_lshlrev_b16_e64 v1, 3, s3
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_lshlrev_b16_e64 v2, 1, s1
+; VI-NEXT: v_and_b32_e64 v3, s0, 1
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v2, 3, v2
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_lshlrev_b16_e32 v0, 4, v0
+; VI-NEXT: v_and_b32_e32 v1, 15, v1
+; VI-NEXT: v_or_b32_e32 v0, v1, v0
+; VI-NEXT: flat_store_byte v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_inreg_v8i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v1, s6, 1
+; GFX11-NEXT: v_lshlrev_b16 v2, 1, s5
+; GFX11-NEXT: v_and_b32_e64 v3, s4, 1
+; GFX11-NEXT: v_and_b32_e64 v4, s2, 1
+; GFX11-NEXT: v_lshlrev_b16 v5, 1, s1
+; GFX11-NEXT: v_and_b32_e64 v6, s0, 1
+; GFX11-NEXT: v_lshlrev_b16 v0, 3, s7
+; GFX11-NEXT: v_lshlrev_b16 v1, 2, v1
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_lshlrev_b16 v3, 3, s3
+; GFX11-NEXT: v_lshlrev_b16 v4, 2, v4
+; GFX11-NEXT: v_or_b32_e32 v5, v6, v5
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_and_b32_e32 v1, 3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v4
+; GFX11-NEXT: v_and_b32_e32 v3, 3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX11-NEXT: v_or_b32_e32 v1, v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b16 v0, 4, v0
+; GFX11-NEXT: v_and_b32_e32 v1, 15, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX11-NEXT: global_store_b8 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store <8 x i1> %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_inreg_v16i1(<16 x i1> inreg %arg0) {
+; SI-LABEL: amdgpu_cs_inreg_v16i1:
+; SI: ; %bb.0:
+; SI-NEXT: s_lshl_b32 s15, s15, 3
+; SI-NEXT: s_and_b32 s14, s14, 1
+; SI-NEXT: s_lshl_b32 s13, s13, 1
+; SI-NEXT: s_and_b32 s12, s12, 1
+; SI-NEXT: s_lshl_b32 s11, s11, 3
+; SI-NEXT: s_and_b32 s10, s10, 1
+; SI-NEXT: s_lshl_b32 s9, s9, 1
+; SI-NEXT: s_and_b32 s8, s8, 1
+; SI-NEXT: s_lshl_b32 s7, s7, 3
+; SI-NEXT: s_and_b32 s6, s6, 1
+; SI-NEXT: s_lshl_b32 s5, s5, 1
+; SI-NEXT: s_and_b32 s4, s4, 1
+; SI-NEXT: s_lshl_b32 s16, s3, 3
+; SI-NEXT: s_and_b32 s2, s2, 1
+; SI-NEXT: s_lshl_b32 s1, s1, 1
+; SI-NEXT: s_and_b32 s0, s0, 1
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_lshl_b32 s14, s14, 2
+; SI-NEXT: s_or_b32 s12, s12, s13
+; SI-NEXT: s_lshl_b32 s10, s10, 2
+; SI-NEXT: s_or_b32 s8, s8, s9
+; SI-NEXT: s_lshl_b32 s6, s6, 2
+; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: s_lshl_b32 s2, s2, 2
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_or_b32 s1, s15, s14
+; SI-NEXT: s_and_b32 s5, s12, 3
+; SI-NEXT: s_or_b32 s9, s11, s10
+; SI-NEXT: s_and_b32 s8, s8, 3
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_and_b32 s4, s4, 3
+; SI-NEXT: s_or_b32 s2, s16, s2
+; SI-NEXT: s_and_b32 s0, s0, 3
+; SI-NEXT: s_or_b32 s1, s5, s1
+; SI-NEXT: s_or_b32 s5, s8, s9
+; SI-NEXT: s_or_b32 s4, s4, s6
+; SI-NEXT: s_or_b32 s0, s0, s2
+; SI-NEXT: s_lshl_b32 s1, s1, 12
+; SI-NEXT: s_and_b32 s2, s5, 15
+; SI-NEXT: s_lshl_b32 s4, s4, 4
+; SI-NEXT: s_and_b32 s0, s0, 15
+; SI-NEXT: s_lshl_b32 s2, s2, 8
+; SI-NEXT: s_or_b32 s0, s0, s4
+; SI-NEXT: s_or_b32 s1, s1, s2
+; SI-NEXT: s_and_b32 s0, s0, 0xff
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_inreg_v16i1:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e64 v1, s14, 1
+; VI-NEXT: v_lshlrev_b16_e64 v0, 3, s15
+; VI-NEXT: v_lshlrev_b16_e32 v1, 2, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshlrev_b16_e64 v1, 1, s13
+; VI-NEXT: v_and_b32_e64 v2, s12, 1
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_and_b32_e32 v1, 3, v1
+; VI-NEXT: v_and_b32_e64 v2, s10, 1
+; VI-NEXT: v_or_b32_e32 v0, v1, v0
+; VI-NEXT: v_lshlrev_b16_e64 v1, 3, s11
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_lshlrev_b16_e64 v2, 1, s9
+; VI-NEXT: v_and_b32_e64 v3, s8, 1
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v2, 3, v2
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_mov_b32_e32 v2, 15
+; VI-NEXT: v_lshlrev_b16_e32 v0, 12, v0
+; VI-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_and_b32_e64 v2, s6, 1
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshlrev_b16_e64 v1, 3, s7
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_lshlrev_b16_e64 v2, 1, s5
+; VI-NEXT: v_and_b32_e64 v3, s4, 1
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v2, 3, v2
+; VI-NEXT: v_and_b32_e64 v3, s2, 1
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_lshlrev_b16_e64 v2, 3, s3
+; VI-NEXT: v_lshlrev_b16_e32 v3, 2, v3
+; VI-NEXT: v_or_b32_e32 v2, v2, v3
+; VI-NEXT: v_lshlrev_b16_e64 v3, 1, s1
+; VI-NEXT: v_and_b32_e64 v4, s0, 1
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_and_b32_e32 v3, 3, v3
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_lshlrev_b16_e32 v1, 4, v1
+; VI-NEXT: v_and_b32_e32 v2, 15, v2
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: flat_store_short v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_inreg_v16i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v0, s10, 1
+; GFX11-NEXT: v_lshlrev_b16 v2, 1, s13
+; GFX11-NEXT: v_and_b32_e64 v3, s12, 1
+; GFX11-NEXT: v_lshlrev_b16 v5, 1, s9
+; GFX11-NEXT: v_and_b32_e64 v6, s8, 1
+; GFX11-NEXT: v_lshlrev_b16 v4, 3, s11
+; GFX11-NEXT: v_lshlrev_b16 v0, 2, v0
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_and_b32_e64 v8, s4, 1
+; GFX11-NEXT: v_or_b32_e32 v3, v6, v5
+; GFX11-NEXT: v_and_b32_e64 v5, s6, 1
+; GFX11-NEXT: v_lshlrev_b16 v6, 1, s5
+; GFX11-NEXT: v_and_b32_e64 v9, s2, 1
+; GFX11-NEXT: v_lshlrev_b16 v10, 1, s1
+; GFX11-NEXT: v_and_b32_e64 v11, s0, 1
+; GFX11-NEXT: v_and_b32_e64 v1, s14, 1
+; GFX11-NEXT: v_or_b32_e32 v0, v4, v0
+; GFX11-NEXT: v_lshlrev_b16 v4, 3, s7
+; GFX11-NEXT: v_lshlrev_b16 v5, 2, v5
+; GFX11-NEXT: v_or_b32_e32 v6, v8, v6
+; GFX11-NEXT: v_lshlrev_b16 v8, 3, s3
+; GFX11-NEXT: v_lshlrev_b16 v9, 2, v9
+; GFX11-NEXT: v_or_b32_e32 v10, v11, v10
+; GFX11-NEXT: v_lshlrev_b16 v7, 3, s15
+; GFX11-NEXT: v_lshlrev_b16 v1, 2, v1
+; GFX11-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-NEXT: v_and_b32_e32 v5, 3, v6
+; GFX11-NEXT: v_or_b32_e32 v6, v8, v9
+; GFX11-NEXT: v_and_b32_e32 v8, 3, v10
+; GFX11-NEXT: v_or_b32_e32 v1, v7, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 3, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX11-NEXT: v_or_b32_e32 v3, v5, v4
+; GFX11-NEXT: v_or_b32_e32 v4, v8, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 15, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b16 v2, 4, v3
+; GFX11-NEXT: v_and_b32_e32 v3, 15, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b16 v1, 12, v1
+; GFX11-NEXT: v_lshlrev_b16 v0, 8, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX11-NEXT: global_store_b16 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store <16 x i1> %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_inreg_v32i1(<32 x i1> inreg %arg0) {
+; SI-LABEL: amdgpu_cs_inreg_v32i1:
+; SI: ; %bb.0:
+; SI-NEXT: s_lshl_b32 s31, s31, 3
+; SI-NEXT: s_and_b32 s30, s30, 1
+; SI-NEXT: s_lshl_b32 s29, s29, 1
+; SI-NEXT: s_and_b32 s28, s28, 1
+; SI-NEXT: s_lshl_b32 s27, s27, 3
+; SI-NEXT: s_and_b32 s26, s26, 1
+; SI-NEXT: s_lshl_b32 s25, s25, 1
+; SI-NEXT: s_and_b32 s24, s24, 1
+; SI-NEXT: s_lshl_b32 s23, s23, 3
+; SI-NEXT: s_and_b32 s22, s22, 1
+; SI-NEXT: s_lshl_b32 s21, s21, 1
+; SI-NEXT: s_and_b32 s20, s20, 1
+; SI-NEXT: s_lshl_b32 s19, s19, 3
+; SI-NEXT: s_and_b32 s18, s18, 1
+; SI-NEXT: s_lshl_b32 s17, s17, 1
+; SI-NEXT: s_and_b32 s16, s16, 1
+; SI-NEXT: s_lshl_b32 s15, s15, 3
+; SI-NEXT: s_and_b32 s14, s14, 1
+; SI-NEXT: s_lshl_b32 s13, s13, 1
+; SI-NEXT: s_and_b32 s12, s12, 1
+; SI-NEXT: s_lshl_b32 s11, s11, 3
+; SI-NEXT: s_and_b32 s10, s10, 1
+; SI-NEXT: s_lshl_b32 s9, s9, 1
+; SI-NEXT: s_and_b32 s8, s8, 1
+; SI-NEXT: s_lshl_b32 s7, s7, 3
+; SI-NEXT: s_and_b32 s6, s6, 1
+; SI-NEXT: s_lshl_b32 s5, s5, 1
+; SI-NEXT: s_and_b32 s4, s4, 1
+; SI-NEXT: s_lshl_b32 s33, s3, 3
+; SI-NEXT: s_and_b32 s2, s2, 1
+; SI-NEXT: s_lshl_b32 s1, s1, 1
+; SI-NEXT: s_and_b32 s0, s0, 1
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_lshl_b32 s30, s30, 2
+; SI-NEXT: s_or_b32 s28, s28, s29
+; SI-NEXT: s_lshl_b32 s26, s26, 2
+; SI-NEXT: s_or_b32 s24, s24, s25
+; SI-NEXT: s_lshl_b32 s22, s22, 2
+; SI-NEXT: s_or_b32 s20, s20, s21
+; SI-NEXT: s_lshl_b32 s18, s18, 2
+; SI-NEXT: s_or_b32 s16, s16, s17
+; SI-NEXT: s_lshl_b32 s14, s14, 2
+; SI-NEXT: s_or_b32 s12, s12, s13
+; SI-NEXT: s_lshl_b32 s10, s10, 2
+; SI-NEXT: s_or_b32 s8, s8, s9
+; SI-NEXT: s_lshl_b32 s6, s6, 2
+; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: s_lshl_b32 s2, s2, 2
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_or_b32 s1, s31, s30
+; SI-NEXT: s_and_b32 s5, s28, 3
+; SI-NEXT: s_or_b32 s9, s27, s26
+; SI-NEXT: s_and_b32 s13, s24, 3
+; SI-NEXT: s_or_b32 s17, s23, s22
+; SI-NEXT: s_and_b32 s20, s20, 3
+; SI-NEXT: s_or_b32 s18, s19, s18
+; SI-NEXT: s_and_b32 s16, s16, 3
+; SI-NEXT: s_or_b32 s14, s15, s14
+; SI-NEXT: s_and_b32 s12, s12, 3
+; SI-NEXT: s_or_b32 s10, s11, s10
+; SI-NEXT: s_and_b32 s8, s8, 3
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_and_b32 s4, s4, 3
+; SI-NEXT: s_or_b32 s2, s33, s2
+; SI-NEXT: s_and_b32 s0, s0, 3
+; SI-NEXT: s_or_b32 s1, s5, s1
+; SI-NEXT: s_or_b32 s5, s13, s9
+; SI-NEXT: s_or_b32 s7, s20, s17
+; SI-NEXT: s_or_b32 s9, s16, s18
+; SI-NEXT: s_or_b32 s11, s12, s14
+; SI-NEXT: s_or_b32 s8, s8, s10
+; SI-NEXT: s_or_b32 s4, s4, s6
+; SI-NEXT: s_or_b32 s0, s0, s2
+; SI-NEXT: s_lshl_b32 s1, s1, 12
+; SI-NEXT: s_and_b32 s2, s5, 15
+; SI-NEXT: s_lshl_b32 s5, s7, 4
+; SI-NEXT: s_and_b32 s6, s9, 15
+; SI-NEXT: s_lshl_b32 s7, s11, 12
+; SI-NEXT: s_and_b32 s8, s8, 15
+; SI-NEXT: s_lshl_b32 s4, s4, 4
+; SI-NEXT: s_and_b32 s0, s0, 15
+; SI-NEXT: s_lshl_b32 s2, s2, 8
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_lshl_b32 s6, s8, 8
+; SI-NEXT: s_or_b32 s0, s0, s4
+; SI-NEXT: s_or_b32 s1, s1, s2
+; SI-NEXT: s_and_b32 s2, s5, 0xff
+; SI-NEXT: s_or_b32 s4, s7, s6
+; SI-NEXT: s_and_b32 s0, s0, 0xff
+; SI-NEXT: s_or_b32 s1, s2, s1
+; SI-NEXT: s_or_b32 s0, s0, s4
+; SI-NEXT: s_lshl_b32 s1, s1, 16
+; SI-NEXT: s_and_b32 s0, s0, 0xffff
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_inreg_v32i1:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e64 v1, s14, 1
+; VI-NEXT: v_lshlrev_b16_e64 v0, 3, s15
+; VI-NEXT: v_lshlrev_b16_e32 v1, 2, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshlrev_b16_e64 v1, 1, s13
+; VI-NEXT: v_and_b32_e64 v2, s12, 1
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_and_b32_e32 v1, 3, v1
+; VI-NEXT: v_and_b32_e64 v2, s10, 1
+; VI-NEXT: v_or_b32_e32 v0, v1, v0
+; VI-NEXT: v_lshlrev_b16_e64 v1, 3, s11
+; VI-NEXT: v_lshlrev_b16_e32 v2, 2, v2
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_lshlrev_b16_e64 v2, 1, s9
+; VI-NEXT: v_and_b32_e64 v3, s8, 1
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_and_b32_e32 v2, 3, v2
+; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_mov_b32_e32 v2, 15
+; VI-NEXT: v_lshlrev_b16_e32 v0, 12, v0
+; VI-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_and_b32_e64 v3, s6, 1
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshlrev_b16_e64 v1, 3, s7
+; VI-NEXT: v_lshlrev_b16_e32 v3, 2, v3
+; VI-NEXT: v_or_b32_e32 v1, v1, v3
+; VI-NEXT: v_lshlrev_b16_e64 v3, 1, s5
+; VI-NEXT: v_and_b32_e64 v4, s4, 1
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_and_b32_e32 v3, 3, v3
+; VI-NEXT: v_and_b32_e64 v4, s2, 1
+; VI-NEXT: v_or_b32_e32 v1, v3, v1
+; VI-NEXT: v_lshlrev_b16_e64 v3, 3, s3
+; VI-NEXT: v_lshlrev_b16_e32 v4, 2, v4
+; VI-NEXT: v_or_b32_e32 v3, v3, v4
+; VI-NEXT: v_lshlrev_b16_e64 v4, 1, s1
+; VI-NEXT: v_and_b32_e64 v5, s0, 1
+; VI-NEXT: v_or_b32_e32 v4, v5, v4
+; VI-NEXT: v_and_b32_e32 v4, 3, v4
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_lshlrev_b16_e32 v1, 4, v1
+; VI-NEXT: v_and_b32_e32 v3, 15, v3
+; VI-NEXT: v_or_b32_e32 v1, v3, v1
+; VI-NEXT: v_and_b32_e64 v3, s30, 1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b16_e64 v1, 3, s31
+; VI-NEXT: v_lshlrev_b16_e32 v3, 2, v3
+; VI-NEXT: v_or_b32_e32 v1, v1, v3
+; VI-NEXT: v_lshlrev_b16_e64 v3, 1, s29
+; VI-NEXT: v_and_b32_e64 v4, s28, 1
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_and_b32_e32 v3, 3, v3
+; VI-NEXT: v_and_b32_e64 v4, s26, 1
+; VI-NEXT: v_or_b32_e32 v1, v3, v1
+; VI-NEXT: v_lshlrev_b16_e64 v3, 3, s27
+; VI-NEXT: v_lshlrev_b16_e32 v4, 2, v4
+; VI-NEXT: v_or_b32_e32 v3, v3, v4
+; VI-NEXT: v_lshlrev_b16_e64 v4, 1, s25
+; VI-NEXT: v_and_b32_e64 v5, s24, 1
+; VI-NEXT: v_or_b32_e32 v4, v5, v4
+; VI-NEXT: v_and_b32_e32 v4, 3, v4
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_lshlrev_b16_e32 v1, 12, v1
+; VI-NEXT: v_and_b32_sdwa v2, v3, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_and_b32_e64 v3, s22, 1
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_lshlrev_b16_e64 v2, 3, s23
+; VI-NEXT: v_lshlrev_b16_e32 v3, 2, v3
+; VI-NEXT: v_or_b32_e32 v2, v2, v3
+; VI-NEXT: v_lshlrev_b16_e64 v3, 1, s21
+; VI-NEXT: v_and_b32_e64 v4, s20, 1
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_and_b32_e32 v3, 3, v3
+; VI-NEXT: v_and_b32_e64 v4, s18, 1
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_lshlrev_b16_e64 v3, 3, s19
+; VI-NEXT: v_lshlrev_b16_e32 v4, 2, v4
+; VI-NEXT: v_or_b32_e32 v3, v3, v4
+; VI-NEXT: v_lshlrev_b16_e64 v4, 1, s17
+; VI-NEXT: v_and_b32_e64 v5, s16, 1
+; VI-NEXT: v_or_b32_e32 v4, v5, v4
+; VI-NEXT: v_and_b32_e32 v4, 3, v4
+; VI-NEXT: v_or_b32_e32 v3, v4, v3
+; VI-NEXT: v_lshlrev_b16_e32 v2, 4, v2
+; VI-NEXT: v_and_b32_e32 v3, 15, v3
+; VI-NEXT: v_or_b32_e32 v2, v3, v2
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: flat_store_dword v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_inreg_v32i1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v0, s14, 1
+; GFX11-NEXT: v_lshlrev_b16 v1, 1, s13
+; GFX11-NEXT: v_and_b32_e64 v2, s12, 1
+; GFX11-NEXT: v_lshlrev_b16 v3, 3, s15
+; GFX11-NEXT: v_lshlrev_b16 v4, 1, s9
+; GFX11-NEXT: v_lshlrev_b16 v0, 2, v0
+; GFX11-NEXT: v_and_b32_e64 v5, s8, 1
+; GFX11-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX11-NEXT: v_and_b32_e64 v2, s10, 1
+; GFX11-NEXT: v_lshlrev_b16 v6, 1, s5
+; GFX11-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX11-NEXT: v_lshlrev_b16 v3, 3, s11
+; GFX11-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX11-NEXT: v_lshlrev_b16 v2, 2, v2
+; GFX11-NEXT: v_and_b32_e64 v5, s6, 1
+; GFX11-NEXT: v_and_b32_e64 v7, s4, 1
+; GFX11-NEXT: v_lshlrev_b16 v8, 1, s1
+; GFX11-NEXT: v_and_b32_e64 v9, s0, 1
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 3, v4
+; GFX11-NEXT: v_lshlrev_b16 v4, 3, s7
+; GFX11-NEXT: v_lshlrev_b16 v5, 2, v5
+; GFX11-NEXT: v_or_b32_e32 v6, v7, v6
+; GFX11-NEXT: v_and_b32_e64 v7, s2, 1
+; GFX11-NEXT: v_and_b32_e32 v1, 3, v1
+; GFX11-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-NEXT: v_and_b32_e32 v5, 3, v6
+; GFX11-NEXT: v_lshlrev_b16 v6, 3, s3
+; GFX11-NEXT: v_lshlrev_b16 v7, 2, v7
+; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX11-NEXT: v_or_b32_e32 v1, v3, v2
+; GFX11-NEXT: v_or_b32_e32 v2, v5, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 3, v8
+; GFX11-NEXT: v_or_b32_e32 v3, v6, v7
+; GFX11-NEXT: v_lshlrev_b16 v6, 1, s29
+; GFX11-NEXT: v_and_b32_e64 v7, s28, 1
+; GFX11-NEXT: v_lshlrev_b16 v9, 1, s25
+; GFX11-NEXT: v_and_b32_e64 v10, s24, 1
+; GFX11-NEXT: v_or_b32_e32 v3, v4, v3
+; GFX11-NEXT: v_and_b32_e64 v4, s26, 1
+; GFX11-NEXT: v_lshlrev_b16 v8, 3, s27
+; GFX11-NEXT: v_or_b32_e32 v6, v7, v6
+; GFX11-NEXT: v_or_b32_e32 v7, v10, v9
+; GFX11-NEXT: v_and_b32_e64 v9, s22, 1
+; GFX11-NEXT: v_lshlrev_b16 v4, 2, v4
+; GFX11-NEXT: v_lshlrev_b16 v10, 1, s21
+; GFX11-NEXT: v_and_b32_e64 v12, s20, 1
+; GFX11-NEXT: v_and_b32_e64 v13, s18, 1
+; GFX11-NEXT: v_lshlrev_b16 v14, 1, s17
+; GFX11-NEXT: v_and_b32_e64 v15, s16, 1
+; GFX11-NEXT: v_and_b32_e64 v5, s30, 1
+; GFX11-NEXT: v_or_b32_e32 v4, v8, v4
+; GFX11-NEXT: v_lshlrev_b16 v8, 3, s23
+; GFX11-NEXT: v_lshlrev_b16 v9, 2, v9
+; GFX11-NEXT: v_or_b32_e32 v10, v12, v10
+; GFX11-NEXT: v_lshlrev_b16 v12, 3, s19
+; GFX11-NEXT: v_lshlrev_b16 v13, 2, v13
+; GFX11-NEXT: v_or_b32_e32 v14, v15, v14
+; GFX11-NEXT: v_lshlrev_b16 v11, 3, s31
+; GFX11-NEXT: v_lshlrev_b16 v5, 2, v5
+; GFX11-NEXT: v_and_b32_e32 v7, 3, v7
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 3, v10
+; GFX11-NEXT: v_or_b32_e32 v10, v12, v13
+; GFX11-NEXT: v_and_b32_e32 v12, 3, v14
+; GFX11-NEXT: v_or_b32_e32 v5, v11, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 3, v6
+; GFX11-NEXT: v_or_b32_e32 v4, v7, v4
+; GFX11-NEXT: v_or_b32_e32 v7, v9, v8
+; GFX11-NEXT: v_or_b32_e32 v8, v12, v10
+; GFX11-NEXT: v_and_b32_e32 v1, 15, v1
+; GFX11-NEXT: v_lshlrev_b16 v2, 4, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 15, v3
+; GFX11-NEXT: v_or_b32_e32 v5, v6, v5
+; GFX11-NEXT: v_and_b32_e32 v4, 15, v4
+; GFX11-NEXT: v_lshlrev_b16 v6, 4, v7
+; GFX11-NEXT: v_and_b32_e32 v7, 15, v8
+; GFX11-NEXT: v_lshlrev_b16 v0, 12, v0
+; GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-NEXT: v_lshlrev_b16 v3, 12, v5
+; GFX11-NEXT: v_lshlrev_b16 v4, 8, v4
+; GFX11-NEXT: v_or_b32_e32 v5, v7, v6
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v2, v3, v4
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX11-NEXT: v_or_b32_e32 v1, v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: global_store_b32 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store <32 x i1> %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_i1_sext(i1 signext %arg0) {
+; SI-LABEL: amdgpu_cs_i1_sext:
+; SI: ; %bb.0:
+; SI-NEXT: v_and_b32_e32 v0, 1, v0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_i1_sext:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, 1, v0
+; VI-NEXT: flat_store_byte v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_i1_sext:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: global_store_b8 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store i1 %arg0, ptr addrspace(1) undef
+ ret void
+}
+
+define amdgpu_cs void @amdgpu_cs_i1_zext(i1 zeroext %arg0) {
+; SI-LABEL: amdgpu_cs_i1_zext:
+; SI: ; %bb.0:
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: amdgpu_cs_i1_zext:
+; VI: ; %bb.0:
+; VI-NEXT: flat_store_byte v[0:1], v0
+; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: amdgpu_cs_i1_zext:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_store_b8 v[0:1], v0, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+ store i1 %arg0, ptr addrspace(1) undef
+ ret void
+}
+
attributes #0 = { nounwind noinline }
diff --git a/llvm/test/CodeGen/AMDGPU/clamp.ll b/llvm/test/CodeGen/AMDGPU/clamp.ll
index 7c1c24f4a67d..dfadd8d205b0 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp.ll
+++ b/llvm/test/CodeGen/AMDGPU/clamp.ll
@@ -525,6 +525,7 @@ define amdgpu_kernel void @v_clamp_multi_use_max_f32(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_max_num_f32_e32 v1, 0, v1
; GFX12-NEXT: v_min_num_f32_e32 v2, 1.0, v1
; GFX12-NEXT: global_store_b32 v0, v2, s[0:1]
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v1, off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll
new file mode 100644
index 000000000000..94c571a29f99
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll
@@ -0,0 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
+
+; Check that CGP doesn't try to create a amdgcn.addrspace.nonnull of vector, as that's not supported.
+
+define <4 x ptr> @vec_of_local_to_flat_nonnull_arg() {
+; OPT-LABEL: define <4 x ptr> @vec_of_local_to_flat_nonnull_arg() {
+; OPT-NEXT: [[X:%.*]] = addrspacecast <4 x ptr addrspace(3)> zeroinitializer to <4 x ptr>
+; OPT-NEXT: ret <4 x ptr> [[X]]
+;
+ %x = addrspacecast <4 x ptr addrspace(3)> zeroinitializer to <4 x ptr>
+ ret <4 x ptr> %x
+}
diff --git a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
new file mode 100644
index 000000000000..bcdfb75ab1ef
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
@@ -0,0 +1,272 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,DAGISEL-ASM
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,GISEL-ASM
+
+; Tests that we can avoid nullptr checks for addrspacecasts from/to priv/local.
+;
+; Whenever a testcase is successful, we should see the addrspacecast replaced with the intrinsic
+; and the resulting code should have no select/cndmask null check for the pointer.
+
+define void @local_to_flat_nonnull_arg(ptr addrspace(3) nonnull %ptr) {
+; OPT-LABEL: define void @local_to_flat_nonnull_arg(
+; OPT-SAME: ptr addrspace(3) nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: local_to_flat_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_shared_base
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr addrspace(3) %ptr to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+define void @private_to_flat_nonnull_arg(ptr addrspace(5) nonnull %ptr) {
+; OPT-LABEL: define void @private_to_flat_nonnull_arg(
+; OPT-SAME: ptr addrspace(5) nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: private_to_flat_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr addrspace(5) %ptr to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+define void @flat_to_local_nonnull_arg(ptr nonnull %ptr) {
+; OPT-LABEL: define void @flat_to_local_nonnull_arg(
+; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[TMP1:%.*]] = call ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr addrspace(3) [[TMP1]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: flat_to_local_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: ds_write_b32 v0, v1
+; ASM-NEXT: s_waitcnt lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr %ptr to ptr addrspace(3)
+ store volatile i32 7, ptr addrspace(3) %x
+ ret void
+}
+
+define void @flat_to_private_nonnull_arg(ptr nonnull %ptr) {
+; OPT-LABEL: define void @flat_to_private_nonnull_arg(
+; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[TMP1:%.*]] = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[TMP1]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: flat_to_private_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT: s_waitcnt vmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr %ptr to ptr addrspace(5)
+ store volatile i32 7, ptr addrspace(5) %x
+ ret void
+}
+
+define void @private_alloca_to_flat(ptr %ptr) {
+; OPT-LABEL: define void @private_alloca_to_flat(
+; OPT-SAME: ptr [[PTR:%.*]]) {
+; OPT-NEXT: [[ALLOCA:%.*]] = alloca i8, align 1, addrspace(5)
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[ALLOCA]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: private_alloca_to_flat:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %alloca = alloca i8, addrspace(5)
+ %x = addrspacecast ptr addrspace(5) %alloca to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+@lds = internal unnamed_addr addrspace(3) global i8 poison, align 4
+
+define void @knownbits_on_flat_to_priv(ptr %ptr) {
+; OPT-LABEL: define void @knownbits_on_flat_to_priv(
+; OPT-SAME: ptr [[PTR:%.*]]) {
+; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr [[PTR]] to i64
+; OPT-NEXT: [[PTR_OR:%.*]] = or i64 [[PTR_INT]], 15
+; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i64 [[PTR_OR]] to ptr
+; OPT-NEXT: [[TMP1:%.*]] = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr [[KB_PTR]])
+; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[TMP1]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: knownbits_on_flat_to_priv:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_or_b32_e32 v0, 15, v0
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT: s_waitcnt vmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %ptr.int = ptrtoint ptr %ptr to i64
+ %ptr.or = or i64 %ptr.int, 15 ; set some low bits
+ %kb.ptr = inttoptr i64 %ptr.or to ptr
+ %x = addrspacecast ptr %kb.ptr to ptr addrspace(5)
+ store volatile i32 7, ptr addrspace(5) %x
+ ret void
+}
+
+define void @knownbits_on_priv_to_flat(ptr addrspace(5) %ptr) {
+; OPT-LABEL: define void @knownbits_on_priv_to_flat(
+; OPT-SAME: ptr addrspace(5) [[PTR:%.*]]) {
+; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr addrspace(5) [[PTR]] to i32
+; OPT-NEXT: [[PTR_OR:%.*]] = and i32 [[PTR_INT]], 65535
+; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i32 [[PTR_OR]] to ptr addrspace(5)
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[KB_PTR]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: knownbits_on_priv_to_flat:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %ptr.int = ptrtoint ptr addrspace(5) %ptr to i32
+ %ptr.or = and i32 %ptr.int, 65535 ; ensure only lower 16 bits can be set.
+ %kb.ptr = inttoptr i32 %ptr.or to ptr addrspace(5)
+ %x = addrspacecast ptr addrspace(5) %kb.ptr to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+define void @recursive_phis(i1 %cond, ptr addrspace(5) %ptr) {
+; OPT-LABEL: define void @recursive_phis(
+; OPT-SAME: i1 [[COND:%.*]], ptr addrspace(5) [[PTR:%.*]]) {
+; OPT-NEXT: entry:
+; OPT-NEXT: [[ALLOCA:%.*]] = alloca i8, align 1, addrspace(5)
+; OPT-NEXT: br i1 [[COND]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; OPT: then:
+; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr addrspace(5) [[PTR]] to i32
+; OPT-NEXT: [[PTR_OR:%.*]] = and i32 [[PTR_INT]], 65535
+; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i32 [[PTR_OR]] to ptr addrspace(5)
+; OPT-NEXT: br label [[FINALLY:%.*]]
+; OPT: else:
+; OPT-NEXT: [[OTHER_PHI:%.*]] = phi ptr addrspace(5) [ [[ALLOCA]], [[ENTRY:%.*]] ], [ [[PHI_PTR:%.*]], [[FINALLY]] ]
+; OPT-NEXT: br label [[FINALLY]]
+; OPT: finally:
+; OPT-NEXT: [[PHI_PTR]] = phi ptr addrspace(5) [ [[KB_PTR]], [[THEN]] ], [ [[OTHER_PHI]], [[ELSE]] ]
+; OPT-NEXT: [[TMP0:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[PHI_PTR]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP0]], align 4
+; OPT-NEXT: br i1 [[COND]], label [[ELSE]], label [[END:%.*]]
+; OPT: end:
+; OPT-NEXT: ret void
+;
+; DAGISEL-ASM-LABEL: recursive_phis:
+; DAGISEL-ASM: ; %bb.0: ; %entry
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: v_and_b32_e32 v0, 1, v0
+; DAGISEL-ASM-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; DAGISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; DAGISEL-ASM-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; DAGISEL-ASM-NEXT: ; %bb.1: ; %then
+; DAGISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; DAGISEL-ASM-NEXT: ; %bb.2: ; %finallyendcf.split
+; DAGISEL-ASM-NEXT: s_or_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT: s_xor_b64 s[6:7], vcc, -1
+; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], 0
+; DAGISEL-ASM-NEXT: s_mov_b64 s[8:9], src_private_base
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; DAGISEL-ASM-NEXT: .LBB7_3: ; %finally
+; DAGISEL-ASM-NEXT: ; =>This Inner Loop Header: Depth=1
+; DAGISEL-ASM-NEXT: s_and_b64 s[10:11], exec, s[6:7]
+; DAGISEL-ASM-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s9
+; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0)
+; DAGISEL-ASM-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT: s_cbranch_execnz .LBB7_3
+; DAGISEL-ASM-NEXT: ; %bb.4: ; %end
+; DAGISEL-ASM-NEXT: s_or_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT: s_waitcnt lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-ASM-LABEL: recursive_phis:
+; GISEL-ASM: ; %bb.0: ; %entry
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-ASM-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; GISEL-ASM-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-ASM-NEXT: ; %bb.1: ; %then
+; GISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GISEL-ASM-NEXT: ; %bb.2: ; %finallyendcf.split
+; GISEL-ASM-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT: s_mov_b64 s[8:9], src_private_base
+; GISEL-ASM-NEXT: s_mov_b64 s[6:7], 0
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; GISEL-ASM-NEXT: .LBB7_3: ; %finally
+; GISEL-ASM-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-ASM-NEXT: s_and_b64 s[8:9], exec, s[4:5]
+; GISEL-ASM-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0)
+; GISEL-ASM-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT: s_cbranch_execnz .LBB7_3
+; GISEL-ASM-NEXT: ; %bb.4: ; %end
+; GISEL-ASM-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %alloca = alloca i8, addrspace(5)
+ br i1 %cond, label %then, label %else
+
+then:
+ %ptr.int = ptrtoint ptr addrspace(5) %ptr to i32
+ %ptr.or = and i32 %ptr.int, 65535 ; ensure low bits are zeroes
+ %kb.ptr = inttoptr i32 %ptr.or to ptr addrspace(5)
+ br label %finally
+
+else:
+ %other.phi = phi ptr addrspace(5) [%alloca, %entry], [%phi.ptr, %finally]
+ br label %finally
+
+finally:
+ %phi.ptr = phi ptr addrspace(5) [%kb.ptr, %then], [%other.phi, %else]
+ %x = addrspacecast ptr addrspace(5) %phi.ptr to ptr
+ store volatile i32 7, ptr %x
+ br i1 %cond, label %else, label %end
+
+end:
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz.ll b/llvm/test/CodeGen/AMDGPU/ctlz.ll
index 9307d8952293..4decf39d0401 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz.ll
@@ -492,9 +492,9 @@ define amdgpu_kernel void @v_ctlz_i8(ptr addrspace(1) noalias %out, ptr addrspac
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: v_min_u32_e32 v0, 32, v0
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, 24, v0
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
@@ -512,9 +512,9 @@ define amdgpu_kernel void @v_ctlz_i8(ptr addrspace(1) noalias %out, ptr addrspac
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; VI-NEXT: v_ffbh_u32_e32 v0, v0
; VI-NEXT: v_min_u32_e32 v0, 32, v0
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, 24, v0
; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
@@ -522,7 +522,7 @@ define amdgpu_kernel void @v_ctlz_i8(ptr addrspace(1) noalias %out, ptr addrspac
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
-; EG-NEXT: ALU 16, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 15, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: CF_END
; EG-NEXT: PAD
@@ -531,15 +531,14 @@ define amdgpu_kernel void @v_ctlz_i8(ptr addrspace(1) noalias %out, ptr addrspac
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
-; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
-; EG-NEXT: FFBH_UINT T1.W, PV.W,
-; EG-NEXT: AND_INT * T2.W, KC0[2].Y, literal.x,
-; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT * T0.W, T0.W, literal.x, PV.W,
-; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT * T0.W, T0.X,
+; EG-NEXT: CNDE_INT T0.W, T0.X, literal.x, PV.W,
+; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.y,
+; EG-NEXT: 32(4.484155e-44), 3(4.203895e-45)
+; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
+; EG-NEXT: -24(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
-; EG-NEXT: LSHL * T1.W, T2.W, literal.y,
+; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
@@ -556,9 +555,9 @@ define amdgpu_kernel void @v_ctlz_i8(ptr addrspace(1) noalias %out, ptr addrspac
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_ubyte v1, v0, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1
; GFX10-NEXT: v_ffbh_u32_e32 v1, v1
; GFX10-NEXT: v_min_u32_e32 v1, 32, v1
+; GFX10-NEXT: v_subrev_nc_u32_e32 v1, 24, v1
; GFX10-NEXT: global_store_byte v0, v1, s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -582,10 +581,10 @@ define amdgpu_kernel void @v_ctlz_i8(ptr addrspace(1) noalias %out, ptr addrspac
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_load_u8 v1, v0, s[2:3]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_clz_i32_u32_e32 v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_min_u32_e32 v1, 32, v1
+; GFX11-NEXT: v_subrev_nc_u32_e32 v1, 24, v1
; GFX11-NEXT: global_store_b8 v0, v1, s[0:1]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 2830e5258e92..21aff62b9226 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -314,8 +314,9 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshl_b32 s2, s2, 24
-; SI-NEXT: s_flbit_i32_b32 s4, s2
+; SI-NEXT: s_and_b32 s2, s2, 0xff
+; SI-NEXT: s_flbit_i32_b32 s2, s2
+; SI-NEXT: s_sub_i32 s4, s2, 24
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
@@ -326,8 +327,9 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_lshl_b32 s2, s2, 24
+; VI-NEXT: s_and_b32 s2, s2, 0xff
; VI-NEXT: s_flbit_i32_b32 s2, s2
+; VI-NEXT: s_sub_i32 s2, s2, 24
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
@@ -347,13 +349,13 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, 0.0,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
-; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
-; EG-NEXT: FFBH_UINT T0.W, PV.W,
+; EG-NEXT: FFBH_UINT T0.W, T0.X,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
+; EG-NEXT: -24(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
-; EG-NEXT: LSHL * T1.W, PS, literal.y,
+; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
@@ -389,8 +391,9 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshl_b32 s2, s2, 16
-; SI-NEXT: s_flbit_i32_b32 s4, s2
+; SI-NEXT: s_and_b32 s2, s2, 0xffff
+; SI-NEXT: s_flbit_i32_b32 s2, s2
+; SI-NEXT: s_add_i32 s4, s2, -16
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
@@ -423,13 +426,13 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, 0.0,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
-; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
-; EG-NEXT: FFBH_UINT T0.W, PV.W,
+; EG-NEXT: FFBH_UINT T0.W, T0.X,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
+; EG-NEXT: -16(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
-; EG-NEXT: LSHL * T1.W, PS, literal.y,
+; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
@@ -587,8 +590,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v0
-; SI-NEXT: v_ffbh_u32_e32 v1, v1
+; SI-NEXT: v_ffbh_u32_e32 v1, v0
+; SI-NEXT: v_subrev_i32_e32 v1, vcc, 24, v1
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
@@ -602,8 +605,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_ubyte v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshlrev_b32_e32 v1, 24, v0
-; VI-NEXT: v_ffbh_u32_e32 v1, v1
+; VI-NEXT: v_ffbh_u32_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; VI-NEXT: v_subrev_u32_e32 v1, vcc, 24, v1
; VI-NEXT: v_cmp_ne_u16_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v2, 32, v1, vcc
; VI-NEXT: v_mov_b32_e32 v0, s0
@@ -615,7 +618,7 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
-; EG-NEXT: ALU 16, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 15, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: CF_END
; EG-NEXT: PAD
@@ -624,11 +627,10 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
-; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
-; EG-NEXT: FFBH_UINT T0.W, PV.W,
-; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
-; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT * T0.W, T0.X,
+; EG-NEXT: ADD_INT T0.W, PV.W, literal.x,
+; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.y,
+; EG-NEXT: -24(nan), 3(4.203895e-45)
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
@@ -683,8 +685,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; SI-NEXT: v_ffbh_u32_e32 v1, v1
+; SI-NEXT: v_ffbh_u32_e32 v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, -16, v1
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -719,7 +721,7 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
-; EG-NEXT: ALU 16, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 15, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: CF_END
; EG-NEXT: PAD
@@ -728,11 +730,10 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
-; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
-; EG-NEXT: FFBH_UINT T0.W, PV.W,
-; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
-; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT * T0.W, T0.X,
+; EG-NEXT: ADD_INT T0.W, PV.W, literal.x,
+; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.y,
+; EG-NEXT: -16(nan), 3(4.203895e-45)
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
@@ -1101,8 +1102,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: v_ffbh_u32_e32 v0, v0
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, 24, v0
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
@@ -1115,8 +1116,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ubyte v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; VI-NEXT: v_ffbh_u32_e32 v2, v0
+; VI-NEXT: v_ffbh_u32_e32 v0, v0
+; VI-NEXT: v_subrev_u32_e32 v2, vcc, 24, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_byte v[0:1], v2
@@ -1135,13 +1136,13 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
-; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
-; EG-NEXT: FFBH_UINT T0.W, PV.W,
+; EG-NEXT: FFBH_UINT T0.W, T0.X,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
+; EG-NEXT: -24(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
-; EG-NEXT: LSHL * T1.W, PS, literal.y,
+; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 5296ad3ab51d..2f3d5d9d140c 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -2310,2860 +2310,6 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
ret i128 %div
}
-define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
-; GFX9-LABEL: v_srem_i128_vv:
-; GFX9: ; %bb.0: ; %_udiv-special-cases
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_ashrrev_i32_e32 v20, 31, v3
-; GFX9-NEXT: v_xor_b32_e32 v0, v0, v20
-; GFX9-NEXT: v_xor_b32_e32 v10, v2, v20
-; GFX9-NEXT: v_xor_b32_e32 v1, v1, v20
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v20
-; GFX9-NEXT: v_xor_b32_e32 v9, v3, v20
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v20, vcc
-; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v10, v20, vcc
-; GFX9-NEXT: v_xor_b32_e32 v4, v4, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v9, v20, vcc
-; GFX9-NEXT: v_xor_b32_e32 v5, v5, v8
-; GFX9-NEXT: v_sub_co_u32_e32 v23, vcc, v4, v8
-; GFX9-NEXT: v_xor_b32_e32 v6, v6, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v21, vcc, v5, v8, vcc
-; GFX9-NEXT: v_xor_b32_e32 v7, v7, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v6, v8, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v8, vcc
-; GFX9-NEXT: v_or_b32_e32 v7, v21, v5
-; GFX9-NEXT: v_or_b32_e32 v6, v23, v4
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_or_b32_e32 v7, v3, v1
-; GFX9-NEXT: v_or_b32_e32 v6, v2, v0
-; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
-; GFX9-NEXT: v_ffbh_u32_e32 v6, v4
-; GFX9-NEXT: v_add_u32_e32 v6, 32, v6
-; GFX9-NEXT: v_ffbh_u32_e32 v7, v5
-; GFX9-NEXT: v_min_u32_e32 v6, v6, v7
-; GFX9-NEXT: v_ffbh_u32_e32 v7, v23
-; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
-; GFX9-NEXT: v_ffbh_u32_e32 v8, v21
-; GFX9-NEXT: v_min_u32_e32 v7, v7, v8
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 64, v7
-; GFX9-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
-; GFX9-NEXT: v_ffbh_u32_e32 v7, v0
-; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
-; GFX9-NEXT: v_min_u32_e32 v7, v7, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
-; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v10, v3
-; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
-; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
-; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
-; GFX9-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v8, v10, vcc
-; GFX9-NEXT: v_mov_b32_e32 v9, 0
-; GFX9-NEXT: v_subbrev_co_u32_e32 v8, vcc, 0, v9, vcc
-; GFX9-NEXT: v_subbrev_co_u32_e32 v9, vcc, 0, v9, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[6:7]
-; GFX9-NEXT: v_or_b32_e32 v13, v7, v9
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GFX9-NEXT: v_mov_b32_e32 v22, v20
-; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
-; GFX9-NEXT: v_and_b32_e32 v10, 1, v10
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
-; GFX9-NEXT: v_xor_b32_e32 v10, 0x7f, v6
-; GFX9-NEXT: v_or_b32_e32 v12, v10, v8
-; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GFX9-NEXT: v_cndmask_b32_e64 v11, v1, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v10, v3, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[4:5]
-; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
-; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB2_6
-; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
-; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 1, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v7, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, 0, v8, vcc
-; GFX9-NEXT: v_sub_u32_e32 v13, 0x7f, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v9, vcc
-; GFX9-NEXT: v_sub_u32_e32 v11, 64, v13
-; GFX9-NEXT: v_or_b32_e32 v8, v25, v27
-; GFX9-NEXT: v_or_b32_e32 v7, v24, v26
-; GFX9-NEXT: v_lshlrev_b64 v[9:10], v13, v[0:1]
-; GFX9-NEXT: v_lshrrev_b64 v[11:12], v11, v[2:3]
-; GFX9-NEXT: v_sub_u32_e32 v6, 63, v6
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
-; GFX9-NEXT: v_lshlrev_b64 v[6:7], v6, v[2:3]
-; GFX9-NEXT: v_or_b32_e32 v8, v10, v12
-; GFX9-NEXT: v_or_b32_e32 v9, v9, v11
-; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v13
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v13
-; GFX9-NEXT: v_lshlrev_b64 v[12:13], v13, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v8, 0
-; GFX9-NEXT: v_mov_b32_e32 v10, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v1, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v0, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, v13, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v9, 0
-; GFX9-NEXT: v_mov_b32_e32 v11, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, v12, s[4:5]
-; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB2_5
-; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
-; GFX9-NEXT: v_sub_u32_e32 v10, 64, v24
-; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[2:3]
-; GFX9-NEXT: v_lshlrev_b64 v[10:11], v10, v[0:1]
-; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
-; GFX9-NEXT: v_or_b32_e32 v10, v8, v10
-; GFX9-NEXT: v_subrev_u32_e32 v8, 64, v24
-; GFX9-NEXT: v_or_b32_e32 v11, v9, v11
-; GFX9-NEXT: v_lshrrev_b64 v[8:9], v8, v[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v24
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v15, v9, v3, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v8, v10, vcc
-; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v14, v10, v2, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v17, 0, v9, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v16, 0, v8, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, -1, v23
-; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v21, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v30, vcc, -1, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v18, 0
-; GFX9-NEXT: v_mov_b32_e32 v10, 0
-; GFX9-NEXT: v_addc_co_u32_e32 v31, vcc, -1, v5, vcc
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_mov_b32_e32 v19, 0
-; GFX9-NEXT: v_mov_b32_e32 v11, 0
-; GFX9-NEXT: v_mov_b32_e32 v9, 0
-; GFX9-NEXT: .LBB2_3: ; %udiv-do-while
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 31, v15
-; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[14:15]
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 31, v7
-; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
-; GFX9-NEXT: v_lshrrev_b32_e32 v8, 31, v13
-; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
-; GFX9-NEXT: v_or_b32_e32 v14, v14, v33
-; GFX9-NEXT: v_or3_b32 v6, v6, v8, v10
-; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v28, v14
-; GFX9-NEXT: v_or_b32_e32 v16, v16, v32
-; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v29, v15, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v30, v16, vcc
-; GFX9-NEXT: v_lshlrev_b64 v[12:13], 1, v[12:13]
-; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v31, v17, vcc
-; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v8
-; GFX9-NEXT: v_or_b32_e32 v12, v18, v12
-; GFX9-NEXT: v_and_b32_e32 v18, v8, v23
-; GFX9-NEXT: v_or_b32_e32 v13, v19, v13
-; GFX9-NEXT: v_and_b32_e32 v19, v8, v21
-; GFX9-NEXT: v_sub_co_u32_e32 v14, vcc, v14, v18
-; GFX9-NEXT: v_and_b32_e32 v32, v8, v4
-; GFX9-NEXT: v_subb_co_u32_e32 v15, vcc, v15, v19, vcc
-; GFX9-NEXT: v_and_b32_e32 v33, v8, v5
-; GFX9-NEXT: v_subb_co_u32_e32 v16, vcc, v16, v32, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v33, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, -1, v24
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, -1, v26, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v27, vcc
-; GFX9-NEXT: v_or_b32_e32 v18, v24, v26
-; GFX9-NEXT: v_or_b32_e32 v19, v25, v27
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
-; GFX9-NEXT: v_and_b32_e32 v8, 1, v8
-; GFX9-NEXT: v_mov_b32_e32 v19, v9
-; GFX9-NEXT: v_or3_b32 v7, v7, 0, v11
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v18, v8
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB2_3
-; GFX9-NEXT: ; %bb.4: ; %Flow
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: .LBB2_5: ; %Flow2
-; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[12:13]
-; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
-; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v13
-; GFX9-NEXT: v_or3_b32 v11, v7, 0, v11
-; GFX9-NEXT: v_or3_b32 v12, v6, v12, v10
-; GFX9-NEXT: v_or_b32_e32 v10, v9, v15
-; GFX9-NEXT: v_or_b32_e32 v13, v8, v14
-; GFX9-NEXT: .LBB2_6: ; %Flow3
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
-; GFX9-NEXT: v_mul_lo_u32 v16, v13, v5
-; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v23, v13, 0
-; GFX9-NEXT: v_mov_b32_e32 v15, 0
-; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v13, v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v14, v6
-; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v21, v13, v[14:15]
-; GFX9-NEXT: v_mul_lo_u32 v9, v10, v4
-; GFX9-NEXT: v_mul_lo_u32 v11, v11, v23
-; GFX9-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-NEXT: v_mov_b32_e32 v14, v15
-; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v23, v10, v[13:14]
-; GFX9-NEXT: v_add3_u32 v8, v8, v16, v9
-; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v12, v23, v[7:8]
-; GFX9-NEXT: v_mov_b32_e32 v8, v14
-; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v4, v8
-; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], 0, 0, vcc
-; GFX9-NEXT: v_mul_lo_u32 v12, v12, v21
-; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v21, v10, v[8:9]
-; GFX9-NEXT: v_add3_u32 v4, v11, v7, v12
-; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v8, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v9, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v7, v13
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v5
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v7, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
-; GFX9-NEXT: v_xor_b32_e32 v5, v0, v20
-; GFX9-NEXT: v_xor_b32_e32 v0, v2, v20
-; GFX9-NEXT: v_xor_b32_e32 v4, v1, v22
-; GFX9-NEXT: v_xor_b32_e32 v1, v3, v22
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v20
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v22, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v5, v20, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v22, vcc
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_srem_i128_vv:
-; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v0
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
-; GFX9-O0-NEXT: v_ashrrev_i64 v[12:13], s4, v[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v13
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v13
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
-; GFX9-O0-NEXT: v_xor_b32_e64 v13, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_xor_b32_e64 v15, v4, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_xor_b32_e64 v7, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_xor_b32_e64 v2, v2, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v16
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v14
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v9, vcc, v9, v12
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v10, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v13, vcc, v11, v12, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v10, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v8
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v1, vcc, v1, v6
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v5, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
-; GFX9-O0-NEXT: s_mov_b32 s9, 32
-; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
-; GFX9-O0-NEXT: s_mov_b32 s8, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: s_mov_b32 s12, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s14, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[6:7]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr9
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
-; GFX9-O0-NEXT: s_mov_b32 s8, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s10, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[6:7]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: s_mov_b32 s10, s6
-; GFX9-O0-NEXT: s_mov_b32 s11, s7
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
-; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
-; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: s_mov_b32 s14, s13
-; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
-; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
-; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
-; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
-; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB2_3
-; GFX9-O0-NEXT: s_branch .LBB2_8
-; GFX9-O0-NEXT: .LBB2_1: ; %Flow
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
-; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: ; %bb.2: ; %Flow
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_5
-; GFX9-O0-NEXT: .LBB2_3: ; %Flow2
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
-; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_9
-; GFX9-O0-NEXT: .LBB2_4: ; %udiv-loop-exit
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 1
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
-; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_3
-; GFX9-O0-NEXT: .LBB2_5: ; %Flow1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
-; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_4
-; GFX9-O0-NEXT: .LBB2_6: ; %udiv-do-while
-; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
-; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
-; GFX9-O0-NEXT: s_mov_b32 s5, 1
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
-; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
-; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
-; GFX9-O0-NEXT: s_mov_b32 s8, s5
-; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
-; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execnz .LBB2_6
-; GFX9-O0-NEXT: s_branch .LBB2_1
-; GFX9-O0-NEXT: .LBB2_7: ; %udiv-preheader
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: s_mov_b32 s6, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
-; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
-; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s6, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_6
-; GFX9-O0-NEXT: .LBB2_8: ; %udiv-bb1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
-; GFX9-O0-NEXT: s_mov_b32 s4, s7
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: s_mov_b32 s9, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
-; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
-; GFX9-O0-NEXT: s_mov_b32 s4, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
-; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
-; GFX9-O0-NEXT: s_mov_b32 s10, 63
-; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s10, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
-; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB2_5
-; GFX9-O0-NEXT: s_branch .LBB2_7
-; GFX9-O0-NEXT: .LBB2_9: ; %udiv-end
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v17
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v1, v0
-; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[17:18]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mul_lo_u32 v2, v5, v2
-; GFX9-O0-NEXT: v_mad_u64_u32 v[17:18], s[6:7], v5, v0, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
-; GFX9-O0-NEXT: v_add3_u32 v2, v0, v2, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 killed $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: s_mov_b32 s5, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
-; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
-; GFX9-O0-NEXT: v_or_b32_e64 v17, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
-; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v6
-; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v19
-; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v0
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v2, v0, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v20
-; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v17, s[6:7], v11, v12
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v2
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v1, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v12
-; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v19
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v5, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v19
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v21
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v19
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v6
-; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v0, v5, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
-; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v20
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v6, v19, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v6
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_and_b32_e64 v19, v19, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
-; GFX9-O0-NEXT: v_and_b32_e64 v21, v20, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v19
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v0, v1, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v19
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v1
-; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v23
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v1, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
-; GFX9-O0-NEXT: v_add_co_u32_e64 v0, s[6:7], v0, v20
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v1, v19, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v19
-; GFX9-O0-NEXT: v_lshrrev_b64 v[21:22], s4, v[0:1]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v22
-; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v19, v20
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v5, v6, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
-; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v5, v6
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v6
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v3, v5, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_lshlrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v11
-; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v14
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v12
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v11, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v10
-; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_xor_b32_e64 v9, v6, v5
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: v_xor_b32_e64 v0, v0, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: ; kill: killed $vgpr4
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = srem i128 %lhs, %rhs
- ret i128 %div
-}
-
-define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
-; GFX9-LABEL: v_urem_i128_vv:
-; GFX9: ; %bb.0: ; %_udiv-special-cases
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_or_b32_e32 v9, v5, v7
-; GFX9-NEXT: v_or_b32_e32 v8, v4, v6
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GFX9-NEXT: v_or_b32_e32 v9, v1, v3
-; GFX9-NEXT: v_or_b32_e32 v8, v0, v2
-; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
-; GFX9-NEXT: v_ffbh_u32_e32 v8, v6
-; GFX9-NEXT: v_add_u32_e32 v8, 32, v8
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v7
-; GFX9-NEXT: v_min_u32_e32 v8, v8, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v4
-; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v10, v5
-; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
-; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_ffbh_u32_e32 v11, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
-; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
-; GFX9-NEXT: v_min_u32_e32 v9, v9, v11
-; GFX9-NEXT: v_ffbh_u32_e32 v11, v0
-; GFX9-NEXT: v_add_u32_e32 v11, 32, v11
-; GFX9-NEXT: v_ffbh_u32_e32 v12, v1
-; GFX9-NEXT: v_min_u32_e32 v11, v11, v12
-; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, 64, v11
-; GFX9-NEXT: v_addc_co_u32_e64 v12, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, 0, vcc
-; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v8, v9
-; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v10, v12, vcc
-; GFX9-NEXT: v_mov_b32_e32 v11, 0
-; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v11, vcc
-; GFX9-NEXT: v_subbrev_co_u32_e32 v11, vcc, 0, v11, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
-; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GFX9-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
-; GFX9-NEXT: v_and_b32_e32 v12, 1, v12
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v12
-; GFX9-NEXT: v_xor_b32_e32 v12, 0x7f, v8
-; GFX9-NEXT: v_or_b32_e32 v13, v9, v11
-; GFX9-NEXT: v_or_b32_e32 v12, v12, v10
-; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GFX9-NEXT: v_cndmask_b32_e64 v15, v3, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v14, v2, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, v1, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
-; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
-; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB3_6
-; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
-; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 1, v8
-; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v9, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, 0, v10, vcc
-; GFX9-NEXT: v_sub_u32_e32 v15, 0x7f, v8
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v11, vcc
-; GFX9-NEXT: v_sub_u32_e32 v13, 64, v15
-; GFX9-NEXT: v_or_b32_e32 v10, v23, v25
-; GFX9-NEXT: v_or_b32_e32 v9, v22, v24
-; GFX9-NEXT: v_lshlrev_b64 v[11:12], v15, v[2:3]
-; GFX9-NEXT: v_lshrrev_b64 v[13:14], v13, v[0:1]
-; GFX9-NEXT: v_sub_u32_e32 v8, 63, v8
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10]
-; GFX9-NEXT: v_lshlrev_b64 v[8:9], v8, v[0:1]
-; GFX9-NEXT: v_or_b32_e32 v10, v12, v14
-; GFX9-NEXT: v_or_b32_e32 v11, v11, v13
-; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v15
-; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v11, s[4:5]
-; GFX9-NEXT: v_lshlrev_b64 v[10:11], v15, v[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v15
-; GFX9-NEXT: v_mov_b32_e32 v12, 0
-; GFX9-NEXT: v_mov_b32_e32 v14, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v3, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v2, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, v11, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-NEXT: v_mov_b32_e32 v15, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, v10, s[4:5]
-; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB3_5
-; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
-; GFX9-NEXT: v_sub_u32_e32 v14, 64, v22
-; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[0:1]
-; GFX9-NEXT: v_lshlrev_b64 v[14:15], v14, v[2:3]
-; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v22
-; GFX9-NEXT: v_or_b32_e32 v14, v12, v14
-; GFX9-NEXT: v_subrev_u32_e32 v12, 64, v22
-; GFX9-NEXT: v_or_b32_e32 v15, v13, v15
-; GFX9-NEXT: v_lshrrev_b64 v[12:13], v12, v[2:3]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v22
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v15, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v17, v13, v1, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v14, v12, v14, vcc
-; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e64 v16, v14, v0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v19, 0, v13, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v18, 0, v12, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, -1, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v5, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v28, vcc, -1, v6, vcc
-; GFX9-NEXT: v_mov_b32_e32 v20, 0
-; GFX9-NEXT: v_mov_b32_e32 v14, 0
-; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v7, vcc
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_mov_b32_e32 v21, 0
-; GFX9-NEXT: v_mov_b32_e32 v15, 0
-; GFX9-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-NEXT: .LBB3_3: ; %udiv-do-while
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v11
-; GFX9-NEXT: v_lshlrev_b64 v[10:11], 1, v[10:11]
-; GFX9-NEXT: v_lshlrev_b64 v[18:19], 1, v[18:19]
-; GFX9-NEXT: v_or_b32_e32 v10, v20, v10
-; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v17
-; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
-; GFX9-NEXT: v_or_b32_e32 v18, v18, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v9
-; GFX9-NEXT: v_or_b32_e32 v16, v16, v20
-; GFX9-NEXT: v_sub_co_u32_e32 v20, vcc, v26, v16
-; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v27, v17, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v28, v18, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v29, v19, vcc
-; GFX9-NEXT: v_ashrrev_i32_e32 v30, 31, v20
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v4
-; GFX9-NEXT: v_sub_co_u32_e32 v16, vcc, v16, v20
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v5
-; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v20, vcc
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v6
-; GFX9-NEXT: v_subb_co_u32_e32 v18, vcc, v18, v20, vcc
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v19, vcc, v19, v20, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, -1, v22
-; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v23, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, -1, v24, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
-; GFX9-NEXT: v_or_b32_e32 v11, v21, v11
-; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
-; GFX9-NEXT: v_or_b32_e32 v20, v22, v24
-; GFX9-NEXT: v_or_b32_e32 v21, v23, v25
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
-; GFX9-NEXT: v_or3_b32 v8, v8, v12, v14
-; GFX9-NEXT: v_and_b32_e32 v12, 1, v30
-; GFX9-NEXT: v_mov_b32_e32 v21, v13
-; GFX9-NEXT: v_or3_b32 v9, v9, 0, v15
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v20, v12
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: ; %bb.4: ; %Flow
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: .LBB3_5: ; %Flow2
-; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[10:11]
-; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
-; GFX9-NEXT: v_lshrrev_b32_e32 v10, 31, v11
-; GFX9-NEXT: v_or3_b32 v15, v9, 0, v15
-; GFX9-NEXT: v_or3_b32 v14, v8, v10, v14
-; GFX9-NEXT: v_or_b32_e32 v13, v13, v17
-; GFX9-NEXT: v_or_b32_e32 v12, v12, v16
-; GFX9-NEXT: .LBB3_6: ; %Flow3
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
-; GFX9-NEXT: v_mul_lo_u32 v19, v12, v7
-; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v4, v12, 0
-; GFX9-NEXT: v_mov_b32_e32 v17, 0
-; GFX9-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v12, v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v16, v8
-; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v5, v12, v[16:17]
-; GFX9-NEXT: v_mul_lo_u32 v18, v13, v6
-; GFX9-NEXT: v_mul_lo_u32 v16, v15, v4
-; GFX9-NEXT: v_mov_b32_e32 v6, v12
-; GFX9-NEXT: v_mov_b32_e32 v12, v17
-; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v4, v13, v[11:12]
-; GFX9-NEXT: v_add3_u32 v10, v10, v19, v18
-; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v14, v4, v[9:10]
-; GFX9-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-NEXT: v_mul_lo_u32 v10, v14, v5
-; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, v6, v4
-; GFX9-NEXT: v_addc_co_u32_e64 v15, s[4:5], 0, 0, vcc
-; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v5, v13, v[14:15]
-; GFX9-NEXT: v_add3_u32 v6, v16, v9, v10
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v6, vcc
-; GFX9-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v5, vcc
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_urem_i128_vv:
-; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v0
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
-; GFX9-O0-NEXT: s_mov_b32 s9, 32
-; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
-; GFX9-O0-NEXT: s_mov_b32 s8, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: s_mov_b32 s12, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s14, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[12:13]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr9
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
-; GFX9-O0-NEXT: s_mov_b32 s8, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s10, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[8:9]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: s_mov_b32 s10, s6
-; GFX9-O0-NEXT: s_mov_b32 s11, s7
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
-; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
-; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: s_mov_b32 s14, s13
-; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
-; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
-; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
-; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
-; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB3_3
-; GFX9-O0-NEXT: s_branch .LBB3_8
-; GFX9-O0-NEXT: .LBB3_1: ; %Flow
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
-; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: ; %bb.2: ; %Flow
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_5
-; GFX9-O0-NEXT: .LBB3_3: ; %Flow2
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
-; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_9
-; GFX9-O0-NEXT: .LBB3_4: ; %udiv-loop-exit
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 1
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
-; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_3
-; GFX9-O0-NEXT: .LBB3_5: ; %Flow1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
-; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_4
-; GFX9-O0-NEXT: .LBB3_6: ; %udiv-do-while
-; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
-; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
-; GFX9-O0-NEXT: s_mov_b32 s5, 1
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
-; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
-; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
-; GFX9-O0-NEXT: s_mov_b32 s8, s5
-; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
-; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execnz .LBB3_6
-; GFX9-O0-NEXT: s_branch .LBB3_1
-; GFX9-O0-NEXT: .LBB3_7: ; %udiv-preheader
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: s_mov_b32 s6, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
-; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
-; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s6, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_6
-; GFX9-O0-NEXT: .LBB3_8: ; %udiv-bb1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
-; GFX9-O0-NEXT: s_mov_b32 s4, s7
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: s_mov_b32 s9, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
-; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
-; GFX9-O0-NEXT: s_mov_b32 s4, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
-; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
-; GFX9-O0-NEXT: s_mov_b32 s10, 63
-; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s10, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
-; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB3_5
-; GFX9-O0-NEXT: s_branch .LBB3_7
-; GFX9-O0-NEXT: .LBB3_9: ; %udiv-end
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v13
-; GFX9-O0-NEXT: v_mul_lo_u32 v5, v6, v2
-; GFX9-O0-NEXT: v_lshrrev_b64 v[13:14], s4, v[13:14]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v7, v3
-; GFX9-O0-NEXT: v_mad_u64_u32 v[13:14], s[6:7], v7, v2, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_lshlrev_b64 v[17:18], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 killed $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: s_mov_b32 s5, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_or_b32_e64 v13, v3, v5
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v8
-; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v15
-; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v5
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v2, v5, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v16
-; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v14
-; GFX9-O0-NEXT: v_add_co_u32_e64 v13, s[6:7], v11, v12
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v6, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v12
-; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v7, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v8, v8, v17
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v8
-; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v5, v7, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v7, s[6:7], v7, v16
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v8, v15, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v8
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_and_b32_e64 v15, v15, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
-; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
-; GFX9-O0-NEXT: v_and_b32_e64 v17, v16, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v5, v6, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v15
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v6
-; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v6, v15
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v16
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v6, v15, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v15
-; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[5:6]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v15, v16
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v7, v8, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
-; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v7, v8
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
-; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v8
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v3, v7, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v7
-; GFX9-O0-NEXT: v_lshlrev_b64 v[6:7], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: ; kill: killed $vgpr4
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = urem i128 %lhs, %rhs
- ret i128 %div
-}
-
define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-LABEL: v_sdiv_i128_v_pow2k:
; GFX9: ; %bb.0:
@@ -5246,106 +2392,6 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
ret i128 %div
}
-define i128 @v_srem_i128_v_pow2k(i128 %lhs) {
-; GFX9-LABEL: v_srem_i128_v_pow2k:
-; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v3
-; GFX9-NEXT: v_mov_b32_e32 v5, v4
-; GFX9-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v2, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v3, vcc
-; GFX9-NEXT: v_and_b32_e32 v4, -2, v4
-; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 0, v0
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v5, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_srem_i128_v_pow2k:
-; GFX9-O0: ; %bb.0:
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
-; GFX9-O0-NEXT: s_mov_b32 s4, 31
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v7
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_mov_b32 s4, s7
-; GFX9-O0-NEXT: v_add_co_u32_e32 v6, vcc, v5, v4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v4, vcc, v0, v2, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v3, v2, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
-; GFX9-O0-NEXT: s_mov_b32 s6, -2
-; GFX9-O0-NEXT: s_mov_b32 s4, 0
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_mov_b32 s6, s5
-; GFX9-O0-NEXT: v_and_b32_e64 v4, v4, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_and_b32_e64 v9, v6, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v9
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v7
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s4, v[3:4]
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = srem i128 %lhs, 8589934592
- ret i128 %div
-}
-
define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
; GFX9-LABEL: v_udiv_i128_v_pow2k:
; GFX9: ; %bb.0:
@@ -5392,55 +2438,6 @@ define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
ret i128 %div
}
-define i128 @v_urem_i128_v_pow2k(i128 %lhs) {
-; GFX9-LABEL: v_urem_i128_v_pow2k:
-; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, 0
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_urem_i128_v_pow2k:
-; GFX9-O0: ; %bb.0:
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr1 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s6, 1
-; GFX9-O0-NEXT: s_mov_b32 s4, -1
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_mov_b32 s6, s5
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: v_and_b32_e64 v3, v2, s6
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_and_b32_e64 v1, v0, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: v_lshrrev_b64 v[1:2], s4, v[1:2]
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = urem i128 %lhs, 8589934592
- ret i128 %div
-}
-
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX9-SDAG: {{.*}}
; GFX9-SDAG-O0: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index 687d84565692..850be72f06c7 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -893,6 +893,7 @@ define void @store_load_vindex_foo(i32 %idx) {
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b32 v0, v1, s32 scope:SCOPE_SYS
@@ -964,6 +965,7 @@ define void @store_load_vindex_foo(i32 %idx) {
; GFX12-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX12-PAL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-PAL-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b32 v0, v2, s32 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_b32 v0, v1, s32 scope:SCOPE_SYS
@@ -2137,6 +2139,7 @@ define void @store_load_vindex_small_offset_foo(i32 %idx) {
; GFX12-NEXT: scratch_load_b32 v3, off, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, s32 offset:256 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b32 v0, v1, s32 offset:256 scope:SCOPE_SYS
@@ -2221,6 +2224,7 @@ define void @store_load_vindex_small_offset_foo(i32 %idx) {
; GFX12-PAL-NEXT: scratch_load_b32 v3, off, s32 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_loadcnt 0x0
; GFX12-PAL-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b32 v0, v2, s32 offset:256 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_b32 v0, v1, s32 offset:256 scope:SCOPE_SYS
@@ -3382,6 +3386,7 @@ define void @store_load_vindex_large_offset_foo(i32 %idx) {
; GFX12-NEXT: scratch_load_b32 v3, off, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, s32 offset:16384 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b32 v0, v1, s32 offset:16384 scope:SCOPE_SYS
@@ -3468,6 +3473,7 @@ define void @store_load_vindex_large_offset_foo(i32 %idx) {
; GFX12-PAL-NEXT: scratch_load_b32 v3, off, s32 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_loadcnt 0x0
; GFX12-PAL-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b32 v0, v2, s32 offset:16384 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_b32 v0, v1, s32 offset:16384 scope:SCOPE_SYS
@@ -3714,6 +3720,7 @@ define void @store_load_large_imm_offset_foo() {
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 off, v0, s32 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b32 off, v1, s32 offset:16000 scope:SCOPE_SYS
@@ -3789,6 +3796,7 @@ define void @store_load_large_imm_offset_foo() {
; GFX12-PAL-NEXT: s_wait_bvhcnt 0x0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b32 off, v0, s32 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b32 off, v1, s32 offset:16000 scope:SCOPE_SYS
@@ -3998,6 +4006,7 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_mov_b32 v2, 0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b64 v0, v[1:2], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b64 v[0:1], v0, off scope:SCOPE_SYS
@@ -4055,6 +4064,7 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) {
; GFX12-PAL-NEXT: s_wait_bvhcnt 0x0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_mov_b32 v2, 0
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b64 v0, v[1:2], off scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_b64 v[0:1], v0, off scope:SCOPE_SYS
@@ -4107,6 +4117,7 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_mov_b32 v2, 0
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b64 v0, v[1:2], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b64 v[0:1], v0, off scope:SCOPE_SYS
@@ -4164,6 +4175,7 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-PAL-NEXT: s_wait_bvhcnt 0x0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_mov_b32 v2, 0
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b64 v0, v[1:2], off scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_b64 v[0:1], v0, off scope:SCOPE_SYS
@@ -4220,6 +4232,7 @@ define void @store_load_v3i32_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
; GFX12-NEXT: v_mov_b32_e32 v3, 3
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b96 v0, v[1:3], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b96 v[0:2], v0, off scope:SCOPE_SYS
@@ -4282,6 +4295,7 @@ define void @store_load_v3i32_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
; GFX12-PAL-NEXT: v_mov_b32_e32 v3, 3
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b96 v0, v[1:3], off scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_b96 v[0:2], v0, off scope:SCOPE_SYS
@@ -4340,6 +4354,7 @@ define void @store_load_v4i32_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
; GFX12-NEXT: v_dual_mov_b32 v3, 3 :: v_dual_mov_b32 v4, 4
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b128 v0, v[1:4], off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_b128 v[0:3], v0, off scope:SCOPE_SYS
@@ -4405,6 +4420,7 @@ define void @store_load_v4i32_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
; GFX12-PAL-NEXT: v_dual_mov_b32 v3, 3 :: v_dual_mov_b32 v4, 4
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b128 v0, v[1:4], off scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_b128 v[0:3], v0, off scope:SCOPE_SYS
@@ -4456,6 +4472,7 @@ define void @store_load_i32_negative_unaligned(ptr addrspace(5) nocapture %arg)
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, 1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b8 v0, v1, off offset:-1 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_u8 v0, v0, off offset:-1 scope:SCOPE_SYS
@@ -4523,6 +4540,7 @@ define void @store_load_i32_negative_unaligned(ptr addrspace(5) nocapture %arg)
; GFX12-PAL-NEXT: s_wait_bvhcnt 0x0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: v_mov_b32_e32 v1, 1
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b8 v0, v1, off offset:-1 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_u8 v0, v0, off offset:-1 scope:SCOPE_SYS
@@ -4576,6 +4594,7 @@ define void @store_load_i32_large_negative_unaligned(ptr addrspace(5) nocapture
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, 1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_store_b8 v0, v1, off offset:-4225 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: scratch_load_u8 v0, v0, off offset:-4225 scope:SCOPE_SYS
@@ -4644,6 +4663,7 @@ define void @store_load_i32_large_negative_unaligned(ptr addrspace(5) nocapture
; GFX12-PAL-NEXT: s_wait_bvhcnt 0x0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: v_mov_b32_e32 v1, 1
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_store_b8 v0, v1, off offset:-4225 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
; GFX12-PAL-NEXT: scratch_load_u8 v0, v0, off offset:-4225 scope:SCOPE_SYS
diff --git a/llvm/test/CodeGen/AMDGPU/fmaxnum.ll b/llvm/test/CodeGen/AMDGPU/fmaxnum.ll
index 09898f1442fb..38640a18b5ae 100644
--- a/llvm/test/CodeGen/AMDGPU/fmaxnum.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmaxnum.ll
@@ -152,7 +152,7 @@ define amdgpu_kernel void @constant_fold_fmax_f32_p0_n0(ptr addrspace(1) %out) #
; GCN-LABEL: {{^}}constant_fold_fmax_f32_n0_p0:
; GCN-NOT: v_max_f32_e32
-; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
define amdgpu_kernel void @constant_fold_fmax_f32_n0_p0(ptr addrspace(1) %out) #0 {
%val = call float @llvm.maxnum.f32(float -0.0, float 0.0)
diff --git a/llvm/test/CodeGen/AMDGPU/fminnum.ll b/llvm/test/CodeGen/AMDGPU/fminnum.ll
index 844d26a6225b..65b311845a6b 100644
--- a/llvm/test/CodeGen/AMDGPU/fminnum.ll
+++ b/llvm/test/CodeGen/AMDGPU/fminnum.ll
@@ -150,7 +150,7 @@ define amdgpu_kernel void @constant_fold_fmin_f32_p0_p0(ptr addrspace(1) %out) #
; GCN-LABEL: {{^}}constant_fold_fmin_f32_p0_n0:
; GCN-NOT: v_min_f32_e32
-; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0
+; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
; GCN: buffer_store_dword [[REG]]
define amdgpu_kernel void @constant_fold_fmin_f32_p0_n0(ptr addrspace(1) %out) #0 {
%val = call float @llvm.minnum.f32(float 0.0, float -0.0)
diff --git a/llvm/test/CodeGen/AMDGPU/iglp.opt.reentry.ll b/llvm/test/CodeGen/AMDGPU/iglp.opt.reentry.ll
new file mode 100644
index 000000000000..1113acb3c030
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/iglp.opt.reentry.ll
@@ -0,0 +1,15 @@
+; RUN: llc -mtriple=amdgcn -mcpu=gfx908 -O3 < %s | FileCheck %s
+
+; Test should not result in build failure
+; CHECK-LABEL: shouldNotReApply
+
+define amdgpu_kernel void @shouldNotReApply() {
+entry:
+ tail call void @llvm.amdgcn.sched.barrier(i32 0)
+ store <4 x i32> zeroinitializer, ptr addrspace(3) null, align 2147483648
+ tail call void @llvm.amdgcn.sched.group.barrier(i32 0, i32 0, i32 0)
+ tail call void @llvm.amdgcn.sched.barrier(i32 0)
+ store i32 0, ptr addrspace(5) null, align 2147483648
+ tail call void @llvm.amdgcn.sched.group.barrier(i32 0, i32 0, i32 0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll
new file mode 100644
index 000000000000..265353675b34
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,DAGISEL-ASM
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,GISEL-ASM
+
+define void @local_to_flat(ptr addrspace(3) %ptr) {
+; ASM-LABEL: local_to_flat:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_shared_base
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) %ptr)
+ store volatile i32 7, ptr %1, align 4
+ ret void
+}
+
+define void @private_to_flat(ptr addrspace(5) %ptr) {
+; ASM-LABEL: private_to_flat:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %ptr)
+ store volatile i32 7, ptr %1, align 4
+ ret void
+}
+
+define void @flat_to_local(ptr %ptr) {
+; ASM-LABEL: flat_to_local:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: ds_write_b32 v0, v1
+; ASM-NEXT: s_waitcnt lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %1 = call ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr %ptr)
+ store volatile i32 7, ptr addrspace(3) %1, align 4
+ ret void
+}
+
+define void @flat_to_private(ptr %ptr) {
+; ASM-LABEL: flat_to_private:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT: s_waitcnt vmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %1 = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr %ptr)
+ store volatile i32 7, ptr addrspace(5) %1, align 4
+ ret void
+}
+
+declare ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3))
+declare ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5))
+declare ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr)
+declare ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr)
+
+declare <4 x ptr> @llvm.amdgcn.addrspacecast.nonnull.v4p0.v4p3(<4 x ptr addrspace(3)>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; DAGISEL-ASM: {{.*}}
+; GISEL-ASM: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/lower-work-group-id-intrinsics-hsa.ll b/llvm/test/CodeGen/AMDGPU/lower-work-group-id-intrinsics-hsa.ll
index afa914c8375f..9547f08d3eba 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-work-group-id-intrinsics-hsa.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-work-group-id-intrinsics-hsa.ll
@@ -269,6 +269,7 @@ define void @workgroup_ids_device_func(ptr addrspace(1) %outx, ptr addrspace(1)
; GFX12-NEXT: v_dual_mov_b32 v6, ttmp9 :: v_dual_mov_b32 v7, s0
; GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
; GFX12-NEXT: v_mov_b32_e32 v8, s1
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v6, off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_store_b32 v[2:3], v7, off scope:SCOPE_SYS
diff --git a/llvm/test/CodeGen/AMDGPU/omod.ll b/llvm/test/CodeGen/AMDGPU/omod.ll
index fa1ca66ef415..769d035858ca 100644
--- a/llvm/test/CodeGen/AMDGPU/omod.ll
+++ b/llvm/test/CodeGen/AMDGPU/omod.ll
@@ -651,8 +651,8 @@ define amdgpu_ps void @v_omod_mul4_multi_use_f32(float %a) #0 {
; GFX12-NEXT: v_add_f32_e32 v0, 1.0, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mul_f32_e32 v1, 4.0, v0
-; GFX12-NEXT: s_clause 0x1
; GFX12-NEXT: global_store_b32 v[0:1], v1, off
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v0, off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
new file mode 100644
index 000000000000..6ba66ccf7186
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
@@ -0,0 +1,3014 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s
+; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-SDAG-O0 %s
+
+; FIXME: GlobalISel missing the power-of-2 cases in legalization. https://github.com/llvm/llvm-project/issues/80671
+; xUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9 %s
+; xUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-O0 %s}}
+
+define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
+; GFX9-LABEL: v_srem_i128_vv:
+; GFX9: ; %bb.0: ; %_udiv-special-cases
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v20, 31, v3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v20
+; GFX9-NEXT: v_xor_b32_e32 v10, v2, v20
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v20
+; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v20
+; GFX9-NEXT: v_xor_b32_e32 v9, v3, v20
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v20, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v10, v20, vcc
+; GFX9-NEXT: v_xor_b32_e32 v4, v4, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v9, v20, vcc
+; GFX9-NEXT: v_xor_b32_e32 v5, v5, v8
+; GFX9-NEXT: v_sub_co_u32_e32 v23, vcc, v4, v8
+; GFX9-NEXT: v_xor_b32_e32 v6, v6, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v21, vcc, v5, v8, vcc
+; GFX9-NEXT: v_xor_b32_e32 v7, v7, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v6, v8, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v8, vcc
+; GFX9-NEXT: v_or_b32_e32 v7, v21, v5
+; GFX9-NEXT: v_or_b32_e32 v6, v23, v4
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GFX9-NEXT: v_or_b32_e32 v7, v3, v1
+; GFX9-NEXT: v_or_b32_e32 v6, v2, v0
+; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
+; GFX9-NEXT: v_ffbh_u32_e32 v6, v4
+; GFX9-NEXT: v_add_u32_e32 v6, 32, v6
+; GFX9-NEXT: v_ffbh_u32_e32 v7, v5
+; GFX9-NEXT: v_min_u32_e32 v6, v6, v7
+; GFX9-NEXT: v_ffbh_u32_e32 v7, v23
+; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
+; GFX9-NEXT: v_ffbh_u32_e32 v8, v21
+; GFX9-NEXT: v_min_u32_e32 v7, v7, v8
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 64, v7
+; GFX9-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_ffbh_u32_e32 v7, v0
+; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
+; GFX9-NEXT: v_min_u32_e32 v7, v7, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
+; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v10, v3
+; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
+; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v7, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v8, v10, vcc
+; GFX9-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-NEXT: v_subbrev_co_u32_e32 v8, vcc, 0, v9, vcc
+; GFX9-NEXT: v_subbrev_co_u32_e32 v9, vcc, 0, v9, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[6:7]
+; GFX9-NEXT: v_or_b32_e32 v13, v7, v9
+; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: v_mov_b32_e32 v22, v20
+; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
+; GFX9-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
+; GFX9-NEXT: v_xor_b32_e32 v10, 0x7f, v6
+; GFX9-NEXT: v_or_b32_e32 v12, v10, v8
+; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v1, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v3, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[4:5]
+; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB0_6
+; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 1, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v7, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, 0, v8, vcc
+; GFX9-NEXT: v_sub_u32_e32 v13, 0x7f, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v9, vcc
+; GFX9-NEXT: v_sub_u32_e32 v11, 64, v13
+; GFX9-NEXT: v_or_b32_e32 v8, v25, v27
+; GFX9-NEXT: v_or_b32_e32 v7, v24, v26
+; GFX9-NEXT: v_lshlrev_b64 v[9:10], v13, v[0:1]
+; GFX9-NEXT: v_lshrrev_b64 v[11:12], v11, v[2:3]
+; GFX9-NEXT: v_sub_u32_e32 v6, 63, v6
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], v6, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v8, v10, v12
+; GFX9-NEXT: v_or_b32_e32 v9, v9, v11
+; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v13
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v13
+; GFX9-NEXT: v_lshlrev_b64 v[12:13], v13, v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0
+; GFX9-NEXT: v_mov_b32_e32 v10, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v1, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v0, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, v13, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, v12, s[4:5]
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-NEXT: v_sub_u32_e32 v10, 64, v24
+; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[2:3]
+; GFX9-NEXT: v_lshlrev_b64 v[10:11], v10, v[0:1]
+; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
+; GFX9-NEXT: v_or_b32_e32 v10, v8, v10
+; GFX9-NEXT: v_subrev_u32_e32 v8, 64, v24
+; GFX9-NEXT: v_or_b32_e32 v11, v9, v11
+; GFX9-NEXT: v_lshrrev_b64 v[8:9], v8, v[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v24
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v15, v9, v3, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v8, v10, vcc
+; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v14, v10, v2, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v17, 0, v9, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v16, 0, v8, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, -1, v23
+; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v21, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v30, vcc, -1, v4, vcc
+; GFX9-NEXT: v_mov_b32_e32 v18, 0
+; GFX9-NEXT: v_mov_b32_e32 v10, 0
+; GFX9-NEXT: v_addc_co_u32_e32 v31, vcc, -1, v5, vcc
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v19, 0
+; GFX9-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-NEXT: .LBB0_3: ; %udiv-do-while
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 31, v15
+; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[14:15]
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 31, v7
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 31, v13
+; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
+; GFX9-NEXT: v_or_b32_e32 v14, v14, v33
+; GFX9-NEXT: v_or3_b32 v6, v6, v8, v10
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v28, v14
+; GFX9-NEXT: v_or_b32_e32 v16, v16, v32
+; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v29, v15, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v30, v16, vcc
+; GFX9-NEXT: v_lshlrev_b64 v[12:13], 1, v[12:13]
+; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v31, v17, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v8
+; GFX9-NEXT: v_or_b32_e32 v12, v18, v12
+; GFX9-NEXT: v_and_b32_e32 v18, v8, v23
+; GFX9-NEXT: v_or_b32_e32 v13, v19, v13
+; GFX9-NEXT: v_and_b32_e32 v19, v8, v21
+; GFX9-NEXT: v_sub_co_u32_e32 v14, vcc, v14, v18
+; GFX9-NEXT: v_and_b32_e32 v32, v8, v4
+; GFX9-NEXT: v_subb_co_u32_e32 v15, vcc, v15, v19, vcc
+; GFX9-NEXT: v_and_b32_e32 v33, v8, v5
+; GFX9-NEXT: v_subb_co_u32_e32 v16, vcc, v16, v32, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v33, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, -1, v24
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, -1, v26, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v27, vcc
+; GFX9-NEXT: v_or_b32_e32 v18, v24, v26
+; GFX9-NEXT: v_or_b32_e32 v19, v25, v27
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX9-NEXT: v_mov_b32_e32 v19, v9
+; GFX9-NEXT: v_or3_b32 v7, v7, 0, v11
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v18, v8
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_3
+; GFX9-NEXT: ; %bb.4: ; %Flow
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: .LBB0_5: ; %Flow2
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[12:13]
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v13
+; GFX9-NEXT: v_or3_b32 v11, v7, 0, v11
+; GFX9-NEXT: v_or3_b32 v12, v6, v12, v10
+; GFX9-NEXT: v_or_b32_e32 v10, v9, v15
+; GFX9-NEXT: v_or_b32_e32 v13, v8, v14
+; GFX9-NEXT: .LBB0_6: ; %Flow3
+; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-NEXT: v_mul_lo_u32 v16, v13, v5
+; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v23, v13, 0
+; GFX9-NEXT: v_mov_b32_e32 v15, 0
+; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v13, v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v21, v13, v[14:15]
+; GFX9-NEXT: v_mul_lo_u32 v9, v10, v4
+; GFX9-NEXT: v_mul_lo_u32 v11, v11, v23
+; GFX9-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v23, v10, v[13:14]
+; GFX9-NEXT: v_add3_u32 v8, v8, v16, v9
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v12, v23, v[7:8]
+; GFX9-NEXT: v_mov_b32_e32 v8, v14
+; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v4, v8
+; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mul_lo_u32 v12, v12, v21
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v21, v10, v[8:9]
+; GFX9-NEXT: v_add3_u32 v4, v11, v7, v12
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v8, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v9, v4, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, v13
+; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v5
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
+; GFX9-NEXT: v_xor_b32_e32 v5, v0, v20
+; GFX9-NEXT: v_xor_b32_e32 v0, v2, v20
+; GFX9-NEXT: v_xor_b32_e32 v4, v1, v22
+; GFX9-NEXT: v_xor_b32_e32 v1, v3, v22
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v20
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v22, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v5, v20, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v22, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_srem_i128_vv:
+; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
+; GFX9-O0-NEXT: v_ashrrev_i64 v[12:13], s4, v[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v13
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v13
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-O0-NEXT: v_xor_b32_e64 v13, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_xor_b32_e64 v15, v4, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_xor_b32_e64 v7, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_xor_b32_e64 v2, v2, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v16
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v14
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v9, vcc, v9, v12
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v10, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v13, vcc, v11, v12, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v10, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v8
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v1, vcc, v1, v6
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v5, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-O0-NEXT: s_mov_b32 s9, 32
+; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: s_mov_b32 s12, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s14, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[6:7]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr9
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
+; GFX9-O0-NEXT: s_mov_b32 s8, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s10, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[6:7]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: s_mov_b32 s10, s6
+; GFX9-O0-NEXT: s_mov_b32 s11, s7
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
+; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
+; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: s_mov_b32 s14, s13
+; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
+; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
+; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB0_3
+; GFX9-O0-NEXT: s_branch .LBB0_8
+; GFX9-O0-NEXT: .LBB0_1: ; %Flow
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
+; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_5
+; GFX9-O0-NEXT: .LBB0_3: ; %Flow2
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
+; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_9
+; GFX9-O0-NEXT: .LBB0_4: ; %udiv-loop-exit
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
+; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_3
+; GFX9-O0-NEXT: .LBB0_5: ; %Flow1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
+; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_4
+; GFX9-O0-NEXT: .LBB0_6: ; %udiv-do-while
+; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
+; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
+; GFX9-O0-NEXT: s_mov_b32 s5, 1
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
+; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
+; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
+; GFX9-O0-NEXT: s_mov_b32 s8, s5
+; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
+; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execnz .LBB0_6
+; GFX9-O0-NEXT: s_branch .LBB0_1
+; GFX9-O0-NEXT: .LBB0_7: ; %udiv-preheader
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: s_mov_b32 s6, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
+; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
+; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_6
+; GFX9-O0-NEXT: .LBB0_8: ; %udiv-bb1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-O0-NEXT: s_mov_b32 s4, s7
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: s_mov_b32 s9, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
+; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
+; GFX9-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
+; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
+; GFX9-O0-NEXT: s_mov_b32 s10, 63
+; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-O0-NEXT: s_branch .LBB0_7
+; GFX9-O0-NEXT: .LBB0_9: ; %udiv-end
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v17
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v1, v0
+; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[17:18]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mul_lo_u32 v2, v5, v2
+; GFX9-O0-NEXT: v_mad_u64_u32 v[17:18], s[6:7], v5, v0, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
+; GFX9-O0-NEXT: v_add3_u32 v2, v0, v2, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 killed $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
+; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
+; GFX9-O0-NEXT: v_or_b32_e64 v17, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
+; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v6
+; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v19
+; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v0
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v2, v0, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v20
+; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v17, s[6:7], v11, v12
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v2
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v1, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v12
+; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v19
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v5, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v21
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v19
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v6
+; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v0, v5, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
+; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v20
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v6, v19, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v6
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_and_b32_e64 v19, v19, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
+; GFX9-O0-NEXT: v_and_b32_e64 v21, v20, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v19
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v0, v1, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v19
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v1
+; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v23
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v1, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
+; GFX9-O0-NEXT: v_add_co_u32_e64 v0, s[6:7], v0, v20
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v1, v19, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v19
+; GFX9-O0-NEXT: v_lshrrev_b64 v[21:22], s4, v[0:1]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v22
+; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v19, v20
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v5, v6, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
+; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v5, v6
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v6
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v3, v5, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_lshlrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v11
+; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v14
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v12
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v11, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v10
+; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_xor_b32_e64 v9, v6, v5
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: v_xor_b32_e64 v0, v0, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = srem i128 %lhs, %rhs
+ ret i128 %div
+}
+
+define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
+; GFX9-LABEL: v_urem_i128_vv:
+; GFX9: ; %bb.0: ; %_udiv-special-cases
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_or_b32_e32 v9, v5, v7
+; GFX9-NEXT: v_or_b32_e32 v8, v4, v6
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: v_or_b32_e32 v9, v1, v3
+; GFX9-NEXT: v_or_b32_e32 v8, v0, v2
+; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; GFX9-NEXT: v_ffbh_u32_e32 v8, v6
+; GFX9-NEXT: v_add_u32_e32 v8, 32, v8
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v7
+; GFX9-NEXT: v_min_u32_e32 v8, v8, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v4
+; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v10, v5
+; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
+; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GFX9-NEXT: v_ffbh_u32_e32 v11, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
+; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-NEXT: v_min_u32_e32 v9, v9, v11
+; GFX9-NEXT: v_ffbh_u32_e32 v11, v0
+; GFX9-NEXT: v_add_u32_e32 v11, 32, v11
+; GFX9-NEXT: v_ffbh_u32_e32 v12, v1
+; GFX9-NEXT: v_min_u32_e32 v11, v11, v12
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, 64, v11
+; GFX9-NEXT: v_addc_co_u32_e64 v12, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v8, v9
+; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v10, v12, vcc
+; GFX9-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v11, vcc
+; GFX9-NEXT: v_subbrev_co_u32_e32 v11, vcc, 0, v11, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
+; GFX9-NEXT: v_and_b32_e32 v12, 1, v12
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v12
+; GFX9-NEXT: v_xor_b32_e32 v12, 0x7f, v8
+; GFX9-NEXT: v_or_b32_e32 v13, v9, v11
+; GFX9-NEXT: v_or_b32_e32 v12, v12, v10
+; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GFX9-NEXT: v_cndmask_b32_e64 v15, v3, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v14, v2, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v1, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
+; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB1_6
+; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 1, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v9, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, 0, v10, vcc
+; GFX9-NEXT: v_sub_u32_e32 v15, 0x7f, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v11, vcc
+; GFX9-NEXT: v_sub_u32_e32 v13, 64, v15
+; GFX9-NEXT: v_or_b32_e32 v10, v23, v25
+; GFX9-NEXT: v_or_b32_e32 v9, v22, v24
+; GFX9-NEXT: v_lshlrev_b64 v[11:12], v15, v[2:3]
+; GFX9-NEXT: v_lshrrev_b64 v[13:14], v13, v[0:1]
+; GFX9-NEXT: v_sub_u32_e32 v8, 63, v8
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GFX9-NEXT: v_lshlrev_b64 v[8:9], v8, v[0:1]
+; GFX9-NEXT: v_or_b32_e32 v10, v12, v14
+; GFX9-NEXT: v_or_b32_e32 v11, v11, v13
+; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v15
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v11, s[4:5]
+; GFX9-NEXT: v_lshlrev_b64 v[10:11], v15, v[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v15
+; GFX9-NEXT: v_mov_b32_e32 v12, 0
+; GFX9-NEXT: v_mov_b32_e32 v14, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v3, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v2, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, v11, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-NEXT: v_mov_b32_e32 v15, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, v10, s[4:5]
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-NEXT: v_sub_u32_e32 v14, 64, v22
+; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[0:1]
+; GFX9-NEXT: v_lshlrev_b64 v[14:15], v14, v[2:3]
+; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v22
+; GFX9-NEXT: v_or_b32_e32 v14, v12, v14
+; GFX9-NEXT: v_subrev_u32_e32 v12, 64, v22
+; GFX9-NEXT: v_or_b32_e32 v15, v13, v15
+; GFX9-NEXT: v_lshrrev_b64 v[12:13], v12, v[2:3]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v22
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v15, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v17, v13, v1, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v12, v14, vcc
+; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v16, v14, v0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v19, 0, v13, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v18, 0, v12, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, -1, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v5, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v28, vcc, -1, v6, vcc
+; GFX9-NEXT: v_mov_b32_e32 v20, 0
+; GFX9-NEXT: v_mov_b32_e32 v14, 0
+; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v7, vcc
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v21, 0
+; GFX9-NEXT: v_mov_b32_e32 v15, 0
+; GFX9-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-NEXT: .LBB1_3: ; %udiv-do-while
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v11
+; GFX9-NEXT: v_lshlrev_b64 v[10:11], 1, v[10:11]
+; GFX9-NEXT: v_lshlrev_b64 v[18:19], 1, v[18:19]
+; GFX9-NEXT: v_or_b32_e32 v10, v20, v10
+; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v17
+; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
+; GFX9-NEXT: v_or_b32_e32 v18, v18, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v9
+; GFX9-NEXT: v_or_b32_e32 v16, v16, v20
+; GFX9-NEXT: v_sub_co_u32_e32 v20, vcc, v26, v16
+; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v27, v17, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v28, v18, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v29, v19, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v30, 31, v20
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v4
+; GFX9-NEXT: v_sub_co_u32_e32 v16, vcc, v16, v20
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v5
+; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v20, vcc
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v6
+; GFX9-NEXT: v_subb_co_u32_e32 v18, vcc, v18, v20, vcc
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v19, vcc, v19, v20, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, -1, v22
+; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v23, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, -1, v24, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
+; GFX9-NEXT: v_or_b32_e32 v11, v21, v11
+; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-NEXT: v_or_b32_e32 v20, v22, v24
+; GFX9-NEXT: v_or_b32_e32 v21, v23, v25
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
+; GFX9-NEXT: v_or3_b32 v8, v8, v12, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 1, v30
+; GFX9-NEXT: v_mov_b32_e32 v21, v13
+; GFX9-NEXT: v_or3_b32 v9, v9, 0, v15
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v20, v12
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_3
+; GFX9-NEXT: ; %bb.4: ; %Flow
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: .LBB1_5: ; %Flow2
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[10:11]
+; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-NEXT: v_lshrrev_b32_e32 v10, 31, v11
+; GFX9-NEXT: v_or3_b32 v15, v9, 0, v15
+; GFX9-NEXT: v_or3_b32 v14, v8, v10, v14
+; GFX9-NEXT: v_or_b32_e32 v13, v13, v17
+; GFX9-NEXT: v_or_b32_e32 v12, v12, v16
+; GFX9-NEXT: .LBB1_6: ; %Flow3
+; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-NEXT: v_mul_lo_u32 v19, v12, v7
+; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v4, v12, 0
+; GFX9-NEXT: v_mov_b32_e32 v17, 0
+; GFX9-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v12, v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v16, v8
+; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v5, v12, v[16:17]
+; GFX9-NEXT: v_mul_lo_u32 v18, v13, v6
+; GFX9-NEXT: v_mul_lo_u32 v16, v15, v4
+; GFX9-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-NEXT: v_mov_b32_e32 v12, v17
+; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v4, v13, v[11:12]
+; GFX9-NEXT: v_add3_u32 v10, v10, v19, v18
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v14, v4, v[9:10]
+; GFX9-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-NEXT: v_mul_lo_u32 v10, v14, v5
+; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, v6, v4
+; GFX9-NEXT: v_addc_co_u32_e64 v15, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v5, v13, v[14:15]
+; GFX9-NEXT: v_add3_u32 v6, v16, v9, v10
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v6, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_urem_i128_vv:
+; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v0
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-O0-NEXT: s_mov_b32 s9, 32
+; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: s_mov_b32 s12, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s14, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[12:13]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr9
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
+; GFX9-O0-NEXT: s_mov_b32 s8, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s10, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[8:9]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: s_mov_b32 s10, s6
+; GFX9-O0-NEXT: s_mov_b32 s11, s7
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
+; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
+; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: s_mov_b32 s14, s13
+; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
+; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
+; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB1_3
+; GFX9-O0-NEXT: s_branch .LBB1_8
+; GFX9-O0-NEXT: .LBB1_1: ; %Flow
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
+; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_5
+; GFX9-O0-NEXT: .LBB1_3: ; %Flow2
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
+; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_9
+; GFX9-O0-NEXT: .LBB1_4: ; %udiv-loop-exit
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
+; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_3
+; GFX9-O0-NEXT: .LBB1_5: ; %Flow1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
+; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_4
+; GFX9-O0-NEXT: .LBB1_6: ; %udiv-do-while
+; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
+; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
+; GFX9-O0-NEXT: s_mov_b32 s5, 1
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
+; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
+; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
+; GFX9-O0-NEXT: s_mov_b32 s8, s5
+; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
+; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execnz .LBB1_6
+; GFX9-O0-NEXT: s_branch .LBB1_1
+; GFX9-O0-NEXT: .LBB1_7: ; %udiv-preheader
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: s_mov_b32 s6, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
+; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
+; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_6
+; GFX9-O0-NEXT: .LBB1_8: ; %udiv-bb1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-O0-NEXT: s_mov_b32 s4, s7
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: s_mov_b32 s9, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
+; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
+; GFX9-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
+; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
+; GFX9-O0-NEXT: s_mov_b32 s10, 63
+; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-O0-NEXT: s_branch .LBB1_7
+; GFX9-O0-NEXT: .LBB1_9: ; %udiv-end
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v13
+; GFX9-O0-NEXT: v_mul_lo_u32 v5, v6, v2
+; GFX9-O0-NEXT: v_lshrrev_b64 v[13:14], s4, v[13:14]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v7, v3
+; GFX9-O0-NEXT: v_mad_u64_u32 v[13:14], s[6:7], v7, v2, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_lshlrev_b64 v[17:18], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 killed $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_or_b32_e64 v13, v3, v5
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v8
+; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v15
+; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v5
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v2, v5, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v16
+; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-O0-NEXT: v_add_co_u32_e64 v13, s[6:7], v11, v12
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v6, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v12
+; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v7, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v8, v8, v17
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v8
+; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v5, v7, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v7, s[6:7], v7, v16
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v8, v15, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_and_b32_e64 v15, v15, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
+; GFX9-O0-NEXT: v_and_b32_e64 v17, v16, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v5, v6, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v15
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v6
+; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v6, v15
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v16
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v6, v15, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v15
+; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[5:6]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v15, v16
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v7, v8, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
+; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v7, v8
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v8
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v3, v7, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v7
+; GFX9-O0-NEXT: v_lshlrev_b64 v[6:7], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = urem i128 %lhs, %rhs
+ ret i128 %div
+}
+
+define i128 @v_srem_i128_v_pow2k(i128 %lhs) {
+; GFX9-LABEL: v_srem_i128_v_pow2k:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX9-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v3, vcc
+; GFX9-NEXT: v_and_b32_e32 v4, -2, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v5, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_srem_i128_v_pow2k:
+; GFX9-O0: ; %bb.0:
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_mov_b32 s4, s7
+; GFX9-O0-NEXT: v_add_co_u32_e32 v6, vcc, v5, v4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v4, vcc, v0, v2, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v3, v2, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-O0-NEXT: s_mov_b32 s6, -2
+; GFX9-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_mov_b32 s6, s5
+; GFX9-O0-NEXT: v_and_b32_e64 v4, v4, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_and_b32_e64 v9, v6, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v9
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v7
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s4, v[3:4]
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = srem i128 %lhs, 8589934592
+ ret i128 %div
+}
+
+define i128 @v_urem_i128_v_pow2k(i128 %lhs) {
+; GFX9-LABEL: v_urem_i128_v_pow2k:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v1, 1, v1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_urem_i128_v_pow2k:
+; GFX9-O0: ; %bb.0:
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr1 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_mov_b32 s6, s5
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: v_and_b32_e64 v3, v2, s6
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_and_b32_e64 v1, v0, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: v_lshrrev_b64 v[1:2], s4, v[1:2]
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = urem i128 %lhs, 8589934592
+ ret i128 %div
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX9-SDAG: {{.*}}
+; GFX9-SDAG-O0: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
index f93456ccacb8..4c61e6803feb 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
@@ -160,3 +160,60 @@ body: |
S_ENDPGM 0
...
+---
+name: add_f16_u32_preserve_different_bb
+tracksRegLiveness: true
+body: |
+ ; SDWA-LABEL: name: add_f16_u32_preserve_different_bb
+ ; SDWA: bb.0:
+ ; SDWA-NEXT: successors: %bb.1(0x80000000)
+ ; SDWA-NEXT: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
+ ; SDWA-NEXT: {{ $}}
+ ; SDWA-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
+ ; SDWA-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
+ ; SDWA-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
+ ; SDWA-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32))
+ ; SDWA-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY1]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32))
+ ; SDWA-NEXT: [[V_AND_B32_e32_:%[0-9]+]]:vgpr_32 = V_AND_B32_e32 65535, [[FLAT_LOAD_DWORD]], implicit $exec
+ ; SDWA-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, [[FLAT_LOAD_DWORD1]], implicit $exec
+ ; SDWA-NEXT: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[FLAT_LOAD_DWORD]], 8, 8, implicit $exec
+ ; SDWA-NEXT: [[V_LSHRREV_B32_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e32 24, [[FLAT_LOAD_DWORD1]], implicit $exec
+ ; SDWA-NEXT: {{ $}}
+ ; SDWA-NEXT: bb.1:
+ ; SDWA-NEXT: successors: %bb.2(0x80000000)
+ ; SDWA-NEXT: {{ $}}
+ ; SDWA-NEXT: [[V_MUL_F32_sdwa:%[0-9]+]]:vgpr_32 = V_MUL_F32_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 5, 0, 1, 3, implicit $mode, implicit $exec
+ ; SDWA-NEXT: {{ $}}
+ ; SDWA-NEXT: bb.2:
+ ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 2, 4, 5, implicit $mode, implicit $exec, implicit killed [[V_MUL_F32_sdwa]](tied-def 0)
+ ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_ADD_F16_sdwa]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+ ; SDWA-NEXT: $sgpr30_sgpr31 = COPY [[COPY]]
+ ; SDWA-NEXT: S_SETPC_B64_return $sgpr30_sgpr31
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
+
+ %2:sreg_64 = COPY $sgpr30_sgpr31
+ %1:vreg_64 = COPY $vgpr2_vgpr3
+ %0:vreg_64 = COPY $vgpr0_vgpr1
+ %3:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32))
+ %4:vgpr_32 = FLAT_LOAD_DWORD %1, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32))
+
+ %5:vgpr_32 = V_AND_B32_e32 65535, %3, implicit $exec
+ %6:vgpr_32 = V_LSHRREV_B32_e64 16, %4, implicit $exec
+ %7:vgpr_32 = V_BFE_U32_e64 %3, 8, 8, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B32_e32 24, %4, implicit $exec
+
+ %9:vgpr_32 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit $mode, implicit $exec
+ %10:vgpr_32 = V_LSHLREV_B16_e64 8, %9, implicit $exec
+
+ bb.1:
+ %11:vgpr_32 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit $mode, implicit $exec
+ %12:vgpr_32 = V_LSHLREV_B32_e64 16, %11, implicit $exec
+
+ bb.2:
+ %13:vgpr_32 = V_OR_B32_e64 %10, %12, implicit $exec
+
+ FLAT_STORE_DWORD %0, %13, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+ $sgpr30_sgpr31 = COPY %2
+ S_SETPC_B64_return $sgpr30_sgpr31
+...
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-mark-last-scratch-load.ll b/llvm/test/CodeGen/AMDGPU/vgpr-mark-last-scratch-load.ll
index 137bd0f5d9f1..4efa1e9353ab 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-mark-last-scratch-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-mark-last-scratch-load.ll
@@ -28,6 +28,7 @@ define amdgpu_cs void @max_6_vgprs(ptr addrspace(1) %p) "amdgpu-num-vgpr"="6" {
; CHECK-NEXT: scratch_store_b32 off, v0, off offset:12 ; 4-byte Folded Spill
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: global_store_b32 v[0:1], v5, off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_load_b32 v0, off, off th:TH_LOAD_LU ; 4-byte Folded Reload
@@ -116,6 +117,7 @@ define amdgpu_cs void @max_11_vgprs_branch(ptr addrspace(1) %p, i32 %tmp) "amdgp
; CHECK-NEXT: scratch_store_b32 off, v0, off offset:32 ; 4-byte Folded Spill
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: global_store_b32 v[0:1], v10, off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_load_b32 v0, off, off offset:16 th:TH_LOAD_LU ; 4-byte Folded Reload
@@ -174,6 +176,7 @@ define amdgpu_cs void @max_11_vgprs_branch(ptr addrspace(1) %p, i32 %tmp) "amdgp
; CHECK-NEXT: scratch_store_b32 off, v0, off offset:32 ; 4-byte Folded Spill
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: global_store_b32 v[0:1], v10, off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_load_b32 v0, off, off offset:16 th:TH_LOAD_LU ; 4-byte Folded Reload
@@ -208,6 +211,7 @@ define amdgpu_cs void @max_11_vgprs_branch(ptr addrspace(1) %p, i32 %tmp) "amdgp
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s0
; CHECK-NEXT: scratch_load_b32 v0, off, off th:TH_LOAD_LU ; 4-byte Folded Reload
; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: global_store_b32 v[0:1], v0, off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_load_b32 v0, off, off offset:4 th:TH_LOAD_LU ; 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.ll b/llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.ll
new file mode 100644
index 000000000000..e6fbe97f8dc0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=1 -march=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX12 %s
+
+define amdgpu_ps void @intrinsic_store_system_scope(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+; GFX12-LABEL: intrinsic_store_system_scope:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b32 v0, v[1:2], s[0:3], s4 idxen offen scope:SCOPE_SYS
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ call void @llvm.amdgcn.struct.buffer.store.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 24)
+ ret void
+}
+
+define amdgpu_ps void @generic_store_volatile(i32 %val, ptr addrspace(1) %out) {
+; GFX12-LABEL: generic_store_volatile:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_store_b32 v[1:2], v0, off scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ store volatile i32 %val, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.mir b/llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.mir
new file mode 100644
index 000000000000..acf8bd3a6ab5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/wait-before-stores-with-scope_sys.mir
@@ -0,0 +1,43 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -run-pass=si-memory-legalizer %s -o - | FileCheck -check-prefix=GFX12 %s
+
+---
+name: intrinsic_store_system_scope
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2
+
+ ; GFX12-LABEL: name: intrinsic_store_system_scope
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: S_WAIT_LOADCNT_soft 0
+ ; GFX12-NEXT: S_WAIT_SAMPLECNT_soft 0
+ ; GFX12-NEXT: S_WAIT_BVHCNT_soft 0
+ ; GFX12-NEXT: S_WAIT_KMCNT_soft 0
+ ; GFX12-NEXT: S_WAIT_STORECNT_soft 0
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact killed renamable $vgpr0, killed renamable $vgpr1_vgpr2, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, killed renamable $sgpr4, 0, 24, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX12-NEXT: S_ENDPGM 0
+ BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact killed renamable $vgpr0, killed renamable $vgpr1_vgpr2, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, killed renamable $sgpr4, 0, 24, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ S_ENDPGM 0
+...
+
+---
+name: generic_store_volatile
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+
+ ; GFX12-LABEL: name: generic_store_volatile
+ ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: S_WAIT_LOADCNT_soft 0
+ ; GFX12-NEXT: S_WAIT_SAMPLECNT_soft 0
+ ; GFX12-NEXT: S_WAIT_BVHCNT_soft 0
+ ; GFX12-NEXT: S_WAIT_KMCNT_soft 0
+ ; GFX12-NEXT: S_WAIT_STORECNT_soft 0
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD killed renamable $vgpr1_vgpr2, killed renamable $vgpr0, 0, 24, implicit $exec :: (volatile store (s32), addrspace 1)
+ ; GFX12-NEXT: S_WAIT_STORECNT_soft 0
+ ; GFX12-NEXT: S_ENDPGM 0
+ GLOBAL_STORE_DWORD killed renamable $vgpr1_vgpr2, killed renamable $vgpr0, 0, 0, implicit $exec :: (volatile store (s32), addrspace 1)
+ S_ENDPGM 0
+...
diff --git a/llvm/test/CodeGen/DirectX/frac.ll b/llvm/test/CodeGen/DirectX/frac.ll
new file mode 100644
index 000000000000..ab605ed6084a
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/frac.ll
@@ -0,0 +1,34 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for frac are generated for float and half.
+; CHECK:call float @dx.op.unary.f32(i32 22, float %{{.*}})
+; CHECK:call half @dx.op.unary.f16(i32 22, half %{{.*}})
+
+target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
+target triple = "dxil-pc-shadermodel6.7-library"
+
+; Function Attrs: noinline nounwind optnone
+define noundef float @frac_float(float noundef %a) #0 {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %dx.frac = call float @llvm.dx.frac.f32(float %0)
+ ret float %dx.frac
+}
+
+; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
+declare float @llvm.dx.frac.f32(float) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef half @frac_half(half noundef %a) #0 {
+entry:
+ %a.addr = alloca half, align 2
+ store half %a, ptr %a.addr, align 2
+ %0 = load half, ptr %a.addr, align 2
+ %dx.frac = call half @llvm.dx.frac.f16(half %0)
+ ret half %dx.frac
+}
+
+; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
+declare half @llvm.dx.frac.f16(half) #1
diff --git a/llvm/test/CodeGen/DirectX/round.ll b/llvm/test/CodeGen/DirectX/round.ll
new file mode 100644
index 000000000000..5d53a794b763
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/round.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for round are generated for float and half.
+; CHECK:call float @dx.op.unary.f32(i32 26, float %{{.*}})
+; CHECK:call half @dx.op.unary.f16(i32 26, half %{{.*}})
+
+target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
+target triple = "dxil-pc-shadermodel6.7-library"
+
+; Function Attrs: noinline nounwind optnone
+define noundef float @round_float(float noundef %a) #0 {
+entry:
+ %a.addr = alloca float, align 4
+ store float %a, ptr %a.addr, align 4
+ %0 = load float, ptr %a.addr, align 4
+ %elt.round = call float @llvm.round.f32(float %0)
+ ret float %elt.round
+}
+
+; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
+declare float @llvm.round.f32(float) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef half @round_half(half noundef %a) #0 {
+entry:
+ %a.addr = alloca half, align 2
+ store half %a, ptr %a.addr, align 2
+ %0 = load half, ptr %a.addr, align 2
+ %elt.round = call half @llvm.round.f16(half %0)
+ ret half %elt.round
+}
diff --git a/llvm/test/CodeGen/Hexagon/loop-balign.ll b/llvm/test/CodeGen/Hexagon/loop-balign.ll
new file mode 100644
index 000000000000..9d1f42a4b14b
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/loop-balign.ll
@@ -0,0 +1,91 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s -check-prefix=BALIGN
+; BALIGN: .p2align{{.*}}5
+
+; The test for checking the alignment of 'for.body4.for.body4_crit_edge' basic block
+
+define dso_local void @foo(i32 %nCol, i32 %nRow, ptr nocapture %resMat) local_unnamed_addr {
+entry:
+ %shl = shl i32 %nRow, 2
+ %cmp36 = icmp sgt i32 %nRow, 0
+ %0 = add i32 %nCol, -1
+ %.inv = icmp slt i32 %0, 1
+ %1 = select i1 %.inv, i32 1, i32 %nCol
+ br label %Outerloop
+
+Outerloop: ; preds = %for.end7, %entry
+ %r12.0 = phi i32 [ 0, %entry ], [ %inc8, %for.end7 ]
+ %r7_6.0 = phi i64 [ undef, %entry ], [ %r7_6.1.lcssa, %for.end7 ]
+ %r0i.0 = phi i32 [ undef, %entry ], [ %r0i.1.lcssa, %for.end7 ]
+ %r5.0 = phi ptr [ %resMat, %entry ], [ %r5.1.lcssa, %for.end7 ]
+ %r8.0 = phi i32 [ %shl, %entry ], [ %r8.1.lcssa, %for.end7 ]
+ br i1 %cmp36, label %for.body.lr.ph, label %for.end7
+
+for.body.lr.ph: ; preds = %Outerloop
+ %cmp332 = icmp eq i32 %r12.0, 0
+ %exitcond.peel = icmp eq i32 %r12.0, 1
+ br label %for.body
+
+for.body: ; preds = %for.end, %for.body.lr.ph
+ %r8.141 = phi i32 [ %r8.0, %for.body.lr.ph ], [ %add, %for.end ]
+ %r5.140 = phi ptr [ %r5.0, %for.body.lr.ph ], [ %add.ptr, %for.end ]
+ %i.039 = phi i32 [ 0, %for.body.lr.ph ], [ %inc6, %for.end ]
+ %r0i.138 = phi i32 [ %r0i.0, %for.body.lr.ph ], [ %4, %for.end ]
+ %r7_6.137 = phi i64 [ %r7_6.0, %for.body.lr.ph ], [ %r7_6.2.lcssa, %for.end ]
+ %add = add nsw i32 %r8.141, %shl
+ br i1 %cmp332, label %for.end, label %for.body4.peel
+
+for.body4.peel: ; preds = %for.body
+ %r1i.0.in.peel = inttoptr i32 %r8.141 to ptr
+ %r1i.0.peel = load i32, ptr %r1i.0.in.peel, align 4
+ %2 = tail call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %r7_6.137, i32 %r1i.0.peel, i32 %r0i.138)
+ br i1 %exitcond.peel, label %for.end, label %for.body4.preheader.peel.newph
+
+for.body4.preheader.peel.newph: ; preds = %for.body4.peel
+ %r1i.0.in = inttoptr i32 %add to ptr
+ %r1i.0 = load i32, ptr %r1i.0.in, align 4
+ br label %for.body4
+
+for.body4: ; preds = %for.body4.for.body4_crit_edge, %for.body4.preheader.peel.newph
+ %inc.phi = phi i32 [ %inc.0, %for.body4.for.body4_crit_edge ], [ 2, %for.body4.preheader.peel.newph ]
+ %r7_6.233 = phi i64 [ %3, %for.body4.for.body4_crit_edge ], [ %2, %for.body4.preheader.peel.newph ]
+ %3 = tail call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %r7_6.233, i32 %r1i.0, i32 %r0i.138)
+ %exitcond = icmp eq i32 %inc.phi, %r12.0
+ br i1 %exitcond, label %for.end.loopexit, label %for.body4.for.body4_crit_edge
+
+for.body4.for.body4_crit_edge: ; preds = %for.body4
+ %inc.0 = add nuw nsw i32 %inc.phi, 1
+ br label %for.body4
+
+for.end.loopexit: ; preds = %for.body4
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %for.body4.peel, %for.body
+ %r7_6.2.lcssa = phi i64 [ %r7_6.137, %for.body ], [ %2, %for.body4.peel ], [ %3, %for.end.loopexit ]
+ %4 = tail call i32 @llvm.hexagon.S2.clbp(i64 %r7_6.2.lcssa)
+ store i32 %4, ptr %r5.140, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %r5.140, i32 undef
+ %inc6 = add nuw nsw i32 %i.039, 1
+ %exitcond47 = icmp eq i32 %inc6, %nRow
+ br i1 %exitcond47, label %for.end7.loopexit, label %for.body
+
+for.end7.loopexit: ; preds = %for.end
+ br label %for.end7
+
+for.end7: ; preds = %for.end7.loopexit, %Outerloop
+ %r7_6.1.lcssa = phi i64 [ %r7_6.0, %Outerloop ], [ %r7_6.2.lcssa, %for.end7.loopexit ]
+ %r0i.1.lcssa = phi i32 [ %r0i.0, %Outerloop ], [ %4, %for.end7.loopexit ]
+ %r5.1.lcssa = phi ptr [ %r5.0, %Outerloop ], [ %add.ptr, %for.end7.loopexit ]
+ %r8.1.lcssa = phi i32 [ %r8.0, %Outerloop ], [ %add, %for.end7.loopexit ]
+ %inc8 = add nuw i32 %r12.0, 1
+ %exitcond48 = icmp eq i32 %inc8, %1
+ br i1 %exitcond48, label %if.end, label %Outerloop
+
+if.end: ; preds = %for.end7
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.clbp(i64)
diff --git a/llvm/test/CodeGen/Hexagon/loop_align_count.ll b/llvm/test/CodeGen/Hexagon/loop_align_count.ll
new file mode 100644
index 000000000000..1f89d8e39495
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/loop_align_count.ll
@@ -0,0 +1,116 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv73 -O2 -mattr=+hvxv73,hvx-length64b \
+; RUN: -debug-only=hexagon-loop-align 2>&1 < %s | FileCheck %s
+; Validate that there are 4 bundles in the loop.
+; REQUIRES: asserts
+
+; CHECK: Loop Align Pass:
+; CHECK: Bundle Count : 4
+; CHECK: .p2align{{.*}}5
+
+; Function Attrs: nounwind
+define void @ham(ptr noalias nocapture readonly %arg, i32 %arg1, i32 %arg2, i32 %arg3, ptr noalias nocapture %arg4, i32 %arg5) #0 {
+bb:
+ %ashr = ashr i32 %arg3, 2
+ %ashr6 = ashr i32 %arg3, 1
+ %add = add nsw i32 %ashr6, %ashr
+ %icmp = icmp sgt i32 %arg2, 0
+ br i1 %icmp, label %bb7, label %bb61
+
+bb7: ; preds = %bb
+ %sdiv = sdiv i32 %arg1, 64
+ %icmp8 = icmp sgt i32 %arg1, 63
+ br label %bb9
+
+bb9: ; preds = %bb57, %bb7
+ %phi = phi i32 [ 0, %bb7 ], [ %add58, %bb57 ]
+ %ashr10 = ashr exact i32 %phi, 1
+ %mul = mul nsw i32 %ashr10, %arg3
+ br i1 %icmp8, label %bb11, label %bb57
+
+bb11: ; preds = %bb9
+ %add12 = add nsw i32 %phi, 1
+ %mul13 = mul nsw i32 %add12, %arg5
+ %mul14 = mul nsw i32 %phi, %arg5
+ %add15 = add i32 %add, %mul
+ %add16 = add i32 %mul, %ashr
+ %add17 = add i32 %mul, %ashr6
+ %getelementptr = getelementptr inbounds i8, ptr %arg4, i32 %mul13
+ %getelementptr18 = getelementptr inbounds i8, ptr %arg4, i32 %mul14
+ %getelementptr19 = getelementptr inbounds i16, ptr %arg, i32 %add15
+ %getelementptr20 = getelementptr inbounds i16, ptr %arg, i32 %add16
+ %getelementptr21 = getelementptr inbounds i16, ptr %arg, i32 %add17
+ %getelementptr22 = getelementptr inbounds i16, ptr %arg, i32 %mul
+ %bitcast = bitcast ptr %getelementptr to ptr
+ %bitcast23 = bitcast ptr %getelementptr18 to ptr
+ %bitcast24 = bitcast ptr %getelementptr19 to ptr
+ %bitcast25 = bitcast ptr %getelementptr20 to ptr
+ %bitcast26 = bitcast ptr %getelementptr21 to ptr
+ %bitcast27 = bitcast ptr %getelementptr22 to ptr
+ br label %bb28
+
+bb28: ; preds = %bb28, %bb11
+ %phi29 = phi i32 [ 0, %bb11 ], [ %add54, %bb28 ]
+ %phi30 = phi ptr [ %bitcast27, %bb11 ], [ %getelementptr36, %bb28 ]
+ %phi31 = phi ptr [ %bitcast26, %bb11 ], [ %getelementptr37, %bb28 ]
+ %phi32 = phi ptr [ %bitcast25, %bb11 ], [ %getelementptr39, %bb28 ]
+ %phi33 = phi ptr [ %bitcast24, %bb11 ], [ %getelementptr41, %bb28 ]
+ %phi34 = phi ptr [ %bitcast, %bb11 ], [ %getelementptr53, %bb28 ]
+ %phi35 = phi ptr [ %bitcast23, %bb11 ], [ %getelementptr52, %bb28 ]
+ %getelementptr36 = getelementptr inbounds <16 x i32>, ptr %phi30, i32 1
+ %load = load <16 x i32>, ptr %phi30, align 64
+ %getelementptr37 = getelementptr inbounds <16 x i32>, ptr %phi31, i32 1
+ %load38 = load <16 x i32>, ptr %phi31, align 64
+ %getelementptr39 = getelementptr inbounds <16 x i32>, ptr %phi32, i32 1
+ %load40 = load <16 x i32>, ptr %phi32, align 64
+ %getelementptr41 = getelementptr inbounds <16 x i32>, ptr %phi33, i32 1
+ %load42 = load <16 x i32>, ptr %phi33, align 64
+ %call = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %load, <16 x i32> %load38)
+ %call43 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %load, <16 x i32> %load38)
+ %call44 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %load40, <16 x i32> %load42)
+ %call45 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %load40, <16 x i32> %load42)
+ %call46 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %call, <16 x i32> %call44)
+ %call47 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %call, <16 x i32> %call44)
+ %call48 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %call43, <16 x i32> %call45)
+ %call49 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %call43, <16 x i32> %call45)
+ %call50 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %call47, <16 x i32> %call46)
+ %call51 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %call49, <16 x i32> %call48)
+ %getelementptr52 = getelementptr inbounds <16 x i32>, ptr %phi35, i32 1
+ store <16 x i32> %call50, ptr %phi35, align 64
+ %getelementptr53 = getelementptr inbounds <16 x i32>, ptr %phi34, i32 1
+ store <16 x i32> %call51, ptr %phi34, align 64
+ %add54 = add nsw i32 %phi29, 1
+ %icmp55 = icmp slt i32 %add54, %sdiv
+ br i1 %icmp55, label %bb28, label %bb56
+
+bb56: ; preds = %bb28
+ br label %bb57
+
+bb57: ; preds = %bb56, %bb9
+ %add58 = add nsw i32 %phi, 2
+ %icmp59 = icmp slt i32 %add58, %arg2
+ br i1 %icmp59, label %bb9, label %bb60
+
+bb60: ; preds = %bb57
+ br label %bb61
+
+bb61: ; preds = %bb60, %bb
+ ret void
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(none) }
diff --git a/llvm/test/CodeGen/Hexagon/loop_align_count.mir b/llvm/test/CodeGen/Hexagon/loop_align_count.mir
new file mode 100644
index 000000000000..6955b525e1ad
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/loop_align_count.mir
@@ -0,0 +1,131 @@
+# RUN: llc -march=hexagon -O3 -run-pass hexagon-loop-align -o - %s\
+# RUN: -debug-only=hexagon-loop-align -verify-machineinstrs 2>&1 | FileCheck %s
+# REQUIRES: asserts
+
+# Test that we only count til endloop instruction and we align this
+# loop to 32.
+# CHECK: Loop Align Pass:
+# CHECK: Instruction Count : 16
+# CHECK: bb.5 (align 32)
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ successors: %bb.1(0x50000000), %bb.8(0x30000000)
+ liveins: $r0, $r1, $r2, $r3, $r4, $r5
+
+ renamable $p0 = C2_cmpgti renamable $r2, 0
+ J2_jumpf killed renamable $p0, %bb.8, implicit-def dead $pc
+ J2_jump %bb.1, implicit-def dead $pc
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+ liveins: $r0, $r1, $r2, $r3, $r4, $r5
+
+ renamable $r7 = A2_addi killed renamable $r2, 1
+ renamable $r8 = S2_asr_i_r renamable $r1, 31
+ renamable $p0 = C2_cmpgti renamable $r1, 63
+ renamable $r2 = S2_asr_i_r renamable $r3, 2
+ renamable $r6 = S2_asr_i_r renamable $r3, 1
+ renamable $r9 = S2_lsr_i_r killed renamable $r7, 1
+ renamable $r1 = S2_lsr_i_r_acc killed renamable $r1, killed renamable $r8, 26
+ renamable $r7 = A2_tfrsi 0
+ renamable $r1 = S2_asr_i_r killed renamable $r1, 6
+ J2_loop1r %bb.2, killed renamable $r9, implicit-def $lc1, implicit-def $sa1
+ renamable $r8 = nsw A2_add renamable $r6, renamable $r2
+
+ bb.2:
+ successors: %bb.3(0x40000000), %bb.7(0x40000000)
+ liveins: $p0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8
+
+ J2_jumpf renamable $p0, %bb.7, implicit-def dead $pc
+ J2_jump %bb.3, implicit-def dead $pc
+
+ bb.3:
+ successors: %bb.4(0x80000000)
+ liveins: $p0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8
+
+ renamable $r13 = exact S2_asr_i_r renamable $r7, 1
+ renamable $r12 = COPY renamable $r4
+ renamable $r9 = COPY renamable $r4
+ renamable $r14 = nsw A2_addi renamable $r7, 1
+ renamable $r15 = nsw M2_mpyi killed renamable $r13, renamable $r3
+ renamable $r9 = M2_maci killed renamable $r9, killed renamable $r14, renamable $r5
+ renamable $r13 = A2_add renamable $r8, renamable $r15
+ renamable $r28 = A2_add renamable $r15, renamable $r2
+ renamable $r10 = A2_add renamable $r15, renamable $r6
+ renamable $r12 = M2_maci killed renamable $r12, renamable $r7, renamable $r5
+ renamable $r13 = S2_addasl_rrri renamable $r0, killed renamable $r13, 1
+ renamable $r14 = S2_addasl_rrri renamable $r0, killed renamable $r15, 1
+ renamable $r15 = S2_addasl_rrri renamable $r0, killed renamable $r28, 1
+ renamable $r28 = S2_addasl_rrri renamable $r0, killed renamable $r10, 1
+
+ bb.4:
+ successors: %bb.5(0x40000000), %bb.6(0x40000000)
+ liveins: $p0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12, $r13, $r14, $r15, $r28
+
+ renamable $v0, renamable $r14 = V6_vL32b_pi killed renamable $r14, 64
+ renamable $p1 = C2_cmpgtui renamable $r1, 1
+ renamable $r10 = A2_addi renamable $r1, -1
+ renamable $v2, renamable $r28 = V6_vL32b_pi killed renamable $r28, 64
+ renamable $v1 = V6_vaddh renamable $v0, renamable $v2
+ renamable $v3, renamable $r15 = V6_vL32b_pi killed renamable $r15, 64
+ renamable $v0 = V6_vsubh killed renamable $v0, killed renamable $v2
+ J2_loop0r %bb.5, killed renamable $r10, implicit-def $lc0, implicit-def $sa0, implicit-def $usr
+ renamable $v4, renamable $r13 = V6_vL32b_pi killed renamable $r13, 64
+ renamable $v2 = V6_vaddh renamable $v3, renamable $v4
+ J2_jumpf killed renamable $p1, %bb.6, implicit-def $pc
+ J2_jump %bb.5, implicit-def $pc
+
+ bb.5:
+ successors: %bb.5(0x7c000000), %bb.6(0x04000000)
+ liveins: $p0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12, $r13, $r14, $r15, $r28, $v0, $v1, $v2, $v3, $v4
+
+ renamable $v3 = V6_vsubh killed renamable $v3, killed renamable $v4
+ renamable $v4, renamable $r14 = V6_vL32b_pi killed renamable $r14, 64
+ renamable $v5 = V6_vnavgh renamable $v1, renamable $v2
+ renamable $v1 = V6_vavgh killed renamable $v1, killed renamable $v2
+ renamable $v2, renamable $r28 = V6_vL32b_pi killed renamable $r28, 64
+ renamable $v1 = V6_vsathub killed renamable $v5, killed renamable $v1
+ renamable $v5 = V6_vnavgh renamable $v0, renamable $v3
+ renamable $v6 = V6_vavgh killed renamable $v0, killed renamable $v3
+ renamable $r12 = V6_vS32b_pi killed renamable $r12, 64, killed renamable $v1
+ renamable $v1 = V6_vaddh renamable $v4, renamable $v2
+ renamable $v3, renamable $r15 = V6_vL32b_pi killed renamable $r15, 64
+ renamable $v0 = V6_vsubh killed renamable $v4, killed renamable $v2
+ renamable $v4, renamable $r13 = V6_vL32b_pi killed renamable $r13, 64
+ renamable $v2 = V6_vaddh renamable $v3, renamable $v4
+ renamable $v5 = V6_vsathub killed renamable $v5, killed renamable $v6
+ renamable $r9 = V6_vS32b_pi killed renamable $r9, 64, killed renamable $v5
+ ENDLOOP0 %bb.5, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0
+ J2_jump %bb.6, implicit-def $pc
+
+ bb.6:
+ successors: %bb.7(0x80000000)
+ liveins: $p0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12, $v0, $v1, $v2, $v3, $v4
+
+ renamable $v3 = V6_vsubh killed renamable $v3, killed renamable $v4
+ renamable $v4 = V6_vavgh renamable $v1, renamable $v2
+ renamable $v1 = V6_vnavgh killed renamable $v1, killed renamable $v2
+ renamable $v2 = V6_vavgh renamable $v0, renamable $v3
+ renamable $v0 = V6_vnavgh killed renamable $v0, killed renamable $v3
+ renamable $v1 = V6_vsathub killed renamable $v1, killed renamable $v4
+ dead renamable $r12 = V6_vS32b_pi killed renamable $r12, 64, killed renamable $v1
+ renamable $v0 = V6_vsathub killed renamable $v0, killed renamable $v2
+ dead renamable $r9 = V6_vS32b_pi killed renamable $r9, 64, killed renamable $v0
+ J2_jump %bb.7, implicit-def $pc
+
+ bb.7:
+ successors: %bb.2(0x7c000000), %bb.8(0x04000000)
+ liveins: $p0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8
+
+ renamable $r7 = nsw A2_addi killed renamable $r7, 2
+ ENDLOOP1 %bb.2, implicit-def $pc, implicit-def $lc1, implicit $sa1, implicit $lc1
+ J2_jump %bb.8, implicit-def dead $pc
+
+ bb.8:
+ PS_jmpret $r31, implicit-def dead $pc
+
+...
diff --git a/llvm/test/CodeGen/Hexagon/v6-haar-balign32.ll b/llvm/test/CodeGen/Hexagon/v6-haar-balign32.ll
new file mode 100644
index 000000000000..6b3c0a94a494
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/v6-haar-balign32.ll
@@ -0,0 +1,117 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv73 -O2 -mattr=+hvxv73,hvx-length64b < %s | FileCheck %s
+; CHECK: .p2align{{.*}}5
+
+; Function Attrs: nounwind
+define void @wobble(ptr noalias nocapture readonly %arg, i32 %arg1, i32 %arg2, i32 %arg3, ptr noalias nocapture %arg4, i32 %arg5) #0 {
+bb:
+ %ashr = ashr i32 %arg3, 2
+ %ashr6 = ashr i32 %arg3, 1
+ %add = add nsw i32 %ashr6, %ashr
+ %icmp = icmp sgt i32 %arg2, 0
+ br i1 %icmp, label %bb7, label %bb61
+
+bb7: ; preds = %bb
+ %sdiv = sdiv i32 %arg1, 64
+ %icmp8 = icmp sgt i32 %arg1, 63
+ br label %bb9
+
+bb9: ; preds = %bb57, %bb7
+ %phi = phi i32 [ 0, %bb7 ], [ %add58, %bb57 ]
+ %ashr10 = ashr exact i32 %phi, 1
+ %mul = mul nsw i32 %ashr10, %arg3
+ br i1 %icmp8, label %bb11, label %bb57
+
+bb11: ; preds = %bb9
+ %add12 = add nsw i32 %phi, 1
+ %mul13 = mul nsw i32 %add12, %arg5
+ %mul14 = mul nsw i32 %phi, %arg5
+ %add15 = add i32 %add, %mul
+ %add16 = add i32 %mul, %ashr
+ %add17 = add i32 %mul, %ashr6
+ %getelementptr = getelementptr inbounds i8, ptr %arg4, i32 %mul13
+ %getelementptr18 = getelementptr inbounds i8, ptr %arg4, i32 %mul14
+ %getelementptr19 = getelementptr inbounds i16, ptr %arg, i32 %add15
+ %getelementptr20 = getelementptr inbounds i16, ptr %arg, i32 %add16
+ %getelementptr21 = getelementptr inbounds i16, ptr %arg, i32 %add17
+ %getelementptr22 = getelementptr inbounds i16, ptr %arg, i32 %mul
+ %bitcast = bitcast ptr %getelementptr to ptr
+ %bitcast23 = bitcast ptr %getelementptr18 to ptr
+ %bitcast24 = bitcast ptr %getelementptr19 to ptr
+ %bitcast25 = bitcast ptr %getelementptr20 to ptr
+ %bitcast26 = bitcast ptr %getelementptr21 to ptr
+ %bitcast27 = bitcast ptr %getelementptr22 to ptr
+ br label %bb28
+
+bb28: ; preds = %bb28, %bb11
+ %phi29 = phi i32 [ 0, %bb11 ], [ %add54, %bb28 ]
+ %phi30 = phi ptr [ %bitcast27, %bb11 ], [ %getelementptr36, %bb28 ]
+ %phi31 = phi ptr [ %bitcast26, %bb11 ], [ %getelementptr37, %bb28 ]
+ %phi32 = phi ptr [ %bitcast25, %bb11 ], [ %getelementptr39, %bb28 ]
+ %phi33 = phi ptr [ %bitcast24, %bb11 ], [ %getelementptr41, %bb28 ]
+ %phi34 = phi ptr [ %bitcast, %bb11 ], [ %getelementptr53, %bb28 ]
+ %phi35 = phi ptr [ %bitcast23, %bb11 ], [ %getelementptr52, %bb28 ]
+ %getelementptr36 = getelementptr inbounds <16 x i32>, ptr %phi30, i32 1
+ %load = load <16 x i32>, ptr %phi30, align 64, !tbaa !1
+ %getelementptr37 = getelementptr inbounds <16 x i32>, ptr %phi31, i32 1
+ %load38 = load <16 x i32>, ptr %phi31, align 64, !tbaa !1
+ %getelementptr39 = getelementptr inbounds <16 x i32>, ptr %phi32, i32 1
+ %load40 = load <16 x i32>, ptr %phi32, align 64, !tbaa !1
+ %getelementptr41 = getelementptr inbounds <16 x i32>, ptr %phi33, i32 1
+ %load42 = load <16 x i32>, ptr %phi33, align 64, !tbaa !1
+ %call = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %load, <16 x i32> %load38)
+ %call43 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %load, <16 x i32> %load38)
+ %call44 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %load40, <16 x i32> %load42)
+ %call45 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %load40, <16 x i32> %load42)
+ %call46 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %call, <16 x i32> %call44)
+ %call47 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %call, <16 x i32> %call44)
+ %call48 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %call43, <16 x i32> %call45)
+ %call49 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %call43, <16 x i32> %call45)
+ %call50 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %call47, <16 x i32> %call46)
+ %call51 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %call49, <16 x i32> %call48)
+ %getelementptr52 = getelementptr inbounds <16 x i32>, ptr %phi35, i32 1
+ store <16 x i32> %call50, ptr %phi35, align 64, !tbaa !1
+ %getelementptr53 = getelementptr inbounds <16 x i32>, ptr %phi34, i32 1
+ store <16 x i32> %call51, ptr %phi34, align 64, !tbaa !1
+ %add54 = add nsw i32 %phi29, 1
+ %icmp55 = icmp slt i32 %add54, %sdiv
+ br i1 %icmp55, label %bb28, label %bb56
+
+bb56: ; preds = %bb28
+ br label %bb57
+
+bb57: ; preds = %bb56, %bb9
+ %add58 = add nsw i32 %phi, 2
+ %icmp59 = icmp slt i32 %add58, %arg2
+ br i1 %icmp59, label %bb9, label %bb60
+
+bb60: ; preds = %bb57
+ br label %bb61
+
+bb61: ; preds = %bb60, %bb
+ ret void
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(none) }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"Clang 3.1"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
diff --git a/llvm/test/CodeGen/M68k/pipeline.ll b/llvm/test/CodeGen/M68k/pipeline.ll
index dfaa149b7a47..db047da943c5 100644
--- a/llvm/test/CodeGen/M68k/pipeline.ll
+++ b/llvm/test/CodeGen/M68k/pipeline.ll
@@ -76,6 +76,7 @@
; CHECK-NEXT: Peephole Optimizations
; CHECK-NEXT: Remove dead machine instructions
; CHECK-NEXT: Detect Dead Lanes
+; CHECK-NEXT: Init Undef Pass
; CHECK-NEXT: Process Implicit Definitions
; CHECK-NEXT: Remove unreachable machine basic blocks
; CHECK-NEXT: Live Variable Analysis
diff --git a/llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll b/llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
index 0405b25e7fb0..e20fc400f80f 100644
--- a/llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
+++ b/llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
@@ -36,7 +36,7 @@ define i64 @__fixunstfdi(ppc_fp128 %a) nounwind readnone {
; CHECK-NEXT: # %bb.1: # %bb5
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: li 4, 0
-; CHECK-NEXT: b .LBB0_17
+; CHECK-NEXT: b .LBB0_19
; CHECK-NEXT: .LBB0_2: # %bb1
; CHECK-NEXT: lfd 0, 400(1)
; CHECK-NEXT: lis 3, 15856
@@ -99,24 +99,22 @@ define i64 @__fixunstfdi(ppc_fp128 %a) nounwind readnone {
; CHECK-NEXT: fadd 1, 28, 29
; CHECK-NEXT: mtfsf 1, 0
; CHECK-NEXT: lfs 0, .LCPI0_1@l(3)
-; CHECK-NEXT: fctiwz 1, 1
-; CHECK-NEXT: stfd 1, 152(1)
; CHECK-NEXT: fcmpu 0, 28, 27
-; CHECK-NEXT: lwz 3, 164(1)
+; CHECK-NEXT: fctiwz 1, 1
; CHECK-NEXT: fcmpu 1, 29, 0
-; CHECK-NEXT: lwz 4, 156(1)
; CHECK-NEXT: crandc 20, 6, 0
; CHECK-NEXT: cror 20, 5, 20
-; CHECK-NEXT: addis 3, 3, -32768
+; CHECK-NEXT: stfd 1, 152(1)
; CHECK-NEXT: bc 12, 20, .LBB0_4
; CHECK-NEXT: # %bb.3: # %bb1
-; CHECK-NEXT: ori 30, 4, 0
+; CHECK-NEXT: lwz 30, 156(1)
; CHECK-NEXT: b .LBB0_5
-; CHECK-NEXT: .LBB0_4: # %bb1
-; CHECK-NEXT: addi 30, 3, 0
+; CHECK-NEXT: .LBB0_4:
+; CHECK-NEXT: lwz 3, 164(1)
+; CHECK-NEXT: addis 30, 3, -32768
; CHECK-NEXT: .LBB0_5: # %bb1
-; CHECK-NEXT: li 4, 0
; CHECK-NEXT: mr 3, 30
+; CHECK-NEXT: li 4, 0
; CHECK-NEXT: bl __floatditf
; CHECK-NEXT: lis 3, 17392
; CHECK-NEXT: stfd 1, 208(1)
@@ -179,10 +177,10 @@ define i64 @__fixunstfdi(ppc_fp128 %a) nounwind readnone {
; CHECK-NEXT: lwz 3, 168(1)
; CHECK-NEXT: stw 3, 272(1)
; CHECK-NEXT: lfd 31, 272(1)
-; CHECK-NEXT: bc 12, 20, .LBB0_14
+; CHECK-NEXT: bc 12, 20, .LBB0_13
; CHECK-NEXT: # %bb.10: # %bb1
; CHECK-NEXT: cror 20, 1, 3
-; CHECK-NEXT: bc 12, 20, .LBB0_14
+; CHECK-NEXT: bc 12, 20, .LBB0_13
; CHECK-NEXT: # %bb.11: # %bb2
; CHECK-NEXT: fneg 29, 31
; CHECK-NEXT: stfd 29, 48(1)
@@ -223,24 +221,17 @@ define i64 @__fixunstfdi(ppc_fp128 %a) nounwind readnone {
; CHECK-NEXT: fadd 1, 28, 29
; CHECK-NEXT: mtfsf 1, 0
; CHECK-NEXT: lfs 0, .LCPI0_3@l(3)
-; CHECK-NEXT: fctiwz 1, 1
-; CHECK-NEXT: stfd 1, 24(1)
; CHECK-NEXT: fcmpu 0, 30, 2
-; CHECK-NEXT: lwz 3, 36(1)
+; CHECK-NEXT: fctiwz 1, 1
; CHECK-NEXT: fcmpu 1, 31, 0
-; CHECK-NEXT: lwz 4, 28(1)
; CHECK-NEXT: crandc 20, 6, 1
; CHECK-NEXT: cror 20, 4, 20
-; CHECK-NEXT: addis 3, 3, -32768
-; CHECK-NEXT: bc 12, 20, .LBB0_13
+; CHECK-NEXT: stfd 1, 24(1)
+; CHECK-NEXT: bc 12, 20, .LBB0_17
; CHECK-NEXT: # %bb.12: # %bb2
-; CHECK-NEXT: ori 3, 4, 0
-; CHECK-NEXT: b .LBB0_13
-; CHECK-NEXT: .LBB0_13: # %bb2
-; CHECK-NEXT: subfic 4, 3, 0
-; CHECK-NEXT: subfe 3, 29, 30
-; CHECK-NEXT: b .LBB0_17
-; CHECK-NEXT: .LBB0_14: # %bb3
+; CHECK-NEXT: lwz 3, 28(1)
+; CHECK-NEXT: b .LBB0_18
+; CHECK-NEXT: .LBB0_13: # %bb3
; CHECK-NEXT: stfd 31, 112(1)
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: stw 3, 148(1)
@@ -278,22 +269,29 @@ define i64 @__fixunstfdi(ppc_fp128 %a) nounwind readnone {
; CHECK-NEXT: fadd 2, 30, 31
; CHECK-NEXT: mtfsf 1, 0
; CHECK-NEXT: lfs 0, .LCPI0_1@l(3)
-; CHECK-NEXT: fctiwz 2, 2
-; CHECK-NEXT: stfd 2, 88(1)
; CHECK-NEXT: fcmpu 0, 30, 1
-; CHECK-NEXT: lwz 3, 100(1)
+; CHECK-NEXT: fctiwz 1, 2
; CHECK-NEXT: fcmpu 1, 31, 0
-; CHECK-NEXT: lwz 4, 92(1)
; CHECK-NEXT: crandc 20, 6, 0
; CHECK-NEXT: cror 20, 5, 20
-; CHECK-NEXT: addis 3, 3, -32768
+; CHECK-NEXT: stfd 1, 88(1)
; CHECK-NEXT: bc 12, 20, .LBB0_15
+; CHECK-NEXT: # %bb.14: # %bb3
+; CHECK-NEXT: lwz 4, 92(1)
; CHECK-NEXT: b .LBB0_16
-; CHECK-NEXT: .LBB0_15: # %bb3
-; CHECK-NEXT: addi 4, 3, 0
+; CHECK-NEXT: .LBB0_15:
+; CHECK-NEXT: lwz 3, 100(1)
+; CHECK-NEXT: addis 4, 3, -32768
; CHECK-NEXT: .LBB0_16: # %bb3
; CHECK-NEXT: mr 3, 30
-; CHECK-NEXT: .LBB0_17: # %bb5
+; CHECK-NEXT: b .LBB0_19
+; CHECK-NEXT: .LBB0_17:
+; CHECK-NEXT: lwz 3, 36(1)
+; CHECK-NEXT: addis 3, 3, -32768
+; CHECK-NEXT: .LBB0_18: # %bb2
+; CHECK-NEXT: subfic 4, 3, 0
+; CHECK-NEXT: subfe 3, 29, 30
+; CHECK-NEXT: .LBB0_19: # %bb3
; CHECK-NEXT: lfd 31, 456(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 30, 448(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 29, 440(1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll b/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll
index c0ffb8154c69..ae41b6b13010 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-gd-double.ll
@@ -156,11 +156,11 @@ define void @storesTIInit(double %Val) #0 {
; SMALL32: # %bb.0: # %entry
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
-; SMALL32-NEXT: lwz 3, L..C4(2) # target-flags(ppc-tlsgdm) @TIInit
-; SMALL32-NEXT: lwz 4, L..C5(2) # target-flags(ppc-tlsgd) @TIInit
+; SMALL32-NEXT: lwz 3, L..C4(2) # target-flags(ppc-tlsldm) @"_$TLSML"
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
-; SMALL32-NEXT: stfd 1, 0(3)
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C5(2) # target-flags(ppc-tlsld) @TIInit
+; SMALL32-NEXT: stfdx 1, 3, 4
; SMALL32-NEXT: addi 1, 1, 32
; SMALL32-NEXT: lwz 0, 8(1)
; SMALL32-NEXT: mtlr 0
@@ -171,12 +171,12 @@ define void @storesTIInit(double %Val) #0 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: addis 3, L..C4@u(2)
-; LARGE32-NEXT: addis 4, L..C5@u(2)
-; LARGE32-NEXT: lwz 3, L..C4@l(3)
-; LARGE32-NEXT: lwz 4, L..C5@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
-; LARGE32-NEXT: stfd 1, 0(3)
+; LARGE32-NEXT: addis 6, L..C4@u(2)
+; LARGE32-NEXT: addis 3, L..C5@u(2)
+; LARGE32-NEXT: lwz 3, L..C5@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C4@l(6)
+; LARGE32-NEXT: stfdx 1, 3, 4
; LARGE32-NEXT: addi 1, 1, 32
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
@@ -186,11 +186,11 @@ define void @storesTIInit(double %Val) #0 {
; SMALL64: # %bb.0: # %entry
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
-; SMALL64-NEXT: ld 3, L..C4(2) # target-flags(ppc-tlsgdm) @TIInit
-; SMALL64-NEXT: ld 4, L..C5(2) # target-flags(ppc-tlsgd) @TIInit
+; SMALL64-NEXT: ld 3, L..C4(2) # target-flags(ppc-tlsldm) @"_$TLSML"
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: stfd 1, 0(3)
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C5(2) # target-flags(ppc-tlsld) @TIInit
+; SMALL64-NEXT: stfdx 1, 3, 4
; SMALL64-NEXT: addi 1, 1, 48
; SMALL64-NEXT: ld 0, 16(1)
; SMALL64-NEXT: mtlr 0
@@ -201,12 +201,12 @@ define void @storesTIInit(double %Val) #0 {
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: addis 3, L..C4@u(2)
-; LARGE64-NEXT: addis 4, L..C5@u(2)
; LARGE64-NEXT: std 0, 64(1)
+; LARGE64-NEXT: addis 6, L..C5@u(2)
; LARGE64-NEXT: ld 3, L..C4@l(3)
-; LARGE64-NEXT: ld 4, L..C5@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: stfd 1, 0(3)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C5@l(6)
+; LARGE64-NEXT: stfdx 1, 3, 4
; LARGE64-NEXT: addi 1, 1, 48
; LARGE64-NEXT: ld 0, 16(1)
; LARGE64-NEXT: mtlr 0
@@ -452,13 +452,13 @@ define double @loadsTIInit() #1 {
; SMALL32: # %bb.0: # %entry
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
-; SMALL32-NEXT: lwz 3, L..C4(2) # target-flags(ppc-tlsgdm) @TIInit
-; SMALL32-NEXT: lwz 4, L..C5(2) # target-flags(ppc-tlsgd) @TIInit
+; SMALL32-NEXT: lwz 3, L..C4(2) # target-flags(ppc-tlsldm) @"_$TLSML"
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
-; SMALL32-NEXT: lwz 4, L..C8(2) # @GInit
-; SMALL32-NEXT: lfd 0, 0(3)
-; SMALL32-NEXT: lfd 1, 0(4)
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C5(2) # target-flags(ppc-tlsld) @TIInit
+; SMALL32-NEXT: lfdx 0, 3, 4
+; SMALL32-NEXT: lwz 3, L..C8(2) # @GInit
+; SMALL32-NEXT: lfd 1, 0(3)
; SMALL32-NEXT: fadd 1, 0, 1
; SMALL32-NEXT: addi 1, 1, 32
; SMALL32-NEXT: lwz 0, 8(1)
@@ -470,12 +470,12 @@ define double @loadsTIInit() #1 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: addis 3, L..C4@u(2)
-; LARGE32-NEXT: addis 4, L..C5@u(2)
-; LARGE32-NEXT: lwz 3, L..C4@l(3)
-; LARGE32-NEXT: lwz 4, L..C5@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
-; LARGE32-NEXT: lfd 0, 0(3)
+; LARGE32-NEXT: addis 6, L..C4@u(2)
+; LARGE32-NEXT: addis 3, L..C5@u(2)
+; LARGE32-NEXT: lwz 3, L..C5@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C4@l(6)
+; LARGE32-NEXT: lfdx 0, 3, 4
; LARGE32-NEXT: addis 3, L..C8@u(2)
; LARGE32-NEXT: lwz 3, L..C8@l(3)
; LARGE32-NEXT: lfd 1, 0(3)
@@ -489,13 +489,13 @@ define double @loadsTIInit() #1 {
; SMALL64: # %bb.0: # %entry
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
-; SMALL64-NEXT: ld 3, L..C4(2) # target-flags(ppc-tlsgdm) @TIInit
-; SMALL64-NEXT: ld 4, L..C5(2) # target-flags(ppc-tlsgd) @TIInit
+; SMALL64-NEXT: ld 3, L..C4(2) # target-flags(ppc-tlsldm) @"_$TLSML"
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: ld 4, L..C8(2) # @GInit
-; SMALL64-NEXT: lfd 0, 0(3)
-; SMALL64-NEXT: lfd 1, 0(4)
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C5(2) # target-flags(ppc-tlsld) @TIInit
+; SMALL64-NEXT: lfdx 0, 3, 4
+; SMALL64-NEXT: ld 3, L..C8(2) # @GInit
+; SMALL64-NEXT: lfd 1, 0(3)
; SMALL64-NEXT: fadd 1, 0, 1
; SMALL64-NEXT: addi 1, 1, 48
; SMALL64-NEXT: ld 0, 16(1)
@@ -507,14 +507,14 @@ define double @loadsTIInit() #1 {
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: addis 3, L..C4@u(2)
-; LARGE64-NEXT: addis 4, L..C5@u(2)
; LARGE64-NEXT: std 0, 64(1)
+; LARGE64-NEXT: addis 6, L..C5@u(2)
; LARGE64-NEXT: ld 3, L..C4@l(3)
-; LARGE64-NEXT: ld 4, L..C5@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: addis 4, L..C8@u(2)
-; LARGE64-NEXT: lfd 0, 0(3)
-; LARGE64-NEXT: ld 3, L..C8@l(4)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C5@l(6)
+; LARGE64-NEXT: addis 5, L..C8@u(2)
+; LARGE64-NEXT: lfdx 0, 3, 4
+; LARGE64-NEXT: ld 3, L..C8@l(5)
; LARGE64-NEXT: lfd 1, 0(3)
; LARGE64-NEXT: fadd 1, 0, 1
; LARGE64-NEXT: addi 1, 1, 48
@@ -610,12 +610,16 @@ entry:
ret double %add
}
-; External symbol reference checks for .__tls_get_addr
+; External symbol reference checks for .__tls_get_addr/.__tls_get_mod
; SMALL32: .extern .__tls_get_addr[PR]
+; SMALL32: .extern .__tls_get_mod[PR]
; SMALL64: .extern .__tls_get_addr[PR]
+; SMALL64: .extern .__tls_get_mod[PR]
; LARGE32: .extern .__tls_get_addr[PR]
+; LARGE32: .extern .__tls_get_mod[PR]
; LARGE64: .extern .__tls_get_addr[PR]
+; LARGE64: .extern .__tls_get_mod[PR]
; TOC entry checks
@@ -629,9 +633,10 @@ entry:
; SMALL32-LABEL: L..C3:
; SMALL32-NEXT: .tc TGInit[TC],TGInit[TL]@gd
; SMALL32-LABEL: L..C4:
-; SMALL32-NEXT: .tc .TIInit[TC],TIInit[TL]@m
+; SMALL32-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; SMALL32-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; SMALL32-LABEL: L..C5:
-; SMALL32-NEXT: .tc TIInit[TC],TIInit[TL]@gd
+; SMALL32-NEXT: .tc TIInit[TC],TIInit[TL]@ld
; SMALL32-LABEL: L..C6:
; SMALL32-NEXT: .tc .TWInit[TC],TWInit[TL]@m
; SMALL32-LABEL: L..C7:
@@ -649,9 +654,10 @@ entry:
; LARGE32-LABEL: L..C3:
; LARGE32-NEXT: .tc TGInit[TE],TGInit[TL]@gd
; LARGE32-LABEL: L..C4:
-; LARGE32-NEXT: .tc .TIInit[TE],TIInit[TL]@m
+; LARGE32-NEXT: .tc TIInit[TE],TIInit[TL]@ld
; LARGE32-LABEL: L..C5:
-; LARGE32-NEXT: .tc TIInit[TE],TIInit[TL]@gd
+; LARGE32-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE32-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; LARGE32-LABEL: L..C6:
; LARGE32-NEXT: .tc .TWInit[TE],TWInit[TL]@m
; LARGE32-LABEL: L..C7:
@@ -669,9 +675,10 @@ entry:
; SMALL64-LABEL: L..C3:
; SMALL64-NEXT: .tc TGInit[TC],TGInit[TL]@gd
; SMALL64-LABEL: L..C4:
-; SMALL64-NEXT: .tc .TIInit[TC],TIInit[TL]@m
+; SMALL64-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; SMALL64-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; SMALL64-LABEL: L..C5:
-; SMALL64-NEXT: .tc TIInit[TC],TIInit[TL]@gd
+; SMALL64-NEXT: .tc TIInit[TC],TIInit[TL]@ld
; SMALL64-LABEL: L..C6:
; SMALL64-NEXT: .tc .TWInit[TC],TWInit[TL]@m
; SMALL64-LABEL: L..C7:
@@ -689,9 +696,10 @@ entry:
; LARGE64-LABEL: L..C3:
; LARGE64-NEXT: .tc TGInit[TE],TGInit[TL]@gd
; LARGE64-LABEL: L..C4:
-; LARGE64-NEXT: .tc .TIInit[TE],TIInit[TL]@m
+; LARGE64-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE64-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; LARGE64-LABEL: L..C5:
-; LARGE64-NEXT: .tc TIInit[TE],TIInit[TL]@gd
+; LARGE64-NEXT: .tc TIInit[TE],TIInit[TL]@ld
; LARGE64-LABEL: L..C6:
; LARGE64-NEXT: .tc .TWInit[TE],TWInit[TL]@m
; LARGE64-LABEL: L..C7:
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll b/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll
index 887c4521a4c9..bbb8e04b67b9 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-gd-int.ll
@@ -165,10 +165,10 @@ define void @storesTIUninit(i32 %Val) #0 {
; SMALL32-NEXT: stwu 1, -32(1)
; SMALL32-NEXT: mr 6, 3
; SMALL32-NEXT: lwz 3, L..C4(2)
-; SMALL32-NEXT: lwz 4, L..C5(2)
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
-; SMALL32-NEXT: stw 6, 0(3)
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C5(2)
+; SMALL32-NEXT: stwx 6, 3, 4
; SMALL32-NEXT: addi 1, 1, 32
; SMALL32-NEXT: lwz 0, 8(1)
; SMALL32-NEXT: mtlr 0
@@ -180,12 +180,12 @@ define void @storesTIUninit(i32 %Val) #0 {
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
; LARGE32-NEXT: mr 6, 3
-; LARGE32-NEXT: addis 3, L..C4@u(2)
-; LARGE32-NEXT: addis 4, L..C5@u(2)
-; LARGE32-NEXT: lwz 3, L..C4@l(3)
-; LARGE32-NEXT: lwz 4, L..C5@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
-; LARGE32-NEXT: stw 6, 0(3)
+; LARGE32-NEXT: addis 7, L..C4@u(2)
+; LARGE32-NEXT: addis 3, L..C5@u(2)
+; LARGE32-NEXT: lwz 3, L..C5@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C4@l(7)
+; LARGE32-NEXT: stwx 6, 3, 4
; LARGE32-NEXT: addi 1, 1, 32
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
@@ -197,10 +197,10 @@ define void @storesTIUninit(i32 %Val) #0 {
; SMALL64-NEXT: stdu 1, -48(1)
; SMALL64-NEXT: mr 6, 3
; SMALL64-NEXT: ld 3, L..C4(2)
-; SMALL64-NEXT: ld 4, L..C5(2)
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: stw 6, 0(3)
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: stwx 6, 3, 4
; SMALL64-NEXT: addi 1, 1, 48
; SMALL64-NEXT: ld 0, 16(1)
; SMALL64-NEXT: mtlr 0
@@ -212,12 +212,12 @@ define void @storesTIUninit(i32 %Val) #0 {
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: mr 6, 3
; LARGE64-NEXT: addis 3, L..C4@u(2)
-; LARGE64-NEXT: addis 4, L..C5@u(2)
; LARGE64-NEXT: std 0, 64(1)
+; LARGE64-NEXT: addis 7, L..C5@u(2)
; LARGE64-NEXT: ld 3, L..C4@l(3)
-; LARGE64-NEXT: ld 4, L..C5@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: stw 6, 0(3)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C5@l(7)
+; LARGE64-NEXT: stwx 6, 3, 4
; LARGE64-NEXT: addi 1, 1, 48
; LARGE64-NEXT: ld 0, 16(1)
; LARGE64-NEXT: mtlr 0
@@ -468,11 +468,11 @@ define i32 @loadsTIUninit() #1 {
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
; SMALL32-NEXT: lwz 3, L..C4(2)
-; SMALL32-NEXT: lwz 4, L..C5(2)
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C5(2)
+; SMALL32-NEXT: lwzx 3, 3, 4
; SMALL32-NEXT: lwz 4, L..C8(2)
-; SMALL32-NEXT: lwz 3, 0(3)
; SMALL32-NEXT: lwz 4, 0(4)
; SMALL32-NEXT: add 3, 4, 3
; SMALL32-NEXT: addi 1, 1, 32
@@ -485,12 +485,12 @@ define i32 @loadsTIUninit() #1 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: addis 3, L..C4@u(2)
-; LARGE32-NEXT: addis 4, L..C5@u(2)
-; LARGE32-NEXT: lwz 3, L..C4@l(3)
-; LARGE32-NEXT: lwz 4, L..C5@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
-; LARGE32-NEXT: lwz 3, 0(3)
+; LARGE32-NEXT: addis 6, L..C4@u(2)
+; LARGE32-NEXT: addis 3, L..C5@u(2)
+; LARGE32-NEXT: lwz 3, L..C5@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C4@l(6)
+; LARGE32-NEXT: lwzx 3, 3, 4
; LARGE32-NEXT: addis 4, L..C8@u(2)
; LARGE32-NEXT: lwz 4, L..C8@l(4)
; LARGE32-NEXT: lwz 4, 0(4)
@@ -505,11 +505,11 @@ define i32 @loadsTIUninit() #1 {
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
; SMALL64-NEXT: ld 3, L..C4(2)
-; SMALL64-NEXT: ld 4, L..C5(2)
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: lwzx 3, 3, 4
; SMALL64-NEXT: ld 4, L..C8(2)
-; SMALL64-NEXT: lwz 3, 0(3)
; SMALL64-NEXT: lwz 4, 0(4)
; SMALL64-NEXT: add 3, 4, 3
; SMALL64-NEXT: addi 1, 1, 48
@@ -522,14 +522,14 @@ define i32 @loadsTIUninit() #1 {
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: addis 3, L..C4@u(2)
-; LARGE64-NEXT: addis 4, L..C5@u(2)
; LARGE64-NEXT: std 0, 64(1)
+; LARGE64-NEXT: addis 6, L..C5@u(2)
; LARGE64-NEXT: ld 3, L..C4@l(3)
-; LARGE64-NEXT: ld 4, L..C5@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: addis 4, L..C8@u(2)
-; LARGE64-NEXT: lwz 3, 0(3)
-; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C5@l(6)
+; LARGE64-NEXT: addis 5, L..C8@u(2)
+; LARGE64-NEXT: lwzx 3, 3, 4
+; LARGE64-NEXT: ld 4, L..C8@l(5)
; LARGE64-NEXT: lwz 4, 0(4)
; LARGE64-NEXT: add 3, 4, 3
; LARGE64-NEXT: addi 1, 1, 48
@@ -625,12 +625,16 @@ entry:
ret i32 %add
}
-; External symbol reference checks for .__tls_get_addr
+; External symbol reference checks for .__tls_get_addr/.__tls_get_mod
; SMALL32: .extern .__tls_get_addr[PR]
+; SMALL32: .extern .__tls_get_mod[PR]
; SMALL64: .extern .__tls_get_addr[PR]
+; SMALL64: .extern .__tls_get_mod[PR]
; LARGE32: .extern .__tls_get_addr[PR]
+; LARGE32: .extern .__tls_get_mod[PR]
; LARGE64: .extern .__tls_get_addr[PR]
+; LARGE64: .extern .__tls_get_mod[PR]
; TOC entry checks
@@ -644,9 +648,10 @@ entry:
; SMALL32-LABEL: L..C3:
; SMALL32-NEXT: .tc TGInit[TC],TGInit[TL]@gd
; SMALL32-LABEL: L..C4:
-; SMALL32-NEXT: .tc .TIUninit[TC],TIUninit[UL]@m
+; SMALL32-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; SMALL32-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; SMALL32-LABEL: L..C5:
-; SMALL32-NEXT: .tc TIUninit[TC],TIUninit[UL]@gd
+; SMALL32-NEXT: .tc TIUninit[TC],TIUninit[UL]@ld
; SMALL32-LABEL: L..C6:
; SMALL32-NEXT: .tc .TWUninit[TC],TWUninit[TL]@m
; SMALL32-LABEL: L..C7:
@@ -664,9 +669,10 @@ entry:
; LARGE32-LABEL: L..C3:
; LARGE32-NEXT: .tc TGInit[TE],TGInit[TL]@gd
; LARGE32-LABEL: L..C4:
-; LARGE32-NEXT: .tc .TIUninit[TE],TIUninit[UL]@m
+; LARGE32-NEXT: .tc TIUninit[TE],TIUninit[UL]@ld
; LARGE32-LABEL: L..C5:
-; LARGE32-NEXT: .tc TIUninit[TE],TIUninit[UL]@gd
+; LARGE32-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE32-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; LARGE32-LABEL: L..C6:
; LARGE32-NEXT: .tc .TWUninit[TE],TWUninit[TL]@m
; LARGE32-LABEL: L..C7:
@@ -684,9 +690,10 @@ entry:
; SMALL64-LABEL: L..C3:
; SMALL64-NEXT: .tc TGInit[TC],TGInit[TL]@gd
; SMALL64-LABEL: L..C4:
-; SMALL64-NEXT: .tc .TIUninit[TC],TIUninit[UL]@m
+; SMALL64-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; SMALL64-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; SMALL64-LABEL: L..C5:
-; SMALL64-NEXT: .tc TIUninit[TC],TIUninit[UL]@gd
+; SMALL64-NEXT: .tc TIUninit[TC],TIUninit[UL]@ld
; SMALL64-LABEL: L..C6:
; SMALL64-NEXT: .tc .TWUninit[TC],TWUninit[TL]@m
; SMALL64-LABEL: L..C7:
@@ -704,9 +711,10 @@ entry:
; LARGE64-LABEL: L..C3:
; LARGE64-NEXT: .tc TGInit[TE],TGInit[TL]@gd
; LARGE64-LABEL: L..C4:
-; LARGE64-NEXT: .tc .TIUninit[TE],TIUninit[UL]@m
+; LARGE64-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE64-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; LARGE64-LABEL: L..C5:
-; LARGE64-NEXT: .tc TIUninit[TE],TIUninit[UL]@gd
+; LARGE64-NEXT: .tc TIUninit[TE],TIUninit[UL]@ld
; LARGE64-LABEL: L..C6:
; LARGE64-NEXT: .tc .TWUninit[TE],TWUninit[TL]@m
; LARGE64-LABEL: L..C7:
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll b/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll
index 47813b59ba80..ff087a214448 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-gd-longlong.ll
@@ -97,14 +97,14 @@ define void @storesTIUninit(i64 %Val) #0 {
; SMALL32: # %bb.0: # %entry
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
-; SMALL32-NEXT: mr 6, 4
; SMALL32-NEXT: mr 7, 3
; SMALL32-NEXT: lwz 3, L..C2(2)
-; SMALL32-NEXT: lwz 4, L..C3(2)
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
+; SMALL32-NEXT: mr 6, 4
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C3(2)
+; SMALL32-NEXT: stwux 7, 3, 4
; SMALL32-NEXT: stw 6, 4(3)
-; SMALL32-NEXT: stw 7, 0(3)
; SMALL32-NEXT: addi 1, 1, 32
; SMALL32-NEXT: lwz 0, 8(1)
; SMALL32-NEXT: mtlr 0
@@ -115,15 +115,15 @@ define void @storesTIUninit(i64 %Val) #0 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: mr 6, 4
; LARGE32-NEXT: mr 7, 3
-; LARGE32-NEXT: addis 3, L..C2@u(2)
-; LARGE32-NEXT: addis 4, L..C3@u(2)
-; LARGE32-NEXT: lwz 3, L..C2@l(3)
-; LARGE32-NEXT: lwz 4, L..C3@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
+; LARGE32-NEXT: mr 6, 4
+; LARGE32-NEXT: addis 8, L..C2@u(2)
+; LARGE32-NEXT: addis 3, L..C3@u(2)
+; LARGE32-NEXT: lwz 3, L..C3@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C2@l(8)
+; LARGE32-NEXT: stwux 7, 3, 4
; LARGE32-NEXT: stw 6, 4(3)
-; LARGE32-NEXT: stw 7, 0(3)
; LARGE32-NEXT: addi 1, 1, 32
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
@@ -135,10 +135,10 @@ define void @storesTIUninit(i64 %Val) #0 {
; SMALL64-NEXT: stdu 1, -48(1)
; SMALL64-NEXT: mr 6, 3
; SMALL64-NEXT: ld 3, L..C2(2)
-; SMALL64-NEXT: ld 4, L..C3(2)
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: std 6, 0(3)
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: stdx 6, 3, 4
; SMALL64-NEXT: addi 1, 1, 48
; SMALL64-NEXT: ld 0, 16(1)
; SMALL64-NEXT: mtlr 0
@@ -150,12 +150,12 @@ define void @storesTIUninit(i64 %Val) #0 {
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: mr 6, 3
; LARGE64-NEXT: addis 3, L..C2@u(2)
-; LARGE64-NEXT: addis 4, L..C3@u(2)
; LARGE64-NEXT: std 0, 64(1)
+; LARGE64-NEXT: addis 7, L..C3@u(2)
; LARGE64-NEXT: ld 3, L..C2@l(3)
-; LARGE64-NEXT: ld 4, L..C3@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: std 6, 0(3)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C3@l(7)
+; LARGE64-NEXT: stdx 6, 3, 4
; LARGE64-NEXT: addi 1, 1, 48
; LARGE64-NEXT: ld 0, 16(1)
; LARGE64-NEXT: mtlr 0
@@ -171,14 +171,14 @@ define void @storesTIInit(i64 %Val) #0 {
; SMALL32: # %bb.0: # %entry
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
-; SMALL32-NEXT: mr 6, 4
; SMALL32-NEXT: mr 7, 3
-; SMALL32-NEXT: lwz 3, L..C4(2)
-; SMALL32-NEXT: lwz 4, L..C5(2)
+; SMALL32-NEXT: lwz 3, L..C2(2)
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
+; SMALL32-NEXT: mr 6, 4
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C4(2)
+; SMALL32-NEXT: stwux 7, 3, 4
; SMALL32-NEXT: stw 6, 4(3)
-; SMALL32-NEXT: stw 7, 0(3)
; SMALL32-NEXT: addi 1, 1, 32
; SMALL32-NEXT: lwz 0, 8(1)
; SMALL32-NEXT: mtlr 0
@@ -189,15 +189,15 @@ define void @storesTIInit(i64 %Val) #0 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: mr 6, 4
; LARGE32-NEXT: mr 7, 3
-; LARGE32-NEXT: addis 3, L..C4@u(2)
-; LARGE32-NEXT: addis 4, L..C5@u(2)
-; LARGE32-NEXT: lwz 3, L..C4@l(3)
-; LARGE32-NEXT: lwz 4, L..C5@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
+; LARGE32-NEXT: mr 6, 4
+; LARGE32-NEXT: addis 8, L..C4@u(2)
+; LARGE32-NEXT: addis 3, L..C3@u(2)
+; LARGE32-NEXT: lwz 3, L..C3@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C4@l(8)
+; LARGE32-NEXT: stwux 7, 3, 4
; LARGE32-NEXT: stw 6, 4(3)
-; LARGE32-NEXT: stw 7, 0(3)
; LARGE32-NEXT: addi 1, 1, 32
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
@@ -208,11 +208,11 @@ define void @storesTIInit(i64 %Val) #0 {
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
; SMALL64-NEXT: mr 6, 3
-; SMALL64-NEXT: ld 3, L..C4(2)
-; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: ld 3, L..C2(2)
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: std 6, 0(3)
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C4(2)
+; SMALL64-NEXT: stdx 6, 3, 4
; SMALL64-NEXT: addi 1, 1, 48
; SMALL64-NEXT: ld 0, 16(1)
; SMALL64-NEXT: mtlr 0
@@ -223,13 +223,13 @@ define void @storesTIInit(i64 %Val) #0 {
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: mr 6, 3
-; LARGE64-NEXT: addis 3, L..C4@u(2)
-; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: addis 3, L..C2@u(2)
; LARGE64-NEXT: std 0, 64(1)
-; LARGE64-NEXT: ld 3, L..C4@l(3)
-; LARGE64-NEXT: ld 4, L..C5@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: std 6, 0(3)
+; LARGE64-NEXT: addis 7, L..C4@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C4@l(7)
+; LARGE64-NEXT: stdx 6, 3, 4
; LARGE64-NEXT: addi 1, 1, 48
; LARGE64-NEXT: ld 0, 16(1)
; LARGE64-NEXT: mtlr 0
@@ -247,8 +247,8 @@ define void @storesTWInit(i64 %Val) #0 {
; SMALL32-NEXT: stwu 1, -32(1)
; SMALL32-NEXT: mr 6, 4
; SMALL32-NEXT: mr 7, 3
-; SMALL32-NEXT: lwz 3, L..C6(2)
-; SMALL32-NEXT: lwz 4, L..C7(2)
+; SMALL32-NEXT: lwz 3, L..C5(2)
+; SMALL32-NEXT: lwz 4, L..C6(2)
; SMALL32-NEXT: stw 0, 40(1)
; SMALL32-NEXT: bla .__tls_get_addr[PR]
; SMALL32-NEXT: stw 6, 4(3)
@@ -265,10 +265,10 @@ define void @storesTWInit(i64 %Val) #0 {
; LARGE32-NEXT: stw 0, 40(1)
; LARGE32-NEXT: mr 6, 4
; LARGE32-NEXT: mr 7, 3
-; LARGE32-NEXT: addis 3, L..C6@u(2)
-; LARGE32-NEXT: addis 4, L..C7@u(2)
-; LARGE32-NEXT: lwz 3, L..C6@l(3)
-; LARGE32-NEXT: lwz 4, L..C7@l(4)
+; LARGE32-NEXT: addis 3, L..C5@u(2)
+; LARGE32-NEXT: addis 4, L..C6@u(2)
+; LARGE32-NEXT: lwz 3, L..C5@l(3)
+; LARGE32-NEXT: lwz 4, L..C6@l(4)
; LARGE32-NEXT: bla .__tls_get_addr[PR]
; LARGE32-NEXT: stw 6, 4(3)
; LARGE32-NEXT: stw 7, 0(3)
@@ -282,8 +282,8 @@ define void @storesTWInit(i64 %Val) #0 {
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
; SMALL64-NEXT: mr 6, 3
-; SMALL64-NEXT: ld 3, L..C6(2)
-; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: ld 3, L..C5(2)
+; SMALL64-NEXT: ld 4, L..C6(2)
; SMALL64-NEXT: std 0, 64(1)
; SMALL64-NEXT: bla .__tls_get_addr[PR]
; SMALL64-NEXT: std 6, 0(3)
@@ -297,11 +297,11 @@ define void @storesTWInit(i64 %Val) #0 {
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: mr 6, 3
-; LARGE64-NEXT: addis 3, L..C6@u(2)
-; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: addis 3, L..C5@u(2)
+; LARGE64-NEXT: addis 4, L..C6@u(2)
; LARGE64-NEXT: std 0, 64(1)
-; LARGE64-NEXT: ld 3, L..C6@l(3)
-; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: ld 3, L..C5@l(3)
+; LARGE64-NEXT: ld 4, L..C6@l(4)
; LARGE64-NEXT: bla .__tls_get_addr[PR]
; LARGE64-NEXT: std 6, 0(3)
; LARGE64-NEXT: addi 1, 1, 48
@@ -323,7 +323,7 @@ define i64 @loadsTGInit() #1 {
; SMALL32-NEXT: lwz 4, L..C1(2)
; SMALL32-NEXT: stw 0, 40(1)
; SMALL32-NEXT: bla .__tls_get_addr[PR]
-; SMALL32-NEXT: lwz 4, L..C8(2)
+; SMALL32-NEXT: lwz 4, L..C7(2)
; SMALL32-NEXT: lwz 5, 4(3)
; SMALL32-NEXT: lwz 6, 4(4)
; SMALL32-NEXT: lwz 3, 0(3)
@@ -347,8 +347,8 @@ define i64 @loadsTGInit() #1 {
; LARGE32-NEXT: bla .__tls_get_addr[PR]
; LARGE32-NEXT: lwz 4, 4(3)
; LARGE32-NEXT: lwz 3, 0(3)
-; LARGE32-NEXT: addis 5, L..C8@u(2)
-; LARGE32-NEXT: lwz 5, L..C8@l(5)
+; LARGE32-NEXT: addis 5, L..C7@u(2)
+; LARGE32-NEXT: lwz 5, L..C7@l(5)
; LARGE32-NEXT: lwz 6, 4(5)
; LARGE32-NEXT: lwz 5, 0(5)
; LARGE32-NEXT: addc 4, 6, 4
@@ -366,7 +366,7 @@ define i64 @loadsTGInit() #1 {
; SMALL64-NEXT: ld 4, L..C1(2)
; SMALL64-NEXT: std 0, 64(1)
; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
; SMALL64-NEXT: ld 3, 0(3)
; SMALL64-NEXT: ld 4, 0(4)
; SMALL64-NEXT: add 3, 4, 3
@@ -385,9 +385,9 @@ define i64 @loadsTGInit() #1 {
; LARGE64-NEXT: ld 3, L..C0@l(3)
; LARGE64-NEXT: ld 4, L..C1@l(4)
; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
; LARGE64-NEXT: ld 3, 0(3)
-; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
; LARGE64-NEXT: ld 4, 0(4)
; LARGE64-NEXT: add 3, 4, 3
; LARGE64-NEXT: addi 1, 1, 48
@@ -408,16 +408,16 @@ define i64 @loadsTIUninit() #1 {
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
; SMALL32-NEXT: lwz 3, L..C2(2)
-; SMALL32-NEXT: lwz 4, L..C3(2)
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
-; SMALL32-NEXT: lwz 4, L..C8(2)
-; SMALL32-NEXT: lwz 5, 4(3)
-; SMALL32-NEXT: lwz 6, 4(4)
-; SMALL32-NEXT: lwz 3, 0(3)
-; SMALL32-NEXT: lwz 7, 0(4)
-; SMALL32-NEXT: addc 4, 6, 5
-; SMALL32-NEXT: adde 3, 7, 3
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C3(2)
+; SMALL32-NEXT: lwz 5, L..C7(2)
+; SMALL32-NEXT: lwzux 6, 3, 4
+; SMALL32-NEXT: lwz 4, 4(5)
+; SMALL32-NEXT: lwz 3, 4(3)
+; SMALL32-NEXT: lwz 5, 0(5)
+; SMALL32-NEXT: addc 4, 4, 3
+; SMALL32-NEXT: adde 3, 5, 6
; SMALL32-NEXT: addi 1, 1, 32
; SMALL32-NEXT: lwz 0, 8(1)
; SMALL32-NEXT: mtlr 0
@@ -428,19 +428,19 @@ define i64 @loadsTIUninit() #1 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: addis 3, L..C2@u(2)
-; LARGE32-NEXT: addis 4, L..C3@u(2)
-; LARGE32-NEXT: lwz 3, L..C2@l(3)
-; LARGE32-NEXT: lwz 4, L..C3@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
-; LARGE32-NEXT: lwz 4, 4(3)
-; LARGE32-NEXT: lwz 3, 0(3)
-; LARGE32-NEXT: addis 5, L..C8@u(2)
-; LARGE32-NEXT: lwz 5, L..C8@l(5)
-; LARGE32-NEXT: lwz 6, 4(5)
-; LARGE32-NEXT: lwz 5, 0(5)
-; LARGE32-NEXT: addc 4, 6, 4
-; LARGE32-NEXT: adde 3, 5, 3
+; LARGE32-NEXT: addis 6, L..C2@u(2)
+; LARGE32-NEXT: addis 3, L..C3@u(2)
+; LARGE32-NEXT: lwz 3, L..C3@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C2@l(6)
+; LARGE32-NEXT: lwzux 5, 3, 4
+; LARGE32-NEXT: lwz 3, 4(3)
+; LARGE32-NEXT: addis 4, L..C7@u(2)
+; LARGE32-NEXT: lwz 4, L..C7@l(4)
+; LARGE32-NEXT: lwz 6, 4(4)
+; LARGE32-NEXT: lwz 7, 0(4)
+; LARGE32-NEXT: addc 4, 6, 3
+; LARGE32-NEXT: adde 3, 7, 5
; LARGE32-NEXT: addi 1, 1, 32
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
@@ -451,11 +451,11 @@ define i64 @loadsTIUninit() #1 {
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
; SMALL64-NEXT: ld 3, L..C2(2)
-; SMALL64-NEXT: ld 4, L..C3(2)
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: ld 4, L..C8(2)
-; SMALL64-NEXT: ld 3, 0(3)
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C3(2)
+; SMALL64-NEXT: ldx 3, 3, 4
+; SMALL64-NEXT: ld 4, L..C7(2)
; SMALL64-NEXT: ld 4, 0(4)
; SMALL64-NEXT: add 3, 4, 3
; SMALL64-NEXT: addi 1, 1, 48
@@ -468,14 +468,14 @@ define i64 @loadsTIUninit() #1 {
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
; LARGE64-NEXT: addis 3, L..C2@u(2)
-; LARGE64-NEXT: addis 4, L..C3@u(2)
; LARGE64-NEXT: std 0, 64(1)
+; LARGE64-NEXT: addis 6, L..C3@u(2)
; LARGE64-NEXT: ld 3, L..C2@l(3)
-; LARGE64-NEXT: ld 4, L..C3@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: addis 4, L..C8@u(2)
-; LARGE64-NEXT: ld 3, 0(3)
-; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C3@l(6)
+; LARGE64-NEXT: addis 5, L..C7@u(2)
+; LARGE64-NEXT: ldx 3, 3, 4
+; LARGE64-NEXT: ld 4, L..C7@l(5)
; LARGE64-NEXT: ld 4, 0(4)
; LARGE64-NEXT: add 3, 4, 3
; LARGE64-NEXT: addi 1, 1, 48
@@ -495,17 +495,17 @@ define i64 @loadsTIInit() #1 {
; SMALL32: # %bb.0: # %entry
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
-; SMALL32-NEXT: lwz 3, L..C4(2)
-; SMALL32-NEXT: lwz 4, L..C5(2)
+; SMALL32-NEXT: lwz 3, L..C2(2)
; SMALL32-NEXT: stw 0, 40(1)
-; SMALL32-NEXT: bla .__tls_get_addr[PR]
-; SMALL32-NEXT: lwz 4, L..C8(2)
-; SMALL32-NEXT: lwz 5, 4(3)
-; SMALL32-NEXT: lwz 6, 4(4)
-; SMALL32-NEXT: lwz 3, 0(3)
-; SMALL32-NEXT: lwz 7, 0(4)
-; SMALL32-NEXT: addc 4, 6, 5
-; SMALL32-NEXT: adde 3, 7, 3
+; SMALL32-NEXT: bla .__tls_get_mod[PR]
+; SMALL32-NEXT: lwz 4, L..C4(2)
+; SMALL32-NEXT: lwz 5, L..C7(2)
+; SMALL32-NEXT: lwzux 6, 3, 4
+; SMALL32-NEXT: lwz 4, 4(5)
+; SMALL32-NEXT: lwz 3, 4(3)
+; SMALL32-NEXT: lwz 5, 0(5)
+; SMALL32-NEXT: addc 4, 4, 3
+; SMALL32-NEXT: adde 3, 5, 6
; SMALL32-NEXT: addi 1, 1, 32
; SMALL32-NEXT: lwz 0, 8(1)
; SMALL32-NEXT: mtlr 0
@@ -516,19 +516,19 @@ define i64 @loadsTIInit() #1 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: addis 3, L..C4@u(2)
-; LARGE32-NEXT: addis 4, L..C5@u(2)
-; LARGE32-NEXT: lwz 3, L..C4@l(3)
-; LARGE32-NEXT: lwz 4, L..C5@l(4)
-; LARGE32-NEXT: bla .__tls_get_addr[PR]
-; LARGE32-NEXT: lwz 4, 4(3)
-; LARGE32-NEXT: lwz 3, 0(3)
-; LARGE32-NEXT: addis 5, L..C8@u(2)
-; LARGE32-NEXT: lwz 5, L..C8@l(5)
-; LARGE32-NEXT: lwz 6, 4(5)
-; LARGE32-NEXT: lwz 5, 0(5)
-; LARGE32-NEXT: addc 4, 6, 4
-; LARGE32-NEXT: adde 3, 5, 3
+; LARGE32-NEXT: addis 6, L..C4@u(2)
+; LARGE32-NEXT: addis 3, L..C3@u(2)
+; LARGE32-NEXT: lwz 3, L..C3@l(3)
+; LARGE32-NEXT: bla .__tls_get_mod[PR]
+; LARGE32-NEXT: lwz 4, L..C4@l(6)
+; LARGE32-NEXT: lwzux 5, 3, 4
+; LARGE32-NEXT: lwz 3, 4(3)
+; LARGE32-NEXT: addis 4, L..C7@u(2)
+; LARGE32-NEXT: lwz 4, L..C7@l(4)
+; LARGE32-NEXT: lwz 6, 4(4)
+; LARGE32-NEXT: lwz 7, 0(4)
+; LARGE32-NEXT: addc 4, 6, 3
+; LARGE32-NEXT: adde 3, 7, 5
; LARGE32-NEXT: addi 1, 1, 32
; LARGE32-NEXT: lwz 0, 8(1)
; LARGE32-NEXT: mtlr 0
@@ -538,12 +538,12 @@ define i64 @loadsTIInit() #1 {
; SMALL64: # %bb.0: # %entry
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
-; SMALL64-NEXT: ld 3, L..C4(2)
-; SMALL64-NEXT: ld 4, L..C5(2)
+; SMALL64-NEXT: ld 3, L..C2(2)
; SMALL64-NEXT: std 0, 64(1)
-; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: ld 4, L..C8(2)
-; SMALL64-NEXT: ld 3, 0(3)
+; SMALL64-NEXT: bla .__tls_get_mod[PR]
+; SMALL64-NEXT: ld 4, L..C4(2)
+; SMALL64-NEXT: ldx 3, 3, 4
+; SMALL64-NEXT: ld 4, L..C7(2)
; SMALL64-NEXT: ld 4, 0(4)
; SMALL64-NEXT: add 3, 4, 3
; SMALL64-NEXT: addi 1, 1, 48
@@ -555,15 +555,15 @@ define i64 @loadsTIInit() #1 {
; LARGE64: # %bb.0: # %entry
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
-; LARGE64-NEXT: addis 3, L..C4@u(2)
-; LARGE64-NEXT: addis 4, L..C5@u(2)
+; LARGE64-NEXT: addis 3, L..C2@u(2)
; LARGE64-NEXT: std 0, 64(1)
-; LARGE64-NEXT: ld 3, L..C4@l(3)
-; LARGE64-NEXT: ld 4, L..C5@l(4)
-; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: addis 4, L..C8@u(2)
-; LARGE64-NEXT: ld 3, 0(3)
-; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: addis 6, L..C4@u(2)
+; LARGE64-NEXT: ld 3, L..C2@l(3)
+; LARGE64-NEXT: bla .__tls_get_mod[PR]
+; LARGE64-NEXT: ld 4, L..C4@l(6)
+; LARGE64-NEXT: addis 5, L..C7@u(2)
+; LARGE64-NEXT: ldx 3, 3, 4
+; LARGE64-NEXT: ld 4, L..C7@l(5)
; LARGE64-NEXT: ld 4, 0(4)
; LARGE64-NEXT: add 3, 4, 3
; LARGE64-NEXT: addi 1, 1, 48
@@ -583,11 +583,11 @@ define i64 @loadsTWInit() #1 {
; SMALL32: # %bb.0: # %entry
; SMALL32-NEXT: mflr 0
; SMALL32-NEXT: stwu 1, -32(1)
-; SMALL32-NEXT: lwz 3, L..C6(2)
-; SMALL32-NEXT: lwz 4, L..C7(2)
+; SMALL32-NEXT: lwz 3, L..C5(2)
+; SMALL32-NEXT: lwz 4, L..C6(2)
; SMALL32-NEXT: stw 0, 40(1)
; SMALL32-NEXT: bla .__tls_get_addr[PR]
-; SMALL32-NEXT: lwz 4, L..C8(2)
+; SMALL32-NEXT: lwz 4, L..C7(2)
; SMALL32-NEXT: lwz 5, 4(3)
; SMALL32-NEXT: lwz 6, 4(4)
; SMALL32-NEXT: lwz 3, 0(3)
@@ -604,15 +604,15 @@ define i64 @loadsTWInit() #1 {
; LARGE32-NEXT: mflr 0
; LARGE32-NEXT: stwu 1, -32(1)
; LARGE32-NEXT: stw 0, 40(1)
-; LARGE32-NEXT: addis 3, L..C6@u(2)
-; LARGE32-NEXT: addis 4, L..C7@u(2)
-; LARGE32-NEXT: lwz 3, L..C6@l(3)
-; LARGE32-NEXT: lwz 4, L..C7@l(4)
+; LARGE32-NEXT: addis 3, L..C5@u(2)
+; LARGE32-NEXT: addis 4, L..C6@u(2)
+; LARGE32-NEXT: lwz 3, L..C5@l(3)
+; LARGE32-NEXT: lwz 4, L..C6@l(4)
; LARGE32-NEXT: bla .__tls_get_addr[PR]
; LARGE32-NEXT: lwz 4, 4(3)
; LARGE32-NEXT: lwz 3, 0(3)
-; LARGE32-NEXT: addis 5, L..C8@u(2)
-; LARGE32-NEXT: lwz 5, L..C8@l(5)
+; LARGE32-NEXT: addis 5, L..C7@u(2)
+; LARGE32-NEXT: lwz 5, L..C7@l(5)
; LARGE32-NEXT: lwz 6, 4(5)
; LARGE32-NEXT: lwz 5, 0(5)
; LARGE32-NEXT: addc 4, 6, 4
@@ -626,11 +626,11 @@ define i64 @loadsTWInit() #1 {
; SMALL64: # %bb.0: # %entry
; SMALL64-NEXT: mflr 0
; SMALL64-NEXT: stdu 1, -48(1)
-; SMALL64-NEXT: ld 3, L..C6(2)
-; SMALL64-NEXT: ld 4, L..C7(2)
+; SMALL64-NEXT: ld 3, L..C5(2)
+; SMALL64-NEXT: ld 4, L..C6(2)
; SMALL64-NEXT: std 0, 64(1)
; SMALL64-NEXT: bla .__tls_get_addr[PR]
-; SMALL64-NEXT: ld 4, L..C8(2)
+; SMALL64-NEXT: ld 4, L..C7(2)
; SMALL64-NEXT: ld 3, 0(3)
; SMALL64-NEXT: ld 4, 0(4)
; SMALL64-NEXT: add 3, 4, 3
@@ -643,15 +643,15 @@ define i64 @loadsTWInit() #1 {
; LARGE64: # %bb.0: # %entry
; LARGE64-NEXT: mflr 0
; LARGE64-NEXT: stdu 1, -48(1)
-; LARGE64-NEXT: addis 3, L..C6@u(2)
-; LARGE64-NEXT: addis 4, L..C7@u(2)
+; LARGE64-NEXT: addis 3, L..C5@u(2)
+; LARGE64-NEXT: addis 4, L..C6@u(2)
; LARGE64-NEXT: std 0, 64(1)
-; LARGE64-NEXT: ld 3, L..C6@l(3)
-; LARGE64-NEXT: ld 4, L..C7@l(4)
+; LARGE64-NEXT: ld 3, L..C5@l(3)
+; LARGE64-NEXT: ld 4, L..C6@l(4)
; LARGE64-NEXT: bla .__tls_get_addr[PR]
-; LARGE64-NEXT: addis 4, L..C8@u(2)
+; LARGE64-NEXT: addis 4, L..C7@u(2)
; LARGE64-NEXT: ld 3, 0(3)
-; LARGE64-NEXT: ld 4, L..C8@l(4)
+; LARGE64-NEXT: ld 4, L..C7@l(4)
; LARGE64-NEXT: ld 4, 0(4)
; LARGE64-NEXT: add 3, 4, 3
; LARGE64-NEXT: addi 1, 1, 48
@@ -665,12 +665,16 @@ entry:
ret i64 %add
}
-; External symbol reference checks for .__tls_get_addr
+; External symbol reference checks for .__tls_get_addr/.__tls_get_mod
; SMALL32: .extern .__tls_get_addr[PR]
+; SMALL32: .extern .__tls_get_mod[PR]
; SMALL64: .extern .__tls_get_addr[PR]
+; SMALL64: .extern .__tls_get_mod[PR]
; LARGE32: .extern .__tls_get_addr[PR]
+; LARGE32: .extern .__tls_get_mod[PR]
; LARGE64: .extern .__tls_get_addr[PR]
+; LARGE64: .extern .__tls_get_mod[PR]
; TOC entry checks
@@ -680,18 +684,17 @@ entry:
; SMALL32-LABEL: L..C1:
; SMALL32-NEXT: .tc TGInit[TC],TGInit[TL]@gd
; SMALL32-LABEL: L..C2:
-; SMALL32-NEXT: .tc .TIUninit[TC],TIUninit[UL]@m
+; SMALL32-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; SMALL32-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; SMALL32-LABEL: L..C3:
-; SMALL32-NEXT: .tc TIUninit[TC],TIUninit[UL]@gd
+; SMALL32-NEXT: .tc TIUninit[TC],TIUninit[UL]@ld
; SMALL32-LABEL: L..C4:
-; SMALL32-NEXT: .tc .TIInit[TC],TIInit[TL]@m
+; SMALL32-NEXT: .tc TIInit[TC],TIInit[TL]@ld
; SMALL32-LABEL: L..C5:
-; SMALL32-NEXT: .tc TIInit[TC],TIInit[TL]@gd
-; SMALL32-LABEL: L..C6:
; SMALL32-NEXT: .tc .TWInit[TC],TWInit[TL]@m
-; SMALL32-LABEL: L..C7:
+; SMALL32-LABEL: L..C6:
; SMALL32-NEXT: .tc TWInit[TC],TWInit[TL]@gd
-; SMALL32-LABEL: L..C8:
+; SMALL32-LABEL: L..C7:
; SMALL32-NEXT: .tc GInit[TC],GInit[RW]
; LARGE32-LABEL: .toc
@@ -700,18 +703,17 @@ entry:
; LARGE32-LABEL: L..C1:
; LARGE32-NEXT: .tc TGInit[TE],TGInit[TL]@gd
; LARGE32-LABEL: L..C2:
-; LARGE32-NEXT: .tc .TIUninit[TE],TIUninit[UL]@m
+; LARGE32-NEXT: .tc TIUninit[TE],TIUninit[UL]@ld
; LARGE32-LABEL: L..C3:
-; LARGE32-NEXT: .tc TIUninit[TE],TIUninit[UL]@gd
+; LARGE32-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE32-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; LARGE32-LABEL: L..C4:
-; LARGE32-NEXT: .tc .TIInit[TE],TIInit[TL]@m
+; LARGE32-NEXT: .tc TIInit[TE],TIInit[TL]@ld
; LARGE32-LABEL: L..C5:
-; LARGE32-NEXT: .tc TIInit[TE],TIInit[TL]@gd
-; LARGE32-LABEL: L..C6:
; LARGE32-NEXT: .tc .TWInit[TE],TWInit[TL]@m
-; LARGE32-LABEL: L..C7:
+; LARGE32-LABEL: L..C6:
; LARGE32-NEXT: .tc TWInit[TE],TWInit[TL]@gd
-; LARGE32-LABEL: L..C8:
+; LARGE32-LABEL: L..C7:
; LARGE32-NEXT: .tc GInit[TE],GInit[RW]
; SMALL64-LABEL: .toc
@@ -720,18 +722,17 @@ entry:
; SMALL64-LABEL: L..C1:
; SMALL64-NEXT: .tc TGInit[TC],TGInit[TL]@gd
; SMALL64-LABEL: L..C2:
-; SMALL64-NEXT: .tc .TIUninit[TC],TIUninit[UL]@m
+; SMALL64-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; SMALL64-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; SMALL64-LABEL: L..C3:
-; SMALL64-NEXT: .tc TIUninit[TC],TIUninit[UL]@gd
+; SMALL64-NEXT: .tc TIUninit[TC],TIUninit[UL]@ld
; SMALL64-LABEL: L..C4:
-; SMALL64-NEXT: .tc .TIInit[TC],TIInit[TL]@m
+; SMALL64-NEXT: .tc TIInit[TC],TIInit[TL]@ld
; SMALL64-LABEL: L..C5:
-; SMALL64-NEXT: .tc TIInit[TC],TIInit[TL]@gd
-; SMALL64-LABEL: L..C6:
; SMALL64-NEXT: .tc .TWInit[TC],TWInit[TL]@m
-; SMALL64-LABEL: L..C7:
+; SMALL64-LABEL: L..C6:
; SMALL64-NEXT: .tc TWInit[TC],TWInit[TL]@gd
-; SMALL64-LABEL: L..C8:
+; SMALL64-LABEL: L..C7:
; SMALL64-NEXT: .tc GInit[TC],GInit[RW]
; LARGE64-LABEL: .toc
@@ -740,18 +741,17 @@ entry:
; LARGE64-LABEL: L..C1:
; LARGE64-NEXT: .tc TGInit[TE],TGInit[TL]@gd
; LARGE64-LABEL: L..C2:
-; LARGE64-NEXT: .tc .TIUninit[TE],TIUninit[UL]@m
+; LARGE64-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE64-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
; LARGE64-LABEL: L..C3:
-; LARGE64-NEXT: .tc TIUninit[TE],TIUninit[UL]@gd
+; LARGE64-NEXT: .tc TIUninit[TE],TIUninit[UL]@ld
; LARGE64-LABEL: L..C4:
-; LARGE64-NEXT: .tc .TIInit[TE],TIInit[TL]@m
+; LARGE64-NEXT: .tc TIInit[TE],TIInit[TL]@ld
; LARGE64-LABEL: L..C5:
-; LARGE64-NEXT: .tc TIInit[TE],TIInit[TL]@gd
-; LARGE64-LABEL: L..C6:
; LARGE64-NEXT: .tc .TWInit[TE],TWInit[TL]@m
-; LARGE64-LABEL: L..C7:
+; LARGE64-LABEL: L..C6:
; LARGE64-NEXT: .tc TWInit[TE],TWInit[TL]@gd
-; LARGE64-LABEL: L..C8:
+; LARGE64-LABEL: L..C7:
; LARGE64-NEXT: .tc GInit[TE],GInit[RW]
attributes #0 = { nofree norecurse nounwind willreturn writeonly "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-spe,-vsx" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-ld-xcoff-reloc-large.ll b/llvm/test/CodeGen/PowerPC/aix-tls-ld-xcoff-reloc-large.ll
new file mode 100644
index 000000000000..73741a210ed2
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-ld-xcoff-reloc-large.ll
@@ -0,0 +1,349 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
+; RUN: -xcoff-traceback-table=false --code-model=large -filetype=obj -o %t.o < %s
+; RUN: llvm-readobj --relocs --expand-relocs %t.o | FileCheck -D#NFA=2 --check-prefix=RELOC %s
+; RUN: llvm-readobj --syms %t.o | FileCheck -D#NFA=2 --check-prefix=SYM %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS %s
+
+@ThreadLocalVarInit = thread_local(localdynamic) global i64 1, align 8
+@IThreadLocalVarUninit = internal thread_local(localdynamic) global i64 0, align 8
+@IThreadLocalVarUninit2 = internal thread_local(localdynamic) global i64 0, align 8
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+define void @storeITLUninit(i64 noundef %x) {
+entry:
+ %0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @IThreadLocalVarUninit)
+ store i64 %x, ptr %0, align 8
+ ret void
+}
+
+define i64 @loadTLInit() {
+entry:
+ %0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ThreadLocalVarInit)
+ %1 = load i64, ptr %0, align 8
+ ret i64 %1
+}
+
+define signext i64 @loadTLUninit() {
+entry:
+ %0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @IThreadLocalVarUninit)
+ store i64 1, ptr %0, align 8
+ %1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @IThreadLocalVarUninit2)
+ %2 = load i64, ptr %1, align 8
+ %add = add nsw i64 %2, 1
+ ret i64 %add
+}
+
+; RELOC: File: {{.*}}aix-tls-ld-xcoff-reloc-large.ll.tmp.o
+; RELOC-NEXT: Format: aix5coff64-rs6000
+; RELOC-NEXT: Arch: powerpc64
+; RELOC-NEXT: AddressSize: 64bit
+; RELOC-NEXT: Relocations [
+; RELOC: Virtual Address: 0xE
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+19]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCU (0x30)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x12
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit ([[#NFA+21]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCU (0x30)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x1A
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+19]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x1C
+; RELOC-NEXT: Symbol: .__tls_get_mod ([[#NFA+1]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 26
+; RELOC-NEXT: Type: R_RBA (0x18)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x22
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit ([[#NFA+21]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x4A
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+19]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCU (0x30)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x4E
+; RELOC-NEXT: Symbol: ThreadLocalVarInit ([[#NFA+23]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCU (0x30)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x56
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+19]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x58
+; RELOC-NEXT: Symbol: .__tls_get_mod ([[#NFA+1]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 26
+; RELOC-NEXT: Type: R_RBA (0x18)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x5E
+; RELOC-NEXT: Symbol: ThreadLocalVarInit ([[#NFA+23]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x8A
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+19]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCU (0x30)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x8E
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit ([[#NFA+21]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCU (0x30)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x96
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+19]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x98
+; RELOC-NEXT: Symbol: .__tls_get_mod ([[#NFA+1]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 26
+; RELOC-NEXT: Type: R_RBA (0x18)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x9E
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit ([[#NFA+21]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0xAA
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit2 ([[#NFA+25]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCU (0x30)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0xAE
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit2 ([[#NFA+25]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x110
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+19]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 64
+; RELOC-NEXT: Type: R_TLSML (0x25)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x118
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit ([[#NFA+29]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 64
+; RELOC-NEXT: Type: R_TLS_LD (0x22)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x120
+; RELOC-NEXT: Symbol: ThreadLocalVarInit ([[#NFA+27]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 64
+; RELOC-NEXT: Type: R_TLS_LD (0x22)
+; RELOC-NEXT: }
+; RELOC: Virtual Address: 0x128
+; RELOC-NEXT: Symbol: IThreadLocalVarUninit2 ([[#NFA+31]])
+; RELOC-NEXT: IsSigned: No
+; RELOC-NEXT: FixupBitValue: 0
+; RELOC-NEXT: Length: 64
+; RELOC-NEXT: Type: R_TLS_LD (0x22)
+; RELOC-NEXT: }
+
+; SYM: File: {{.*}}aix-tls-ld-xcoff-reloc-large.ll.tmp.o
+; SYM-NEXT: Format: aix5coff64-rs6000
+; SYM-NEXT: Arch: powerpc64
+; SYM-NEXT: AddressSize: 64bit
+; SYM-NEXT: Symbols [
+; SYM: Index: [[#NFA+19]]
+; SYM-NEXT: Name: _$TLSML
+; SYM-NEXT: Value (RelocatableAddress): 0x110
+; SYM-NEXT: Section: .data
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+20]]
+; SYM-NEXT: SectionLen: 8
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 3
+; SYM-NEXT: SymbolType: XTY_SD (0x1)
+; SYM-NEXT: StorageMappingClass: XMC_TC (0x3)
+; SYM-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM: Index: [[#NFA+21]]
+; SYM-NEXT: Name: IThreadLocalVarUninit
+; SYM-NEXT: Value (RelocatableAddress): 0x118
+; SYM-NEXT: Section: .data
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+22]]
+; SYM-NEXT: SectionLen: 8
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 3
+; SYM-NEXT: SymbolType: XTY_SD (0x1)
+; SYM-NEXT: StorageMappingClass: XMC_TE (0x16)
+; SYM-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM: Index: [[#NFA+23]]
+; SYM-NEXT: Name: ThreadLocalVarInit
+; SYM-NEXT: Value (RelocatableAddress): 0x120
+; SYM-NEXT: Section: .data
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+24]]
+; SYM-NEXT: SectionLen: 8
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 3
+; SYM-NEXT: SymbolType: XTY_SD (0x1)
+; SYM-NEXT: StorageMappingClass: XMC_TE (0x16)
+; SYM-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM: Index: [[#NFA+25]]
+; SYM-NEXT: Name: IThreadLocalVarUninit2
+; SYM-NEXT: Value (RelocatableAddress): 0x128
+; SYM-NEXT: Section: .data
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+26]]
+; SYM-NEXT: SectionLen: 8
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 3
+; SYM-NEXT: SymbolType: XTY_SD (0x1)
+; SYM-NEXT: StorageMappingClass: XMC_TE (0x16)
+; SYM-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM: Index: [[#NFA+27]]
+; SYM-NEXT: Name: ThreadLocalVarInit
+; SYM-NEXT: Value (RelocatableAddress): 0x0
+; SYM-NEXT: Section: .tdata
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_EXT (0x2)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+28]]
+; SYM-NEXT: SectionLen: 8
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 3
+; SYM-NEXT: SymbolType: XTY_SD (0x1)
+; SYM-NEXT: StorageMappingClass: XMC_TL (0x14)
+; SYM-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM: Index: [[#NFA+29]]
+; SYM-NEXT: Name: IThreadLocalVarUninit
+; SYM-NEXT: Value (RelocatableAddress): 0x8
+; SYM-NEXT: Section: .tbss
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+30]]
+; SYM-NEXT: SectionLen: 8
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 3
+; SYM-NEXT: SymbolType: XTY_CM (0x3)
+; SYM-NEXT: StorageMappingClass: XMC_UL (0x15)
+; SYM-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM: Index: [[#NFA+31]]
+; SYM-NEXT: Name: IThreadLocalVarUninit2
+; SYM-NEXT: Value (RelocatableAddress): 0x10
+; SYM-NEXT: Section: .tbss
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+32]]
+; SYM-NEXT: SectionLen: 8
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 3
+; SYM-NEXT: SymbolType: XTY_CM (0x3)
+; SYM-NEXT: StorageMappingClass: XMC_UL (0x15)
+; SYM-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM-NEXT: }
+; SYM-NEXT: }
+
+; DIS: {{.*}}aix-tls-ld-xcoff-reloc-large.ll.tmp.o: file format aix5coff64-rs6000
+; DIS: Disassembly of section .data:
+; DIS: 0000000000000110 (idx: [[#NFA+19]]) _$TLSML[TC]:
+; DIS-NEXT: 110: 00 00 00 00
+; DIS-NEXT: 0000000000000110: R_TLSML (idx: [[#NFA+19]]) _$TLSML[TC]
+; DIS-NEXT: 114: 00 00 00 00
+; DIS: 0000000000000118 (idx: [[#NFA+21]]) IThreadLocalVarUninit[TE]:
+; DIS-NEXT: 118: 00 00 00 00
+; DIS-NEXT: 0000000000000118: R_TLS_LD (idx: [[#NFA+29]]) IThreadLocalVarUninit[UL]
+; DIS-NEXT: 11c: 00 00 00 08
+; DIS: 0000000000000120 (idx: [[#NFA+23]]) ThreadLocalVarInit[TE]:
+; DIS-NEXT: 120: 00 00 00 00
+; DIS-NEXT: 0000000000000120: R_TLS_LD (idx: [[#NFA+27]]) ThreadLocalVarInit[TL]
+; DIS-NEXT: 124: 00 00 00 00
+; DIS: 0000000000000128 (idx: [[#NFA+25]]) IThreadLocalVarUninit2[TE]:
+; DIS-NEXT: 128: 00 00 00 00
+; DIS-NEXT: 0000000000000128: R_TLS_LD (idx: [[#NFA+31]]) IThreadLocalVarUninit2[UL]
+; DIS-NEXT: 12c: 00 00 00 10
+
+; DIS: Disassembly of section .tdata:
+; DIS: 0000000000000000 (idx: [[#NFA+27]]) ThreadLocalVarInit[TL]:
+; DIS-NEXT: 0: 00 00 00 00
+; DIS-NEXT: 4: 00 00 00 01
+
+; DIS: Disassembly of section .tbss:
+; DIS: 0000000000000008 (idx: [[#NFA+29]]) IThreadLocalVarUninit[UL]:
+; DIS-NEXT: ...
+; DIS: 0000000000000010 (idx: [[#NFA+31]]) IThreadLocalVarUninit2[UL]:
+; DIS-NEXT: ...
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-local-dynamic.ll b/llvm/test/CodeGen/PowerPC/aix-tls-local-dynamic.ll
new file mode 100644
index 000000000000..22349337f189
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-local-dynamic.ll
@@ -0,0 +1,396 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
+; RUN: --code-model=small < %s | FileCheck %s --check-prefixes=SMALL64,SMALL
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
+; RUN: --code-model=large < %s | FileCheck %s --check-prefixes=LARGE64,LARGE
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
+; RUN: --code-model=small < %s | FileCheck %s --check-prefixes=SMALL32,SMALL
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
+; RUN: --code-model=large < %s | FileCheck %s --check-prefixes=LARGE32,LARGE
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
+; RUN: --code-model=small -O0 < %s | FileCheck %s --check-prefixes=WITHDUP
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
+; RUN: --code-model=small -O1 < %s | FileCheck %s --check-prefixes=NODUP
+
+@TGInit = thread_local(localdynamic) global i32 42, align 4
+@TGUninit = thread_local(localdynamic) global i32 0, align 4
+@TIInit = internal thread_local(localdynamic) global i32 42, align 4
+@TIUninit = internal thread_local(localdynamic) global i32 0, align 4
+@TWInit = weak thread_local(localdynamic) global i32 42, align 4
+@TWUninit = weak thread_local(localdynamic) global i32 0, align 4
+@x = thread_local(localdynamic) global i32 42, align 4
+@y = thread_local(localdynamic) global i32 42, align 4
+
+define i32 @loadTGInit() {
+; SMALL-LABEL: loadTGInit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]](2)
+; SMALL: lwzx [[TGInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: loadTGInit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TGInitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: lwzx [[TGInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TGInit)
+ %1 = load i32, ptr %0, align 4
+ ret i32 %1
+}
+
+define void @storeTGInit(i32 noundef signext %i) {
+; SMALL-LABEL: storeTGInit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]](2)
+; SMALL: stwx [[TGInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: storeTGInit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TGInitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TGInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: stwx [[TGInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TGInit)
+ store i32 %i, ptr %0, align 4
+ ret void
+}
+
+define i32 @loadTGUninit() {
+; SMALL-LABEL: loadTGUninit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]](2)
+; SMALL: lwzx [[TGInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: loadTGUninit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TGUninitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: lwzx [[TGUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TGUninit)
+ %1 = load i32, ptr %0, align 4
+ ret i32 %1
+}
+
+define void @storeTGUninit(i32 noundef signext %i) {
+; SMALL-LABEL: storeTGUninit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]](2)
+; SMALL: stwx [[TGUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: storeTGUninit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TGUninitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TGUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: stwx [[TGUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TGUninit)
+ store i32 %i, ptr %0, align 4
+ ret void
+}
+
+define i32 @loadTIInit() {
+; SMALL-LABEL: loadTIInit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]](2)
+; SMALL: lwzx [[TIInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: loadTIInit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TIInitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: lwzx [[TIInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TIInit)
+ %1 = load i32, ptr %0, align 4
+ ret i32 %1
+}
+
+define void @storeTIInit(i32 noundef signext %i) {
+; SMALL-LABEL: storeTIInit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]](2)
+; SMALL: stwx [[TIInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: storeTIInit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TIInitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TIInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: stwx [[TIInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TIInit)
+ store i32 %i, ptr %0, align 4
+ ret void
+}
+
+define i32 @loadTIUninit() {
+; SMALL-LABEL: loadTIUninit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]](2)
+; SMALL: lwzx [[TIUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: loadTIUninit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TIUninitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: lwzx [[TIUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TIUninit)
+ %1 = load i32, ptr %0, align 4
+ ret i32 %1
+}
+
+define void @storeTIUninit(i32 noundef signext %i) {
+; SMALL-LABEL: storeTIUninit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]](2)
+; SMALL: stwx [[TIUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: storeTIUninit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TIUninitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TIUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: stwx [[TIUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TIUninit)
+ store i32 %i, ptr %0, align 4
+ ret void
+}
+
+define i32 @loadTWInit() {
+; SMALL-LABEL: loadTWInit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]](2)
+; SMALL: lwzx [[TWInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: loadTWInit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TWInitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: lwzx [[TWInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TWInit)
+ %1 = load i32, ptr %0, align 4
+ ret i32 %1
+}
+
+define void @storeTWInit(i32 noundef signext %i) {
+; SMALL-LABEL: storeTWInit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]](2)
+; SMALL: stwx [[TWInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: storeTWInit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TWInitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TWInitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: stwx [[TWInitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TWInit)
+ store i32 %i, ptr %0, align 4
+ ret void
+}
+
+define i32 @loadTWUninit() {
+; SMALL-LABEL: loadTWUninit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]](2)
+; SMALL: lwzx [[TWUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: loadTWUninit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TWUninitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: lwzx [[TWUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TWUninit)
+ %1 = load i32, ptr %0, align 4
+ ret i32 %1
+}
+
+define void @storeTWUninit(i32 noundef signext %i) {
+; SMALL-LABEL: storeTWUninit:
+; SMALL64: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL32: lwz [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; SMALL: bla .__tls_get_mod[PR]
+; SMALL64: ld [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]](2)
+; SMALL32: lwz [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]](2)
+; SMALL: stwx [[TWUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+;
+; LARGE-LABEL: storeTWUninit:
+; LARGE64: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE: addis [[OffsetHR:[0-9]+]], [[TWUninitL:L..C[0-9]+]]@u(2)
+; LARGE32: addis [[ModuleHandleHR:[0-9]+]], [[ModuleHandleL:L..C[0-9]+]]@u(2)
+; LARGE64: ld [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE32: lwz [[ModuleHandleR:3]], [[ModuleHandleL]]@l([[ModuleHandleHR]])
+; LARGE: bla .__tls_get_mod[PR]
+; LARGE64: ld [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE32: lwz [[OffsetR:[0-9]+]], [[TWUninitL:L..C[0-9]+]]@l([[OffsetHR]])
+; LARGE: stwx [[TWUninitValR:[0-9]+]], [[ModuleHandleR]], [[OffsetR]]
+entry:
+ %0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @TWUninit)
+ store i32 %i, ptr %0, align 4
+ ret void
+}
+
+define i32 @DedupTlsGetMod() #0 {
+; WITHDUP-LABEL: DedupTlsGetMod:
+; WITHDUP: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; WITHDUP-NEXT: bla .__tls_get_mod[PR]
+; WITHDUP-NEXT: ld [[OffsetXR:[0-9]+]], [[X:L..C[0-9]+]](2)
+; WITHDUP: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; WITHDUP-NEXT: bla .__tls_get_mod[PR]
+; WITHDUP: ld [[OffsetYR:[0-9]+]], [[Y:L..C[0-9]+]](2)
+; WITHDUP-LABEL: L..DedupTlsGetMod0:
+;
+; NODUP-LABEL: DedupTlsGetMod:
+; NODUP: ld [[ModuleHandleR:3]], [[ModuleHandleL:L..C[0-9]+]](2)
+; NODUP-NEXT: bla .__tls_get_mod[PR]
+; NODUP-NEXT: ld [[OffsetXR:[0-9]+]], [[X:L..C[0-9]+]](2)
+; NODUP-NEXT: ld [[OffsetYR:[0-9]+]], [[Y:L..C[0-9]+]](2)
+; NODUP-NEXT: lwzx [[XValR:[0-9]+]], [[ModuleHandleR]], [[OffsetXR]]
+; NODUP-NEXT: lwzx [[YValR:[0-9]+]], [[ModuleHandleR]], [[OffsetYR]]
+; NODUP-LABEL: L..DedupTlsGetMod0:
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, ptr %retval, align 4
+ %0 = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
+ %1 = load i32, ptr %0, align 4
+ %2 = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @y)
+ %3 = load i32, ptr %2, align 4
+ %add = add nsw i32 %1, %3
+ ret i32 %add
+}
+
+; SMALL: .extern .__tls_get_mod[PR]
+; LARGE: .extern .__tls_get_mod[PR]
+; SMALL-NOT: .extern _Renamed..5f24__TLSML[TC]
+; LARGE-NOT: .extern _Renamed..5f24__TLSML[TC]
+
+; SMALL: [[ModuleHandleL]]:
+; SMALL-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; SMALL-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
+; SMALL: [[TGInitL]]:
+; SMALL-NEXT: .tc TGInit[TC],TGInit[TL]@ld
+; SMALL: [[TGUninitL]]:
+; SMALL-NEXT: .tc TGUninit[TC],TGUninit[TL]@ld
+; SMALL: [[TIInitL]]:
+; SMALL-NEXT: .tc TIInit[TC],TIInit[TL]@ld
+; SMALL: [[TIUninitL]]:
+; SMALL-NEXT: .tc TIUninit[TC],TIUninit[UL]@ld
+; SMALL: [[TWInitL]]:
+; SMALL-NEXT: .tc TWInit[TC],TWInit[TL]@ld
+; SMALL: [[TWUninitL]]:
+; SMALL-NEXT: .tc TWUninit[TC],TWUninit[TL]@ld
+
+; LARGE64: [[ModuleHandleL]]:
+; LARGE64-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE64-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
+; LARGE64: [[TGInitL]]:
+; LARGE64-NEXT: .tc TGInit[TE],TGInit[TL]@ld
+;
+; LARGE32: [[TGInitL]]:
+; LARGE32-NEXT: .tc TGInit[TE],TGInit[TL]@ld
+; LARGE32: [[ModuleHandleL]]:
+; LARGE32-NEXT: .tc _Renamed..5f24__TLSML[TC],_Renamed..5f24__TLSML[TC]@ml
+; LARGE32-NEXT: .rename _Renamed..5f24__TLSML[TC],"_$TLSML"
+;
+; LARGE: [[TGUninitL]]:
+; LARGE-NEXT: .tc TGUninit[TE],TGUninit[TL]@ld
+; LARGE: [[TIInitL]]:
+; LARGE-NEXT: .tc TIInit[TE],TIInit[TL]@ld
+; LARGE: [[TIUninitL]]:
+; LARGE-NEXT: .tc TIUninit[TE],TIUninit[UL]@ld
+; LARGE: [[TWInitL]]:
+; LARGE-NEXT: .tc TWInit[TE],TWInit[TL]@ld
+; LARGE: [[TWUninitL]]:
+; LARGE-NEXT: .tc TWUninit[TE],TWUninit[TL]@ld
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc-large.ll b/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc-large.ll
index 059924f392f6..1f7b497bb6c6 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc-large.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc-large.ll
@@ -5,6 +5,7 @@
; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS %s
@GInit = global double 1.000000e+00, align 8
+; @TIInit is internal, so it is effectively local-dynamic.
@TIInit = internal thread_local global i64 1, align 8
@TWInit = weak thread_local global double 1.000000e+00, align 8
@@ -32,7 +33,7 @@ entry:
; RELOC-NEXT: Section (index: 1) .text {
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x16
-; RELOC-NEXT: Symbol: .TIInit ([[#NFA+17]])
+; RELOC-NEXT: Symbol: TIInit ([[#NFA+19]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -40,7 +41,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x1A
-; RELOC-NEXT: Symbol: TIInit ([[#NFA+19]])
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+21]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -48,31 +49,31 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x1E
-; RELOC-NEXT: Symbol: .TIInit ([[#NFA+17]])
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+21]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
; RELOC-NEXT: Type: R_TOCL (0x31)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
-; RELOC-NEXT: Virtual Address: 0x22
-; RELOC-NEXT: Symbol: TIInit ([[#NFA+19]])
+; RELOC-NEXT: Virtual Address: 0x20
+; RELOC-NEXT: Symbol: .__tls_get_mod ([[#NFA+1]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
-; RELOC-NEXT: Length: 16
-; RELOC-NEXT: Type: R_TOCL (0x31)
+; RELOC-NEXT: Length: 26
+; RELOC-NEXT: Type: R_RBA (0x18)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
-; RELOC-NEXT: Virtual Address: 0x24
-; RELOC-NEXT: Symbol: .__tls_get_addr ([[#NFA+1]])
+; RELOC-NEXT: Virtual Address: 0x26
+; RELOC-NEXT: Symbol: TIInit ([[#NFA+19]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
-; RELOC-NEXT: Length: 26
-; RELOC-NEXT: Type: R_RBA (0x18)
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOCL (0x31)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x4E
-; RELOC-NEXT: Symbol: .TWInit ([[#NFA+21]])
+; RELOC-NEXT: Symbol: .TWInit ([[#NFA+23]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -80,7 +81,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x52
-; RELOC-NEXT: Symbol: TWInit ([[#NFA+23]])
+; RELOC-NEXT: Symbol: TWInit ([[#NFA+25]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -88,7 +89,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x56
-; RELOC-NEXT: Symbol: .TWInit ([[#NFA+21]])
+; RELOC-NEXT: Symbol: .TWInit ([[#NFA+23]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -96,7 +97,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x5A
-; RELOC-NEXT: Symbol: TWInit ([[#NFA+23]])
+; RELOC-NEXT: Symbol: TWInit ([[#NFA+25]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -104,7 +105,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x5C
-; RELOC-NEXT: Symbol: .__tls_get_addr ([[#NFA+1]])
+; RELOC-NEXT: Symbol: .__tls_get_addr ([[#NFA+3]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 26
@@ -112,7 +113,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x66
-; RELOC-NEXT: Symbol: GInit ([[#NFA+25]])
+; RELOC-NEXT: Symbol: GInit ([[#NFA+27]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -120,7 +121,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x6A
-; RELOC-NEXT: Symbol: GInit ([[#NFA+25]])
+; RELOC-NEXT: Symbol: GInit ([[#NFA+27]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -130,7 +131,7 @@ entry:
; RELOC-NEXT: Section (index: 2) .data {
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x90
-; RELOC-NEXT: Symbol: .storesTIInit ([[#NFA+5]])
+; RELOC-NEXT: Symbol: .storesTIInit ([[#NFA+7]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -138,7 +139,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x94
-; RELOC-NEXT: Symbol: TOC ([[#NFA+15]])
+; RELOC-NEXT: Symbol: TOC ([[#NFA+17]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -146,7 +147,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x9C
-; RELOC-NEXT: Symbol: .loadsTWInit ([[#NFA+7]])
+; RELOC-NEXT: Symbol: .loadsTWInit ([[#NFA+9]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -154,7 +155,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0xA0
-; RELOC-NEXT: Symbol: TOC ([[#NFA+15]])
+; RELOC-NEXT: Symbol: TOC ([[#NFA+17]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -162,23 +163,23 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0xA8
-; RELOC-NEXT: Symbol: TIInit ([[#NFA+27]])
+; RELOC-NEXT: Symbol: TIInit ([[#NFA+29]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
-; RELOC-NEXT: Type: R_TLSM (0x24)
+; RELOC-NEXT: Type: R_TLS_LD (0x22)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0xAC
-; RELOC-NEXT: Symbol: TIInit ([[#NFA+27]])
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+21]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
-; RELOC-NEXT: Type: R_TLS (0x20)
+; RELOC-NEXT: Type: R_TLSML (0x25)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0xB0
-; RELOC-NEXT: Symbol: TWInit ([[#NFA+29]])
+; RELOC-NEXT: Symbol: TWInit ([[#NFA+31]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -186,7 +187,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0xB4
-; RELOC-NEXT: Symbol: TWInit ([[#NFA+29]])
+; RELOC-NEXT: Symbol: TWInit ([[#NFA+31]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -194,7 +195,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0xB8
-; RELOC-NEXT: Symbol: GInit ([[#NFA+9]])
+; RELOC-NEXT: Symbol: GInit ([[#NFA+11]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -230,7 +231,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: Symbol {
; SYM-NEXT: Index: [[#NFA+1]]
-; SYM-NEXT: Name: .__tls_get_addr
+; SYM-NEXT: Name: .__tls_get_mod
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: N_UNDEF
; SYM-NEXT: Type: 0x0
@@ -250,6 +251,26 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: Symbol {
; SYM-NEXT: Index: [[#NFA+3]]
+; SYM-NEXT: Name: .__tls_get_addr
+; SYM-NEXT: Value (RelocatableAddress): 0x0
+; SYM-NEXT: Section: N_UNDEF
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_EXT (0x2)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: [[#NFA+4]]
+; SYM-NEXT: SectionLen: 0
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 0
+; SYM-NEXT: SymbolType: XTY_ER (0x0)
+; SYM-NEXT: StorageMappingClass: XMC_PR (0x0)
+; SYM-NEXT: StabInfoIndex: 0x0
+; SYM-NEXT: StabSectNum: 0x0
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM-NEXT: Symbol {
+; SYM-NEXT: Index: [[#NFA+5]]
; SYM-NEXT: Name:
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: .text
@@ -257,7 +278,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+4]]
+; SYM-NEXT: Index: [[#NFA+6]]
; SYM-NEXT: SectionLen: 132
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -269,7 +290,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+5]]
+; SYM-NEXT: Index: [[#NFA+7]]
; SYM-NEXT: Name: .storesTIInit
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: .text
@@ -277,8 +298,8 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+6]]
-; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+3]]
+; SYM-NEXT: Index: [[#NFA+8]]
+; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+5]]
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 0
@@ -289,7 +310,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+7]]
+; SYM-NEXT: Index: [[#NFA+9]]
; SYM-NEXT: Name: .loadsTWInit
; SYM-NEXT: Value (RelocatableAddress): 0x40
; SYM-NEXT: Section: .text
@@ -297,8 +318,8 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+8]]
-; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+3]]
+; SYM-NEXT: Index: [[#NFA+10]]
+; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+5]]
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 0
@@ -309,7 +330,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+9]]
+; SYM-NEXT: Index: [[#NFA+11]]
; SYM-NEXT: Name: GInit
; SYM-NEXT: Value (RelocatableAddress): 0x88
; SYM-NEXT: Section: .data
@@ -317,7 +338,7 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+10]]
+; SYM-NEXT: Index: [[#NFA+12]]
; SYM-NEXT: SectionLen: 8
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -329,7 +350,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+11]]
+; SYM-NEXT: Index: [[#NFA+13]]
; SYM-NEXT: Name: storesTIInit
; SYM-NEXT: Value (RelocatableAddress): 0x90
; SYM-NEXT: Section: .data
@@ -337,7 +358,7 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+12]]
+; SYM-NEXT: Index: [[#NFA+14]]
; SYM-NEXT: SectionLen: 12
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -349,7 +370,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+13]]
+; SYM-NEXT: Index: [[#NFA+15]]
; SYM-NEXT: Name: loadsTWInit
; SYM-NEXT: Value (RelocatableAddress): 0x9C
; SYM-NEXT: Section: .data
@@ -357,7 +378,7 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+14]]
+; SYM-NEXT: Index: [[#NFA+16]]
; SYM-NEXT: SectionLen: 12
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -369,7 +390,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+15]]
+; SYM-NEXT: Index: [[#NFA+17]]
; SYM-NEXT: Name: TOC
; SYM-NEXT: Value (RelocatableAddress): 0xA8
; SYM-NEXT: Section: .data
@@ -377,7 +398,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+16]]
+; SYM-NEXT: Index: [[#NFA+18]]
; SYM-NEXT: SectionLen: 0
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -389,15 +410,15 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+17]]
-; SYM-NEXT: Name: .TIInit
+; SYM-NEXT: Index: [[#NFA+19]]
+; SYM-NEXT: Name: TIInit
; SYM-NEXT: Value (RelocatableAddress): 0xA8
; SYM-NEXT: Section: .data
; SYM-NEXT: Type: 0x0
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+18]]
+; SYM-NEXT: Index: [[#NFA+20]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -409,27 +430,27 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+19]]
-; SYM-NEXT: Name: TIInit
+; SYM-NEXT: Index: [[#NFA+21]]
+; SYM-NEXT: Name: _$TLSML
; SYM-NEXT: Value (RelocatableAddress): 0xAC
; SYM-NEXT: Section: .data
; SYM-NEXT: Type: 0x0
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+20]]
+; SYM-NEXT: Index: [[#NFA+22]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 2
; SYM-NEXT: SymbolType: XTY_SD (0x1)
-; SYM-NEXT: StorageMappingClass: XMC_TE (0x16)
+; SYM-NEXT: StorageMappingClass: XMC_TC (0x3)
; SYM-NEXT: StabInfoIndex: 0x0
; SYM-NEXT: StabSectNum: 0x0
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+21]]
+; SYM-NEXT: Index: [[#NFA+23]]
; SYM-NEXT: Name: .TWInit
; SYM-NEXT: Value (RelocatableAddress): 0xB0
; SYM-NEXT: Section: .data
@@ -437,7 +458,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+22]]
+; SYM-NEXT: Index: [[#NFA+24]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -449,7 +470,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+23]]
+; SYM-NEXT: Index: [[#NFA+25]]
; SYM-NEXT: Name: TWInit
; SYM-NEXT: Value (RelocatableAddress): 0xB4
; SYM-NEXT: Section: .data
@@ -457,7 +478,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+24]]
+; SYM-NEXT: Index: [[#NFA+26]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -469,7 +490,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+25]]
+; SYM-NEXT: Index: [[#NFA+27]]
; SYM-NEXT: Name: GInit
; SYM-NEXT: Value (RelocatableAddress): 0xB8
; SYM-NEXT: Section: .data
@@ -477,7 +498,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+26]]
+; SYM-NEXT: Index: [[#NFA+28]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -489,7 +510,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+27]]
+; SYM-NEXT: Index: [[#NFA+29]]
; SYM-NEXT: Name: TIInit
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: .tdata
@@ -497,7 +518,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+28]]
+; SYM-NEXT: Index: [[#NFA+30]]
; SYM-NEXT: SectionLen: 8
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -509,7 +530,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+29]]
+; SYM-NEXT: Index: [[#NFA+31]]
; SYM-NEXT: Name: TWInit
; SYM-NEXT: Value (RelocatableAddress): 0x8
; SYM-NEXT: Section: .tdata
@@ -517,7 +538,7 @@ entry:
; SYM-NEXT: StorageClass: C_WEAKEXT (0x6F)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+30]]
+; SYM-NEXT: Index: [[#NFA+32]]
; SYM-NEXT: SectionLen: 8
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -536,20 +557,20 @@ entry:
; DIS-NEXT: mflr 0
; DIS-NEXT: stwu 1, -32(1)
; DIS-NEXT: stw 0, 40(1)
-; DIS-NEXT: mr 6, 4
; DIS-NEXT: mr 7, 3
-; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+17]]) .TIInit[TE]
-; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 4, 2, 0
+; DIS-NEXT: mr 6, 4
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 8, 2, 0
; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+19]]) TIInit[TE]
-; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 3, 0(3)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+17]]) .TIInit[TE]
-; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 4, 4(4)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+21]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 3, 4(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+21]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 4, 0(8)
; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+19]]) TIInit[TE]
-; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0
-; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_addr[PR]
+; DIS-NEXT: stwux 7, 3, 4
; DIS-NEXT: stw 6, 4(3)
-; DIS-NEXT: stw 7, 0(3)
; DIS-NEXT: addi 1, 1, 32
; DIS-NEXT: lwz 0, 8(1)
; DIS-NEXT: mtlr 0
@@ -559,20 +580,20 @@ entry:
; DIS-NEXT: stwu 1, -32(1)
; DIS-NEXT: stw 0, 40(1)
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+21]]) .TWInit[TE]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) .TWInit[TE]
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 4, 2, 0
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) TWInit[TE]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+25]]) TWInit[TE]
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 3, 8(3)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+21]]) .TWInit[TE]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) .TWInit[TE]
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 4, 12(4)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) TWInit[TE]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+25]]) TWInit[TE]
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0
-; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_addr[PR]
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+3]]) .__tls_get_addr[PR]
; DIS-NEXT: lfd 0, 0(3)
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+25]]) GInit[TE]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+27]]) GInit[TE]
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 3, 16(3)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+25]]) GInit[TE]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+27]]) GInit[TE]
; DIS-NEXT: lfd 1, 0(3)
; DIS-NEXT: fadd 1, 0, 1
; DIS-NEXT: addi 1, 1, 32
@@ -581,42 +602,42 @@ entry:
; DIS-NEXT: blr
; DIS: Disassembly of section .data:
-; DIS: 00000088 (idx: [[#NFA+9]]) GInit[RW]:
+; DIS: 00000088 (idx: [[#NFA+11]]) GInit[RW]:
; DIS-NEXT: 88: 3f f0 00 00
; DIS-NEXT: 8c: 00 00 00 00
-; DIS: 00000090 (idx: [[#NFA+11]]) storesTIInit[DS]:
+; DIS: 00000090 (idx: [[#NFA+13]]) storesTIInit[DS]:
; DIS-NEXT: 90: 00 00 00 00
-; DIS-NEXT: 00000090: R_POS (idx: [[#NFA+5]]) .storesTIInit
+; DIS-NEXT: 00000090: R_POS (idx: [[#NFA+7]]) .storesTIInit
; DIS-NEXT: 94: 00 00 00 a8
-; DIS-NEXT: 00000094: R_POS (idx: [[#NFA+15]]) TOC[TC0]
+; DIS-NEXT: 00000094: R_POS (idx: [[#NFA+17]]) TOC[TC0]
; DIS-NEXT: 98: 00 00 00 00
-; DIS: 0000009c (idx: [[#NFA+13]]) loadsTWInit[DS]:
+; DIS: 0000009c (idx: [[#NFA+15]]) loadsTWInit[DS]:
; DIS-NEXT: 9c: 00 00 00 40
-; DIS-NEXT: 0000009c: R_POS (idx: [[#NFA+7]]) .loadsTWInit
+; DIS-NEXT: 0000009c: R_POS (idx: [[#NFA+9]]) .loadsTWInit
; DIS-NEXT: a0: 00 00 00 a8
-; DIS-NEXT: 000000a0: R_POS (idx: [[#NFA+15]]) TOC[TC0]
+; DIS-NEXT: 000000a0: R_POS (idx: [[#NFA+17]]) TOC[TC0]
; DIS-NEXT: a4: 00 00 00 00
-; DIS: 000000a8 (idx: [[#NFA+17]]) .TIInit[TE]:
+; DIS: 000000a8 (idx: [[#NFA+19]]) TIInit[TE]:
; DIS-NEXT: a8: 00 00 00 00
-; DIS-NEXT: 000000a8: R_TLSM (idx: [[#NFA+27]]) TIInit[TL]
-; DIS: 000000ac (idx: [[#NFA+19]]) TIInit[TE]:
+; DIS-NEXT: 000000a8: R_TLS_LD (idx: [[#NFA+29]]) TIInit[TL]
+; DIS: 000000ac (idx: [[#NFA+21]]) _$TLSML[TC]:
; DIS-NEXT: ac: 00 00 00 00
-; DIS-NEXT: 000000ac: R_TLS (idx: [[#NFA+27]]) TIInit[TL]
-; DIS: 000000b0 (idx: [[#NFA+21]]) .TWInit[TE]:
+; DIS-NEXT: 000000ac: R_TLSML (idx: [[#NFA+21]]) _$TLSML[TC]
+; DIS: 000000b0 (idx: [[#NFA+23]]) .TWInit[TE]:
; DIS-NEXT: b0: 00 00 00 00
-; DIS-NEXT: 000000b0: R_TLSM (idx: [[#NFA+29]]) TWInit[TL]
-; DIS: 000000b4 (idx: [[#NFA+23]]) TWInit[TE]:
+; DIS-NEXT: 000000b0: R_TLSM (idx: [[#NFA+31]]) TWInit[TL]
+; DIS: 000000b4 (idx: [[#NFA+25]]) TWInit[TE]:
; DIS-NEXT: b4: 00 00 00 08
-; DIS-NEXT: 000000b4: R_TLS (idx: [[#NFA+29]]) TWInit[TL]
-; DIS: 000000b8 (idx: [[#NFA+25]]) GInit[TE]:
+; DIS-NEXT: 000000b4: R_TLS (idx: [[#NFA+31]]) TWInit[TL]
+; DIS: 000000b8 (idx: [[#NFA+27]]) GInit[TE]:
; DIS-NEXT: b8: 00 00 00 88
-; DIS-NEXT: 000000b8: R_POS (idx: [[#NFA+9]]) GInit[RW]
+; DIS-NEXT: 000000b8: R_POS (idx: [[#NFA+11]]) GInit[RW]
; DIS: Disassembly of section .tdata:
-; DIS: 00000000 (idx: [[#NFA+27]]) TIInit[TL]:
+; DIS: 00000000 (idx: [[#NFA+29]]) TIInit[TL]:
; DIS-NEXT: 0: 00 00 00 00
; DIS-NEXT: 4: 00 00 00 01
-; DIS: 00000008 (idx: [[#NFA+29]]) TWInit[TL]:
+; DIS: 00000008 (idx: [[#NFA+31]]) TWInit[TL]:
; DIS-NEXT: 8: 3f f0 00 00
; DIS-NEXT: c: 00 00 00 00
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc.ll b/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc.ll
index eb7a0e277a56..0a3e7637b2e7 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-xcoff-reloc.ll
@@ -7,6 +7,7 @@
@const_ivar = constant i32 6, align 4
@GInit = global i32 1, align 4
@TGInit = thread_local global i32 1, align 4
+; @TIUninit (internal thread_local) uses the local-dynamic TLS model.
@TIUninit = internal thread_local global i32 0, align 4
; Function Attrs: nofree norecurse nounwind willreturn writeonly
@@ -33,31 +34,31 @@ entry:
; RELOC-NEXT: Section (index: 1) .text {
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0xE
-; RELOC-NEXT: Symbol: .TIUninit ([[#NFA+23]])
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+25]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
; RELOC-NEXT: Type: R_TOC (0x3)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
-; RELOC-NEXT: Virtual Address: 0x12
-; RELOC-NEXT: Symbol: TIUninit ([[#NFA+25]])
+; RELOC-NEXT: Virtual Address: 0x14
+; RELOC-NEXT: Symbol: .__tls_get_mod ([[#NFA+1]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
-; RELOC-NEXT: Length: 16
-; RELOC-NEXT: Type: R_TOC (0x3)
+; RELOC-NEXT: Length: 26
+; RELOC-NEXT: Type: R_RBA (0x18)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
-; RELOC-NEXT: Virtual Address: 0x18
-; RELOC-NEXT: Symbol: .__tls_get_addr ([[#NFA+1]])
+; RELOC-NEXT: Virtual Address: 0x1A
+; RELOC-NEXT: Symbol: TIUninit ([[#NFA+27]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
-; RELOC-NEXT: Length: 26
-; RELOC-NEXT: Type: R_RBA (0x18)
+; RELOC-NEXT: Length: 16
+; RELOC-NEXT: Type: R_TOC (0x3)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x3A
-; RELOC-NEXT: Symbol: .TGInit ([[#NFA+27]])
+; RELOC-NEXT: Symbol: .TGInit ([[#NFA+29]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -65,7 +66,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x3E
-; RELOC-NEXT: Symbol: TGInit ([[#NFA+29]])
+; RELOC-NEXT: Symbol: TGInit ([[#NFA+31]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -73,7 +74,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x44
-; RELOC-NEXT: Symbol: .__tls_get_addr ([[#NFA+1]])
+; RELOC-NEXT: Symbol: .__tls_get_addr ([[#NFA+3]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 26
@@ -81,7 +82,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x4A
-; RELOC-NEXT: Symbol: GInit ([[#NFA+31]])
+; RELOC-NEXT: Symbol: GInit ([[#NFA+33]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 16
@@ -91,7 +92,7 @@ entry:
; RELOC-NEXT: Section (index: 2) .data {
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x70
-; RELOC-NEXT: Symbol: .storesTIUninit ([[#NFA+5]])
+; RELOC-NEXT: Symbol: .storesTIUninit ([[#NFA+7]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -99,7 +100,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x74
-; RELOC-NEXT: Symbol: TOC ([[#NFA+21]])
+; RELOC-NEXT: Symbol: TOC ([[#NFA+23]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -107,7 +108,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x7C
-; RELOC-NEXT: Symbol: .loadsTGInit ([[#NFA+7]])
+; RELOC-NEXT: Symbol: .loadsTGInit ([[#NFA+9]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -115,7 +116,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x80
-; RELOC-NEXT: Symbol: TOC ([[#NFA+21]])
+; RELOC-NEXT: Symbol: TOC ([[#NFA+23]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -123,23 +124,23 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x88
-; RELOC-NEXT: Symbol: TIUninit ([[#NFA+37]])
+; RELOC-NEXT: Symbol: _$TLSML ([[#NFA+25]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
-; RELOC-NEXT: Type: R_TLSM (0x24)
+; RELOC-NEXT: Type: R_TLSML (0x25)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x8C
-; RELOC-NEXT: Symbol: TIUninit ([[#NFA+37]])
+; RELOC-NEXT: Symbol: TIUninit ([[#NFA+39]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
-; RELOC-NEXT: Type: R_TLS (0x20)
+; RELOC-NEXT: Type: R_TLS_LD (0x22)
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x90
-; RELOC-NEXT: Symbol: TGInit ([[#NFA+35]])
+; RELOC-NEXT: Symbol: TGInit ([[#NFA+37]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -147,7 +148,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x94
-; RELOC-NEXT: Symbol: TGInit ([[#NFA+35]])
+; RELOC-NEXT: Symbol: TGInit ([[#NFA+37]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -155,7 +156,7 @@ entry:
; RELOC-NEXT: }
; RELOC-NEXT: Relocation {
; RELOC-NEXT: Virtual Address: 0x98
-; RELOC-NEXT: Symbol: GInit ([[#NFA+15]])
+; RELOC-NEXT: Symbol: GInit ([[#NFA+17]])
; RELOC-NEXT: IsSigned: No
; RELOC-NEXT: FixupBitValue: 0
; RELOC-NEXT: Length: 32
@@ -178,9 +179,9 @@ entry:
; SYM-NEXT: CPU Version ID: TCPU_COM (0x3)
; SYM-NEXT: StorageClass: C_FILE (0x67)
; SYM-NEXT: NumberOfAuxEntries: 2
-; SYM: Symbol {
+; SYM: Symbol {
; SYM-NEXT: Index: [[#NFA+1]]
-; SYM-NEXT: Name: .__tls_get_addr
+; SYM-NEXT: Name: .__tls_get_mod
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: N_UNDEF
; SYM-NEXT: Type: 0x0
@@ -200,6 +201,26 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: Symbol {
; SYM-NEXT: Index: [[#NFA+3]]
+; SYM-NEXT: Name: .__tls_get_addr
+; SYM-NEXT: Value (RelocatableAddress): 0x0
+; SYM-NEXT: Section: N_UNDEF
+; SYM-NEXT: Type: 0x0
+; SYM-NEXT: StorageClass: C_EXT (0x2)
+; SYM-NEXT: NumberOfAuxEntries: 1
+; SYM-NEXT: CSECT Auxiliary Entry {
+; SYM-NEXT: Index: 6
+; SYM-NEXT: SectionLen: 0
+; SYM-NEXT: ParameterHashIndex: 0x0
+; SYM-NEXT: TypeChkSectNum: 0x0
+; SYM-NEXT: SymbolAlignmentLog2: 0
+; SYM-NEXT: SymbolType: XTY_ER (0x0)
+; SYM-NEXT: StorageMappingClass: XMC_PR (0x0)
+; SYM-NEXT: StabInfoIndex: 0x0
+; SYM-NEXT: StabSectNum: 0x0
+; SYM-NEXT: }
+; SYM-NEXT: }
+; SYM-NEXT: Symbol {
+; SYM-NEXT: Index: [[#NFA+5]]
; SYM-NEXT: Name:
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: .text
@@ -207,7 +228,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+4]]
+; SYM-NEXT: Index: [[#NFA+6]]
; SYM-NEXT: SectionLen: 104
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -219,7 +240,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+5]]
+; SYM-NEXT: Index: [[#NFA+7]]
; SYM-NEXT: Name: .storesTIUninit
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: .text
@@ -227,8 +248,8 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+6]]
-; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+3]]
+; SYM-NEXT: Index: [[#NFA+8]]
+; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+5]]
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 0
@@ -239,7 +260,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+7]]
+; SYM-NEXT: Index: [[#NFA+9]]
; SYM-NEXT: Name: .loadsTGInit
; SYM-NEXT: Value (RelocatableAddress): 0x30
; SYM-NEXT: Section: .text
@@ -247,8 +268,8 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+8]]
-; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+3]]
+; SYM-NEXT: Index: [[#NFA+10]]
+; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+5]]
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 0
@@ -259,7 +280,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+9]]
+; SYM-NEXT: Index: [[#NFA+11]]
; SYM-NEXT: Name: .rodata
; SYM-NEXT: Value (RelocatableAddress): 0x68
; SYM-NEXT: Section: .text
@@ -267,7 +288,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+10]]
+; SYM-NEXT: Index: [[#NFA+12]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -279,7 +300,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+11]]
+; SYM-NEXT: Index: [[#NFA+13]]
; SYM-NEXT: Name: const_ivar
; SYM-NEXT: Value (RelocatableAddress): 0x68
; SYM-NEXT: Section: .text
@@ -287,8 +308,8 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+12]]
-; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+9]]
+; SYM-NEXT: Index: [[#NFA+14]]
+; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+11]]
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 0
@@ -299,7 +320,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+13]]
+; SYM-NEXT: Index: [[#NFA+15]]
; SYM-NEXT: Name: .data
; SYM-NEXT: Value (RelocatableAddress): 0x6C
; SYM-NEXT: Section: .data
@@ -307,7 +328,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+14]]
+; SYM-NEXT: Index: [[#NFA+16]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -319,7 +340,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+15]]
+; SYM-NEXT: Index: [[#NFA+17]]
; SYM-NEXT: Name: GInit
; SYM-NEXT: Value (RelocatableAddress): 0x6C
; SYM-NEXT: Section: .data
@@ -327,8 +348,8 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+16]]
-; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+13]]
+; SYM-NEXT: Index: [[#NFA+18]]
+; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+15]]
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 0
@@ -339,7 +360,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+17]]
+; SYM-NEXT: Index: [[#NFA+19]]
; SYM-NEXT: Name: storesTIUninit
; SYM-NEXT: Value (RelocatableAddress): 0x70
; SYM-NEXT: Section: .data
@@ -347,7 +368,7 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+18]]
+; SYM-NEXT: Index: [[#NFA+20]]
; SYM-NEXT: SectionLen: 12
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -359,7 +380,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+19]]
+; SYM-NEXT: Index: [[#NFA+21]]
; SYM-NEXT: Name: loadsTGInit
; SYM-NEXT: Value (RelocatableAddress): 0x7C
; SYM-NEXT: Section: .data
@@ -367,7 +388,7 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+20]]
+; SYM-NEXT: Index: [[#NFA+22]]
; SYM-NEXT: SectionLen: 12
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -379,7 +400,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+21]]
+; SYM-NEXT: Index: [[#NFA+23]]
; SYM-NEXT: Name: TOC
; SYM-NEXT: Value (RelocatableAddress): 0x88
; SYM-NEXT: Section: .data
@@ -387,7 +408,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+22]]
+; SYM-NEXT: Index: [[#NFA+24]]
; SYM-NEXT: SectionLen: 0
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -399,15 +420,15 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+23]]
-; SYM-NEXT: Name: .TIUninit
+; SYM-NEXT: Index: [[#NFA+25]]
+; SYM-NEXT: Name: _$TLSML
; SYM-NEXT: Value (RelocatableAddress): 0x88
; SYM-NEXT: Section: .data
; SYM-NEXT: Type: 0x0
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+24]]
+; SYM-NEXT: Index: [[#NFA+26]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -419,7 +440,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+25]]
+; SYM-NEXT: Index: [[#NFA+27]]
; SYM-NEXT: Name: TIUninit
; SYM-NEXT: Value (RelocatableAddress): 0x8C
; SYM-NEXT: Section: .data
@@ -427,7 +448,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+26]]
+; SYM-NEXT: Index: [[#NFA+28]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -439,7 +460,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+27]]
+; SYM-NEXT: Index: [[#NFA+29]]
; SYM-NEXT: Name: .TGInit
; SYM-NEXT: Value (RelocatableAddress): 0x90
; SYM-NEXT: Section: .data
@@ -447,7 +468,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+28]]
+; SYM-NEXT: Index: [[#NFA+30]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -459,7 +480,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+29]]
+; SYM-NEXT: Index: [[#NFA+31]]
; SYM-NEXT: Name: TGInit
; SYM-NEXT: Value (RelocatableAddress): 0x94
; SYM-NEXT: Section: .data
@@ -467,7 +488,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+30]]
+; SYM-NEXT: Index: [[#NFA+32]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -479,7 +500,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+31]]
+; SYM-NEXT: Index: [[#NFA+33]]
; SYM-NEXT: Name: GInit
; SYM-NEXT: Value (RelocatableAddress): 0x98
; SYM-NEXT: Section: .data
@@ -487,7 +508,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+32]]
+; SYM-NEXT: Index: [[#NFA+34]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -499,7 +520,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+33]]
+; SYM-NEXT: Index: [[#NFA+35]]
; SYM-NEXT: Name: .tdata
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: .tdata
@@ -507,7 +528,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+34]]
+; SYM-NEXT: Index: [[#NFA+36]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -519,7 +540,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+35]]
+; SYM-NEXT: Index: [[#NFA+37]]
; SYM-NEXT: Name: TGInit
; SYM-NEXT: Value (RelocatableAddress): 0x0
; SYM-NEXT: Section: .tdata
@@ -527,8 +548,8 @@ entry:
; SYM-NEXT: StorageClass: C_EXT (0x2)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+36]]
-; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+33]]
+; SYM-NEXT: Index: [[#NFA+38]]
+; SYM-NEXT: ContainingCsectSymbolIndex: [[#NFA+35]]
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
; SYM-NEXT: SymbolAlignmentLog2: 0
@@ -539,7 +560,7 @@ entry:
; SYM-NEXT: }
; SYM-NEXT: }
; SYM-NEXT: Symbol {
-; SYM-NEXT: Index: [[#NFA+37]]
+; SYM-NEXT: Index: [[#NFA+39]]
; SYM-NEXT: Name: TIUninit
; SYM-NEXT: Value (RelocatableAddress): 0x4
; SYM-NEXT: Section: .tbss
@@ -547,7 +568,7 @@ entry:
; SYM-NEXT: StorageClass: C_HIDEXT (0x6B)
; SYM-NEXT: NumberOfAuxEntries: 1
; SYM-NEXT: CSECT Auxiliary Entry {
-; SYM-NEXT: Index: [[#NFA+38]]
+; SYM-NEXT: Index: [[#NFA+40]]
; SYM-NEXT: SectionLen: 4
; SYM-NEXT: ParameterHashIndex: 0x0
; SYM-NEXT: TypeChkSectNum: 0x0
@@ -562,34 +583,34 @@ entry:
; DIS: {{.*}}aix-tls-xcoff-reloc.ll.tmp.o: file format aixcoff-rs6000
; DIS: Disassembly of section .text:
-; DIS: 00000000 (idx: [[#NFA+5]]) .storesTIUninit:
+; DIS: 00000000 (idx: [[#NFA+7]]) .storesTIUninit:
; DIS-NEXT: mflr 0
; DIS-NEXT: stwu 1, -32(1)
; DIS-NEXT: mr 6, 3
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 3, 0(2)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+23]]) .TIUninit[TC]
-; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 4, 4(2)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+25]]) TIUninit[TC]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+25]]) _$TLSML[TC]
; DIS-NEXT: stw 0, 40(1)
-; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0
-; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_addr[PR]
-; DIS-NEXT: stw 6, 0(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 4, 4(2)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+27]]) TIUninit[TC]
+; DIS-NEXT: stwx 6, 3, 4
; DIS-NEXT: addi 1, 1, 32
; DIS-NEXT: lwz 0, 8(1)
; DIS-NEXT: mtlr 0
; DIS-NEXT: blr
-; DIS: 00000030 (idx: [[#NFA+7]]) .loadsTGInit:
+; DIS: 00000030 (idx: [[#NFA+9]]) .loadsTGInit:
; DIS-NEXT: mflr 0
; DIS-NEXT: stwu 1, -32(1)
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 3, 8(2)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+27]]) .TGInit[TC]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+29]]) .TGInit[TC]
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 4, 12(2)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+29]]) TGInit[TC]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+31]]) TGInit[TC]
; DIS-NEXT: stw 0, 40(1)
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0
-; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_addr[PR]
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+3]]) .__tls_get_addr[PR]
; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} lwz 4, 16(2)
-; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+31]]) GInit[TC]
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOC (idx: [[#NFA+33]]) GInit[TC]
; DIS-NEXT: lwz 3, 0(3)
; DIS-NEXT: lwz 4, 0(4)
; DIS-NEXT: add 3, 4, 3
@@ -597,46 +618,46 @@ entry:
; DIS-NEXT: lwz 0, 8(1)
; DIS-NEXT: mtlr 0
; DIS-NEXT: blr
-; DIS: 00000068 (idx: [[#NFA+11]]) const_ivar:
+; DIS: 00000068 (idx: [[#NFA+13]]) const_ivar:
; DIS-NEXT: 68: 00 00 00 06
; DIS: Disassembly of section .data:
-; DIS: 0000006c (idx: [[#NFA+15]]) GInit:
+; DIS: 0000006c (idx: [[#NFA+17]]) GInit:
; DIS-NEXT: 6c: 00 00 00 01
-; DIS: 00000070 (idx: [[#NFA+17]]) storesTIUninit[DS]:
+; DIS: 00000070 (idx: [[#NFA+19]]) storesTIUninit[DS]:
; DIS-NEXT: 70: 00 00 00 00
-; DIS-NEXT: 00000070: R_POS (idx: [[#NFA+5]]) .storesTIUninit
+; DIS-NEXT: 00000070: R_POS (idx: [[#NFA+7]]) .storesTIUninit
; DIS-NEXT: 74: 00 00 00 88
-; DIS-NEXT: 00000074: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 00000074: R_POS (idx: [[#NFA+23]]) TOC[TC0]
; DIS-NEXT: 78: 00 00 00 00
-; DIS: 0000007c (idx: [[#NFA+19]]) loadsTGInit[DS]:
+; DIS: 0000007c (idx: [[#NFA+21]]) loadsTGInit[DS]:
; DIS-NEXT: 7c: 00 00 00 30
-; DIS-NEXT: 0000007c: R_POS (idx: [[#NFA+7]]) .loadsTGInit
+; DIS-NEXT: 0000007c: R_POS (idx: [[#NFA+9]]) .loadsTGInit
; DIS-NEXT: 80: 00 00 00 88
-; DIS-NEXT: 00000080: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 00000080: R_POS (idx: [[#NFA+23]]) TOC[TC0]
; DIS-NEXT: 84: 00 00 00 00
-; DIS: 00000088 (idx: [[#NFA+23]]) .TIUninit[TC]:
+; DIS: 00000088 (idx: [[#NFA+25]]) _$TLSML[TC]:
; DIS-NEXT: 88: 00 00 00 00
-; DIS-NEXT: 00000088: R_TLSM (idx: [[#NFA+37]]) TIUninit[UL]
-; DIS: 0000008c (idx: [[#NFA+25]]) TIUninit[TC]:
+; DIS-NEXT: 00000088: R_TLSML (idx: [[#NFA+25]]) _$TLSML[TC]
+; DIS: 0000008c (idx: [[#NFA+27]]) TIUninit[TC]:
; DIS-NEXT: 8c: 00 00 00 04
-; DIS-NEXT: 0000008c: R_TLS (idx: [[#NFA+37]]) TIUninit[UL]
-; DIS: 00000090 (idx: [[#NFA+27]]) .TGInit[TC]:
+; DIS-NEXT: 0000008c: R_TLS_LD (idx: [[#NFA+39]]) TIUninit[UL]
+; DIS: 00000090 (idx: [[#NFA+29]]) .TGInit[TC]:
; DIS-NEXT: 90: 00 00 00 00
-; DIS-NEXT: 00000090: R_TLSM (idx: [[#NFA+35]]) TGInit
-; DIS: 00000094 (idx: [[#NFA+29]]) TGInit[TC]:
+; DIS-NEXT: 00000090: R_TLSM (idx: [[#NFA+37]]) TGInit
+; DIS: 00000094 (idx: [[#NFA+31]]) TGInit[TC]:
; DIS-NEXT: 94: 00 00 00 00
-; DIS-NEXT: 00000094: R_TLS (idx: [[#NFA+35]]) TGInit
-; DIS: 00000098 (idx: [[#NFA+31]]) GInit[TC]:
+; DIS-NEXT: 00000094: R_TLS (idx: [[#NFA+37]]) TGInit
+; DIS: 00000098 (idx: [[#NFA+33]]) GInit[TC]:
; DIS-NEXT: 98: 00 00 00 6c
-; DIS-NEXT: 00000098: R_POS (idx: [[#NFA+15]]) GInit
+; DIS-NEXT: 00000098: R_POS (idx: [[#NFA+17]]) GInit
; DIS: Disassembly of section .tdata:
-; DIS: 00000000 (idx: [[#NFA+35]]) TGInit:
+; DIS: 00000000 (idx: [[#NFA+37]]) TGInit:
; DIS-NEXT: 0: 00 00 00 01
; DIS: Disassembly of section .tbss:
-; DIS: 00000004 (idx: [[#NFA+37]]) TIUninit[UL]:
+; DIS: 00000004 (idx: [[#NFA+39]]) TIUninit[UL]:
; DIS-NEXT: ...
attributes #0 = { nofree norecurse nounwind willreturn writeonly "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pwr4" "target-features"="-altivec,-bpermd,-crypto,-direct-move,-extdiv,-float128,-htm,-mma,-paired-vector-memops,-power10-vector,-power8-vector,-power9-vector,-spe,-vsx" }
diff --git a/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll
index adbb956ba32a..505ac8639595 100644
--- a/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll
@@ -6,55 +6,50 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: mr 5, 3
-; CHECK-NEXT: rlwinm 7, 5, 3, 27, 28
+; CHECK-NEXT: rlwinm 6, 5, 3, 27, 28
; CHECK-NEXT: lbz 3, 0(3)
-; CHECK-NEXT: xori 7, 7, 24
-; CHECK-NEXT: li 8, 255
-; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: xori 6, 6, 24
+; CHECK-NEXT: li 7, 255
; CHECK-NEXT: clrlwi 4, 4, 24
; CHECK-NEXT: rldicr 5, 5, 0, 61
-; CHECK-NEXT: slw 8, 8, 7
+; CHECK-NEXT: slw 7, 7, 6
; CHECK-NEXT: b .LBB0_2
; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: srw 3, 11, 7
-; CHECK-NEXT: cmplw 3, 9
-; CHECK-NEXT: beq 0, .LBB0_8
+; CHECK-NEXT: srw 3, 10, 6
+; CHECK-NEXT: cmplw 3, 8
+; CHECK-NEXT: beq 0, .LBB0_7
; CHECK-NEXT: .LBB0_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB0_6 Depth 2
-; CHECK-NEXT: clrlwi 9, 3, 24
-; CHECK-NEXT: addi 10, 3, 1
-; CHECK-NEXT: cmplw 9, 4
-; CHECK-NEXT: bc 12, 0, .LBB0_4
+; CHECK-NEXT: # Child Loop BB0_5 Depth 2
+; CHECK-NEXT: clrlwi 8, 3, 24
+; CHECK-NEXT: cmplw 8, 4
+; CHECK-NEXT: li 9, 0
+; CHECK-NEXT: bge 0, .LBB0_4
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 3, 6, 0
-; CHECK-NEXT: b .LBB0_5
+; CHECK-NEXT: addi 9, 3, 1
; CHECK-NEXT: .LBB0_4: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: addi 3, 10, 0
+; CHECK-NEXT: slw 3, 9, 6
+; CHECK-NEXT: slw 9, 8, 6
+; CHECK-NEXT: and 3, 3, 7
+; CHECK-NEXT: and 9, 9, 7
; CHECK-NEXT: .LBB0_5: # %atomicrmw.start
-; CHECK-NEXT: #
-; CHECK-NEXT: slw 11, 9, 7
-; CHECK-NEXT: slw 3, 3, 7
-; CHECK-NEXT: and 3, 3, 8
-; CHECK-NEXT: and 10, 11, 8
-; CHECK-NEXT: .LBB0_6: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB0_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-NEXT: lwarx 12, 0, 5
-; CHECK-NEXT: and 11, 12, 8
-; CHECK-NEXT: cmpw 11, 10
+; CHECK-NEXT: lwarx 11, 0, 5
+; CHECK-NEXT: and 10, 11, 7
+; CHECK-NEXT: cmpw 10, 9
; CHECK-NEXT: bne 0, .LBB0_1
-; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: andc 12, 12, 8
-; CHECK-NEXT: or 12, 12, 3
-; CHECK-NEXT: stwcx. 12, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_6
+; CHECK-NEXT: andc 11, 11, 7
+; CHECK-NEXT: or 11, 11, 3
+; CHECK-NEXT: stwcx. 11, 0, 5
+; CHECK-NEXT: bne 0, .LBB0_5
; CHECK-NEXT: b .LBB0_1
-; CHECK-NEXT: .LBB0_8: # %atomicrmw.end
+; CHECK-NEXT: .LBB0_7: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
%result = atomicrmw uinc_wrap ptr %ptr, i8 %val seq_cst
@@ -66,55 +61,51 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: mr 5, 3
-; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: li 7, 0
; CHECK-NEXT: lhz 3, 0(3)
-; CHECK-NEXT: rlwinm 7, 5, 3, 27, 27
-; CHECK-NEXT: xori 7, 7, 16
-; CHECK-NEXT: ori 8, 6, 65535
+; CHECK-NEXT: rlwinm 6, 5, 3, 27, 27
+; CHECK-NEXT: xori 6, 6, 16
+; CHECK-NEXT: ori 7, 7, 65535
; CHECK-NEXT: clrlwi 4, 4, 16
; CHECK-NEXT: rldicr 5, 5, 0, 61
-; CHECK-NEXT: slw 8, 8, 7
+; CHECK-NEXT: slw 7, 7, 6
; CHECK-NEXT: b .LBB1_2
; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: srw 3, 11, 7
-; CHECK-NEXT: cmplw 3, 9
-; CHECK-NEXT: beq 0, .LBB1_8
+; CHECK-NEXT: srw 3, 10, 6
+; CHECK-NEXT: cmplw 3, 8
+; CHECK-NEXT: beq 0, .LBB1_7
; CHECK-NEXT: .LBB1_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB1_6 Depth 2
-; CHECK-NEXT: clrlwi 9, 3, 16
-; CHECK-NEXT: addi 10, 3, 1
-; CHECK-NEXT: cmplw 9, 4
-; CHECK-NEXT: bc 12, 0, .LBB1_4
+; CHECK-NEXT: # Child Loop BB1_5 Depth 2
+; CHECK-NEXT: clrlwi 8, 3, 16
+; CHECK-NEXT: cmplw 8, 4
+; CHECK-NEXT: li 9, 0
+; CHECK-NEXT: bge 0, .LBB1_4
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 3, 6, 0
-; CHECK-NEXT: b .LBB1_5
+; CHECK-NEXT: addi 9, 3, 1
; CHECK-NEXT: .LBB1_4: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: addi 3, 10, 0
+; CHECK-NEXT: slw 3, 9, 6
+; CHECK-NEXT: slw 9, 8, 6
+; CHECK-NEXT: and 3, 3, 7
+; CHECK-NEXT: and 9, 9, 7
; CHECK-NEXT: .LBB1_5: # %atomicrmw.start
-; CHECK-NEXT: #
-; CHECK-NEXT: slw 11, 9, 7
-; CHECK-NEXT: slw 3, 3, 7
-; CHECK-NEXT: and 3, 3, 8
-; CHECK-NEXT: and 10, 11, 8
-; CHECK-NEXT: .LBB1_6: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB1_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-NEXT: lwarx 12, 0, 5
-; CHECK-NEXT: and 11, 12, 8
-; CHECK-NEXT: cmpw 11, 10
+; CHECK-NEXT: lwarx 11, 0, 5
+; CHECK-NEXT: and 10, 11, 7
+; CHECK-NEXT: cmpw 10, 9
; CHECK-NEXT: bne 0, .LBB1_1
-; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: andc 12, 12, 8
-; CHECK-NEXT: or 12, 12, 3
-; CHECK-NEXT: stwcx. 12, 0, 5
-; CHECK-NEXT: bne 0, .LBB1_6
+; CHECK-NEXT: andc 11, 11, 7
+; CHECK-NEXT: or 11, 11, 3
+; CHECK-NEXT: stwcx. 11, 0, 5
+; CHECK-NEXT: bne 0, .LBB1_5
; CHECK-NEXT: b .LBB1_1
-; CHECK-NEXT: .LBB1_8: # %atomicrmw.end
+; CHECK-NEXT: .LBB1_7: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
%result = atomicrmw uinc_wrap ptr %ptr, i16 %val seq_cst
@@ -125,39 +116,34 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; CHECK-LABEL: atomicrmw_uinc_wrap_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
-; CHECK-NEXT: li 6, 0
-; CHECK-NEXT: lwz 5, 0(3)
+; CHECK-NEXT: lwz 6, 0(3)
; CHECK-NEXT: b .LBB2_2
; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: cmplw 5, 7
-; CHECK-NEXT: beq 0, .LBB2_7
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: beq 0, .LBB2_6
; CHECK-NEXT: .LBB2_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB2_5 Depth 2
-; CHECK-NEXT: mr 7, 5
-; CHECK-NEXT: addi 5, 5, 1
-; CHECK-NEXT: cmplw 7, 4
-; CHECK-NEXT: bc 12, 0, .LBB2_4
+; CHECK-NEXT: # Child Loop BB2_4 Depth 2
+; CHECK-NEXT: cmplw 6, 4
+; CHECK-NEXT: li 7, 0
+; CHECK-NEXT: bge 0, .LBB2_4
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 8, 6, 0
-; CHECK-NEXT: b .LBB2_5
+; CHECK-NEXT: addi 7, 6, 1
; CHECK-NEXT: .LBB2_4: # %atomicrmw.start
-; CHECK-NEXT: #
-; CHECK-NEXT: addi 8, 5, 0
-; CHECK-NEXT: .LBB2_5: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB2_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 5, 0, 3
-; CHECK-NEXT: cmpw 5, 7
+; CHECK-NEXT: cmpw 5, 6
; CHECK-NEXT: bne 0, .LBB2_1
-; CHECK-NEXT: # %bb.6: # %atomicrmw.start
+; CHECK-NEXT: # %bb.5: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: stwcx. 8, 0, 3
-; CHECK-NEXT: bne 0, .LBB2_5
+; CHECK-NEXT: stwcx. 7, 0, 3
+; CHECK-NEXT: bne 0, .LBB2_4
; CHECK-NEXT: b .LBB2_1
-; CHECK-NEXT: .LBB2_7: # %atomicrmw.end
+; CHECK-NEXT: .LBB2_6: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -169,39 +155,34 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; CHECK-LABEL: atomicrmw_uinc_wrap_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
-; CHECK-NEXT: ld 5, 0(3)
-; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: ld 6, 0(3)
; CHECK-NEXT: b .LBB3_2
; CHECK-NEXT: .LBB3_1: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: cmpld 5, 7
-; CHECK-NEXT: beq 0, .LBB3_7
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: beq 0, .LBB3_6
; CHECK-NEXT: .LBB3_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB3_5 Depth 2
-; CHECK-NEXT: mr 7, 5
-; CHECK-NEXT: addi 5, 5, 1
-; CHECK-NEXT: cmpld 7, 4
-; CHECK-NEXT: bc 12, 0, .LBB3_4
+; CHECK-NEXT: # Child Loop BB3_4 Depth 2
+; CHECK-NEXT: cmpld 6, 4
+; CHECK-NEXT: li 7, 0
+; CHECK-NEXT: bge 0, .LBB3_4
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 8, 6, 0
-; CHECK-NEXT: b .LBB3_5
+; CHECK-NEXT: addi 7, 6, 1
; CHECK-NEXT: .LBB3_4: # %atomicrmw.start
-; CHECK-NEXT: #
-; CHECK-NEXT: addi 8, 5, 0
-; CHECK-NEXT: .LBB3_5: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB3_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: ldarx 5, 0, 3
-; CHECK-NEXT: cmpd 5, 7
+; CHECK-NEXT: cmpd 5, 6
; CHECK-NEXT: bne 0, .LBB3_1
-; CHECK-NEXT: # %bb.6: # %atomicrmw.start
+; CHECK-NEXT: # %bb.5: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: stdcx. 8, 0, 3
-; CHECK-NEXT: bne 0, .LBB3_5
+; CHECK-NEXT: stdcx. 7, 0, 3
+; CHECK-NEXT: bne 0, .LBB3_4
; CHECK-NEXT: b .LBB3_1
-; CHECK-NEXT: .LBB3_7: # %atomicrmw.end
+; CHECK-NEXT: .LBB3_6: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -226,43 +207,39 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; CHECK-NEXT: #
; CHECK-NEXT: srw 3, 11, 7
; CHECK-NEXT: cmplw 3, 9
-; CHECK-NEXT: beq 0, .LBB4_8
+; CHECK-NEXT: beq 0, .LBB4_7
; CHECK-NEXT: .LBB4_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB4_6 Depth 2
+; CHECK-NEXT: # Child Loop BB4_5 Depth 2
; CHECK-NEXT: andi. 9, 3, 255
; CHECK-NEXT: cmplw 1, 9, 6
-; CHECK-NEXT: addi 10, 3, -1
; CHECK-NEXT: cror 20, 2, 5
+; CHECK-NEXT: mr 10, 4
; CHECK-NEXT: bc 12, 20, .LBB4_4
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 3, 10, 0
-; CHECK-NEXT: b .LBB4_5
+; CHECK-NEXT: addi 10, 3, -1
; CHECK-NEXT: .LBB4_4: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: addi 3, 4, 0
-; CHECK-NEXT: .LBB4_5: # %atomicrmw.start
-; CHECK-NEXT: #
-; CHECK-NEXT: slw 11, 9, 7
-; CHECK-NEXT: slw 3, 3, 7
+; CHECK-NEXT: slw 3, 10, 7
+; CHECK-NEXT: slw 10, 9, 7
; CHECK-NEXT: and 3, 3, 8
-; CHECK-NEXT: and 10, 11, 8
-; CHECK-NEXT: .LBB4_6: # %atomicrmw.start
+; CHECK-NEXT: and 10, 10, 8
+; CHECK-NEXT: .LBB4_5: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB4_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 12, 0, 5
; CHECK-NEXT: and 11, 12, 8
; CHECK-NEXT: cmpw 11, 10
; CHECK-NEXT: bne 0, .LBB4_1
-; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
; CHECK-NEXT: #
; CHECK-NEXT: andc 12, 12, 8
; CHECK-NEXT: or 12, 12, 3
; CHECK-NEXT: stwcx. 12, 0, 5
-; CHECK-NEXT: bne 0, .LBB4_6
+; CHECK-NEXT: bne 0, .LBB4_5
; CHECK-NEXT: b .LBB4_1
-; CHECK-NEXT: .LBB4_8: # %atomicrmw.end
+; CHECK-NEXT: .LBB4_7: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
%result = atomicrmw udec_wrap ptr %ptr, i8 %val seq_cst
@@ -287,43 +264,39 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; CHECK-NEXT: #
; CHECK-NEXT: srw 3, 11, 7
; CHECK-NEXT: cmplw 3, 9
-; CHECK-NEXT: beq 0, .LBB5_8
+; CHECK-NEXT: beq 0, .LBB5_7
; CHECK-NEXT: .LBB5_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB5_6 Depth 2
+; CHECK-NEXT: # Child Loop BB5_5 Depth 2
; CHECK-NEXT: andi. 9, 3, 65535
; CHECK-NEXT: cmplw 1, 9, 6
-; CHECK-NEXT: addi 10, 3, -1
; CHECK-NEXT: cror 20, 2, 5
+; CHECK-NEXT: mr 10, 4
; CHECK-NEXT: bc 12, 20, .LBB5_4
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 3, 10, 0
-; CHECK-NEXT: b .LBB5_5
+; CHECK-NEXT: addi 10, 3, -1
; CHECK-NEXT: .LBB5_4: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: addi 3, 4, 0
-; CHECK-NEXT: .LBB5_5: # %atomicrmw.start
-; CHECK-NEXT: #
-; CHECK-NEXT: slw 11, 9, 7
-; CHECK-NEXT: slw 3, 3, 7
+; CHECK-NEXT: slw 3, 10, 7
+; CHECK-NEXT: slw 10, 9, 7
; CHECK-NEXT: and 3, 3, 8
-; CHECK-NEXT: and 10, 11, 8
-; CHECK-NEXT: .LBB5_6: # %atomicrmw.start
+; CHECK-NEXT: and 10, 10, 8
+; CHECK-NEXT: .LBB5_5: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB5_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 12, 0, 5
; CHECK-NEXT: and 11, 12, 8
; CHECK-NEXT: cmpw 11, 10
; CHECK-NEXT: bne 0, .LBB5_1
-; CHECK-NEXT: # %bb.7: # %atomicrmw.start
+; CHECK-NEXT: # %bb.6: # %atomicrmw.start
; CHECK-NEXT: #
; CHECK-NEXT: andc 12, 12, 8
; CHECK-NEXT: or 12, 12, 3
; CHECK-NEXT: stwcx. 12, 0, 5
-; CHECK-NEXT: bne 0, .LBB5_6
+; CHECK-NEXT: bne 0, .LBB5_5
; CHECK-NEXT: b .LBB5_1
-; CHECK-NEXT: .LBB5_8: # %atomicrmw.end
+; CHECK-NEXT: .LBB5_7: # %atomicrmw.end
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
%result = atomicrmw udec_wrap ptr %ptr, i16 %val seq_cst
@@ -334,28 +307,27 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; CHECK-LABEL: atomicrmw_udec_wrap_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
-; CHECK-NEXT: lwz 5, 0(3)
+; CHECK-NEXT: lwz 6, 0(3)
; CHECK-NEXT: b .LBB6_2
; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
; CHECK-NEXT: #
; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: mr 6, 5
; CHECK-NEXT: beq 0, .LBB6_7
; CHECK-NEXT: .LBB6_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB6_5 Depth 2
-; CHECK-NEXT: mr 6, 5
; CHECK-NEXT: cmpwi 6, 0
-; CHECK-NEXT: cmplw 1, 6, 4
-; CHECK-NEXT: addi 5, 5, -1
-; CHECK-NEXT: cror 20, 2, 5
-; CHECK-NEXT: bc 12, 20, .LBB6_4
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 2, .LBB6_5
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 7, 5, 0
-; CHECK-NEXT: b .LBB6_5
-; CHECK-NEXT: .LBB6_4: # %atomicrmw.start
+; CHECK-NEXT: cmplw 6, 4
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 1, .LBB6_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: addi 7, 4, 0
+; CHECK-NEXT: addi 7, 6, -1
; CHECK-NEXT: .LBB6_5: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB6_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
@@ -379,27 +351,27 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
; CHECK-LABEL: atomicrmw_udec_wrap_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sync
-; CHECK-NEXT: ld 5, 0(3)
+; CHECK-NEXT: ld 6, 0(3)
; CHECK-NEXT: b .LBB7_2
; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
; CHECK-NEXT: #
; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: mr 6, 5
; CHECK-NEXT: beq 0, .LBB7_7
; CHECK-NEXT: .LBB7_2: # %atomicrmw.start
; CHECK-NEXT: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB7_5 Depth 2
-; CHECK-NEXT: mr. 6, 5
-; CHECK-NEXT: cmpld 1, 6, 4
-; CHECK-NEXT: addi 5, 5, -1
-; CHECK-NEXT: cror 20, 2, 5
-; CHECK-NEXT: bc 12, 20, .LBB7_4
+; CHECK-NEXT: cmpdi 6, 0
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 2, .LBB7_5
; CHECK-NEXT: # %bb.3: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: ori 7, 5, 0
-; CHECK-NEXT: b .LBB7_5
-; CHECK-NEXT: .LBB7_4: # %atomicrmw.start
+; CHECK-NEXT: cmpld 6, 4
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 1, .LBB7_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
; CHECK-NEXT: #
-; CHECK-NEXT: addi 7, 4, 0
+; CHECK-NEXT: addi 7, 6, -1
; CHECK-NEXT: .LBB7_5: # %atomicrmw.start
; CHECK-NEXT: # Parent Loop BB7_2 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
diff --git a/llvm/test/CodeGen/PowerPC/crsave.ll b/llvm/test/CodeGen/PowerPC/crsave.ll
index 81e7a0adcc8c..bde49d02e86e 100644
--- a/llvm/test/CodeGen/PowerPC/crsave.ll
+++ b/llvm/test/CodeGen/PowerPC/crsave.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -O0 -frame-pointer=all -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 < %s | FileCheck %s -check-prefix=PPC32
; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5 < %s | FileCheck %s -check-prefix=PPC64
; RUN: llc -O0 -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s -check-prefix=PPC64-ELFv2
@@ -5,6 +6,101 @@
declare void @foo()
define i32 @test_cr2() nounwind uwtable {
+; PPC32-LABEL: test_cr2:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: mflr 0
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: stw 0, 36(1)
+; PPC32-NEXT: .cfi_def_cfa_offset 32
+; PPC32-NEXT: .cfi_offset r31, -4
+; PPC32-NEXT: .cfi_offset lr, 4
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: .cfi_def_cfa_register r31
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: li 3, 1
+; PPC32-NEXT: li 4, 2
+; PPC32-NEXT: li 5, 3
+; PPC32-NEXT: li 6, 0
+; PPC32-NEXT: #APP
+; PPC32-EMPTY:
+; PPC32-NEXT: mtcr 6
+; PPC32-NEXT: cmpw 2, 4, 3
+; PPC32-NEXT: mfcr 3
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: stw 3, 20(31)
+; PPC32-NEXT: bl foo
+; PPC32-NEXT: lwz 3, 20(31)
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: lwz 0, 36(1)
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: mtlr 0
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: test_cr2:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mflr 0
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: stdu 1, -128(1)
+; PPC64-NEXT: std 0, 144(1)
+; PPC64-NEXT: .cfi_def_cfa_offset 128
+; PPC64-NEXT: .cfi_offset lr, 16
+; PPC64-NEXT: .cfi_offset cr2, 8
+; PPC64-NEXT: li 3, 1
+; PPC64-NEXT: li 4, 2
+; PPC64-NEXT: li 5, 3
+; PPC64-NEXT: li 6, 0
+; PPC64-NEXT: #APP
+; PPC64-EMPTY:
+; PPC64-NEXT: mtcr 6
+; PPC64-NEXT: cmpw 2, 4, 3
+; PPC64-NEXT: mfcr 3
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: stw 3, 124(1)
+; PPC64-NEXT: bl foo
+; PPC64-NEXT: nop
+; PPC64-NEXT: lwz 3, 124(1)
+; PPC64-NEXT: addi 1, 1, 128
+; PPC64-NEXT: ld 0, 16(1)
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: mtlr 0
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: test_cr2:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mflr 0
+; PPC64-ELFv2-NEXT: mfocrf 12, 32
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: stdu 1, -112(1)
+; PPC64-ELFv2-NEXT: std 0, 128(1)
+; PPC64-ELFv2-NEXT: .cfi_def_cfa_offset 112
+; PPC64-ELFv2-NEXT: .cfi_offset lr, 16
+; PPC64-ELFv2-NEXT: .cfi_offset cr2, 8
+; PPC64-ELFv2-NEXT: li 3, 1
+; PPC64-ELFv2-NEXT: li 4, 2
+; PPC64-ELFv2-NEXT: li 5, 3
+; PPC64-ELFv2-NEXT: li 6, 0
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-EMPTY:
+; PPC64-ELFv2-NEXT: mtcr 6
+; PPC64-ELFv2-NEXT: cmpw 2, 4, 3
+; PPC64-ELFv2-NEXT: mfcr 3
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: stw 3, 108(1)
+; PPC64-ELFv2-NEXT: bl foo
+; PPC64-ELFv2-NEXT: nop
+; PPC64-ELFv2-NEXT: lwz 3, 108(1)
+; PPC64-ELFv2-NEXT: addi 1, 1, 112
+; PPC64-ELFv2-NEXT: ld 0, 16(1)
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: mtlr 0
+; PPC64-ELFv2-NEXT: blr
entry:
%ret = alloca i32, align 4
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind
@@ -14,27 +110,104 @@ entry:
ret i32 %1
}
-; PPC32-LABEL: test_cr2:
-; PPC32: stwu 1, -32(1)
-; PPC32: stw 31, 28(1)
-; PPC32: mfcr 12
-; PPC32-NEXT: stw 12, 24(31)
-; PPC32: lwz 12, 24(31)
-; PPC32-NEXT: mtocrf 32, 12
-
-; PPC64: .cfi_startproc
-; PPC64: mfcr 12
-; PPC64: stw 12, 8(1)
-; PPC64: stdu 1, -[[AMT:[0-9]+]](1)
-; PPC64: .cfi_def_cfa_offset 128
-; PPC64: .cfi_offset lr, 16
-; PPC64: .cfi_offset cr2, 8
-; PPC64: addi 1, 1, [[AMT]]
-; PPC64: lwz 12, 8(1)
-; PPC64: mtocrf 32, 12
-; PPC64: .cfi_endproc
-
define i32 @test_cr234() nounwind {
+; PPC32-LABEL: test_cr234:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: mflr 0
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: stw 0, 36(1)
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: li 3, 1
+; PPC32-NEXT: li 4, 2
+; PPC32-NEXT: li 5, 3
+; PPC32-NEXT: li 6, 0
+; PPC32-NEXT: #APP
+; PPC32-EMPTY:
+; PPC32-NEXT: mtcr 6
+; PPC32-NEXT: cmpw 2, 4, 3
+; PPC32-NEXT: cmpw 3, 4, 4
+; PPC32-NEXT: cmpw 4, 4, 5
+; PPC32-NEXT: mfcr 3
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: stw 3, 20(31)
+; PPC32-NEXT: bl foo
+; PPC32-NEXT: lwz 3, 20(31)
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: mtocrf 16, 12
+; PPC32-NEXT: mtocrf 8, 12
+; PPC32-NEXT: lwz 0, 36(1)
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: mtlr 0
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: test_cr234:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mflr 0
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: stdu 1, -128(1)
+; PPC64-NEXT: std 0, 144(1)
+; PPC64-NEXT: li 3, 1
+; PPC64-NEXT: li 4, 2
+; PPC64-NEXT: li 5, 3
+; PPC64-NEXT: li 6, 0
+; PPC64-NEXT: #APP
+; PPC64-EMPTY:
+; PPC64-NEXT: mtcr 6
+; PPC64-NEXT: cmpw 2, 4, 3
+; PPC64-NEXT: cmpw 3, 4, 4
+; PPC64-NEXT: cmpw 4, 4, 5
+; PPC64-NEXT: mfcr 3
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: stw 3, 124(1)
+; PPC64-NEXT: bl foo
+; PPC64-NEXT: nop
+; PPC64-NEXT: lwz 3, 124(1)
+; PPC64-NEXT: addi 1, 1, 128
+; PPC64-NEXT: ld 0, 16(1)
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: mtocrf 16, 12
+; PPC64-NEXT: mtocrf 8, 12
+; PPC64-NEXT: mtlr 0
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: test_cr234:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mflr 0
+; PPC64-ELFv2-NEXT: mfcr 12
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: stdu 1, -112(1)
+; PPC64-ELFv2-NEXT: std 0, 128(1)
+; PPC64-ELFv2-NEXT: li 3, 1
+; PPC64-ELFv2-NEXT: li 4, 2
+; PPC64-ELFv2-NEXT: li 5, 3
+; PPC64-ELFv2-NEXT: li 6, 0
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-EMPTY:
+; PPC64-ELFv2-NEXT: mtcr 6
+; PPC64-ELFv2-NEXT: cmpw 2, 4, 3
+; PPC64-ELFv2-NEXT: cmpw 3, 4, 4
+; PPC64-ELFv2-NEXT: cmpw 4, 4, 5
+; PPC64-ELFv2-NEXT: mfcr 3
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: stw 3, 108(1)
+; PPC64-ELFv2-NEXT: bl foo
+; PPC64-ELFv2-NEXT: nop
+; PPC64-ELFv2-NEXT: lwz 3, 108(1)
+; PPC64-ELFv2-NEXT: addi 1, 1, 112
+; PPC64-ELFv2-NEXT: ld 0, 16(1)
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: mtocrf 16, 12
+; PPC64-ELFv2-NEXT: mtocrf 8, 12
+; PPC64-ELFv2-NEXT: mtlr 0
+; PPC64-ELFv2-NEXT: blr
entry:
%ret = alloca i32, align 4
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09cmpw 3,$2,$2\0A\09cmpw 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind
@@ -44,41 +217,102 @@ entry:
ret i32 %1
}
-; PPC32-LABEL: test_cr234:
-; PPC32: stwu 1, -32(1)
-; PPC32: stw 31, 28(1)
-; PPC32: mfcr 12
-; PPC32-NEXT: stw 12, 24(31)
-; PPC32: lwz 12, 24(31)
-; PPC32-NEXT: mtocrf 32, 12
-; PPC32-NEXT: mtocrf 16, 12
-; PPC32-NEXT: mtocrf 8, 12
-
-; PPC64: mfcr 12
-; PPC64: stw 12, 8(1)
-; PPC64: stdu 1, -[[AMT:[0-9]+]](1)
-; PPC64: addi 1, 1, [[AMT]]
-; PPC64: lwz 12, 8(1)
-; PPC64: mtocrf 32, 12
-; PPC64: mtocrf 16, 12
-; PPC64: mtocrf 8, 12
-
; Generate mfocrf in prologue when we need to save 1 nonvolatile CR field
define void @cloberOneNvCrField() {
+; PPC32-LABEL: cloberOneNvCrField:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: .cfi_def_cfa_offset 32
+; PPC32-NEXT: .cfi_offset r31, -4
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: .cfi_def_cfa_register r31
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: #APP
+; PPC32-NEXT: # clobbers
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: cloberOneNvCrField:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: #APP
+; PPC64-NEXT: # clobbers
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: cloberOneNvCrField:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mfocrf 12, 32
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-NEXT: # clobbers
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: blr
entry:
tail call void asm sideeffect "# clobbers", "~{cr2}"()
ret void
-
-; PPC64-ELFv2-LABEL: @cloberOneNvCrField
-; PPC64-ELFv2: mfocrf [[REG1:[0-9]+]], 32
}
; Generate mfcr in prologue when we need to save all nonvolatile CR field
define void @cloberAllNvCrField() {
+; PPC32-LABEL: cloberAllNvCrField:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: .cfi_def_cfa_offset 32
+; PPC32-NEXT: .cfi_offset r31, -4
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: .cfi_def_cfa_register r31
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: #APP
+; PPC32-NEXT: # clobbers
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: mtocrf 16, 12
+; PPC32-NEXT: mtocrf 8, 12
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: cloberAllNvCrField:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: #APP
+; PPC64-NEXT: # clobbers
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: mtocrf 16, 12
+; PPC64-NEXT: mtocrf 8, 12
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: cloberAllNvCrField:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mfcr 12
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-NEXT: # clobbers
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: mtocrf 16, 12
+; PPC64-ELFv2-NEXT: mtocrf 8, 12
+; PPC64-ELFv2-NEXT: blr
entry:
tail call void asm sideeffect "# clobbers", "~{cr2},~{cr3},~{cr4}"()
ret void
-
-; PPC64-ELFv2-LABEL: @cloberAllNvCrField
-; PPC64-ELFv2: mfcr [[REG1:[0-9]+]]
}
diff --git a/llvm/test/CodeGen/PowerPC/ctrloops-pseudo.ll b/llvm/test/CodeGen/PowerPC/ctrloops-pseudo.ll
index e7c49c9dcc7d..9d2d70d5a4b9 100644
--- a/llvm/test/CodeGen/PowerPC/ctrloops-pseudo.ll
+++ b/llvm/test/CodeGen/PowerPC/ctrloops-pseudo.ll
@@ -35,6 +35,7 @@ define void @test1(i32 %c) nounwind {
; AIX64-NEXT: {{ $}}
; AIX64-NEXT: bb.2.for.end:
; AIX64-NEXT: BLR8 implicit $lr8, implicit $rm
+ ;
; AIX32-LABEL: name: test1
; AIX32: bb.0.entry:
; AIX32-NEXT: successors: %bb.1(0x80000000)
@@ -57,6 +58,7 @@ define void @test1(i32 %c) nounwind {
; AIX32-NEXT: {{ $}}
; AIX32-NEXT: bb.2.for.end:
; AIX32-NEXT: BLR implicit $lr, implicit $rm
+ ;
; LE64-LABEL: name: test1
; LE64: bb.0.entry:
; LE64-NEXT: successors: %bb.1(0x80000000)
@@ -134,6 +136,7 @@ define void @test2(i32 %c, i32 %d) nounwind {
; AIX64-NEXT: {{ $}}
; AIX64-NEXT: bb.3.for.end:
; AIX64-NEXT: BLR8 implicit $lr8, implicit $rm
+ ;
; AIX32-LABEL: name: test2
; AIX32: bb.0.entry:
; AIX32-NEXT: successors: %bb.1(0x50000000), %bb.3(0x30000000)
@@ -163,6 +166,7 @@ define void @test2(i32 %c, i32 %d) nounwind {
; AIX32-NEXT: {{ $}}
; AIX32-NEXT: bb.3.for.end:
; AIX32-NEXT: BLR implicit $lr, implicit $rm
+ ;
; LE64-LABEL: name: test2
; LE64: bb.0.entry:
; LE64-NEXT: successors: %bb.1(0x50000000), %bb.3(0x30000000)
@@ -257,6 +261,7 @@ define void @test3(i32 %c, i32 %d) nounwind {
; AIX64-NEXT: {{ $}}
; AIX64-NEXT: bb.3.for.end:
; AIX64-NEXT: BLR8 implicit $lr8, implicit $rm
+ ;
; AIX32-LABEL: name: test3
; AIX32: bb.0.entry:
; AIX32-NEXT: successors: %bb.1(0x50000000), %bb.3(0x30000000)
@@ -289,6 +294,7 @@ define void @test3(i32 %c, i32 %d) nounwind {
; AIX32-NEXT: {{ $}}
; AIX32-NEXT: bb.3.for.end:
; AIX32-NEXT: BLR implicit $lr, implicit $rm
+ ;
; LE64-LABEL: name: test3
; LE64: bb.0.entry:
; LE64-NEXT: successors: %bb.1(0x50000000), %bb.3(0x30000000)
@@ -352,15 +358,23 @@ for.end: ; preds = %for.body, %entry
define i32 @test4(i32 %inp) {
; AIX64-LABEL: name: test4
; AIX64: bb.0.entry:
- ; AIX64-NEXT: successors: %bb.1(0x80000000)
+ ; AIX64-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
; AIX64-NEXT: liveins: $x3
; AIX64-NEXT: {{ $}}
; AIX64-NEXT: [[COPY:%[0-9]+]]:g8rc = COPY $x3
; AIX64-NEXT: [[COPY1:%[0-9]+]]:gprc_and_gprc_nor0 = COPY [[COPY]].sub_32
; AIX64-NEXT: [[CMPWI:%[0-9]+]]:crrc = CMPWI [[COPY1]], 1
; AIX64-NEXT: [[LI:%[0-9]+]]:gprc_and_gprc_nor0 = LI 1
- ; AIX64-NEXT: [[ISEL:%[0-9]+]]:gprc = ISEL [[COPY1]], [[LI]], [[CMPWI]].sub_lt
- ; AIX64-NEXT: [[SUBF:%[0-9]+]]:gprc = SUBF [[ISEL]], [[COPY1]]
+ ; AIX64-NEXT: BCC 12, [[CMPWI]], %bb.4
+ ; AIX64-NEXT: {{ $}}
+ ; AIX64-NEXT: bb.3.entry:
+ ; AIX64-NEXT: successors: %bb.4(0x80000000)
+ ; AIX64-NEXT: {{ $}}
+ ; AIX64-NEXT: bb.4.entry:
+ ; AIX64-NEXT: successors: %bb.1(0x80000000)
+ ; AIX64-NEXT: {{ $}}
+ ; AIX64-NEXT: [[PHI:%[0-9]+]]:gprc = PHI [[LI]], %bb.3, [[COPY1]], %bb.0
+ ; AIX64-NEXT: [[SUBF:%[0-9]+]]:gprc = SUBF [[PHI]], [[COPY1]]
; AIX64-NEXT: [[DEF:%[0-9]+]]:g8rc = IMPLICIT_DEF
; AIX64-NEXT: [[INSERT_SUBREG:%[0-9]+]]:g8rc = INSERT_SUBREG [[DEF]], killed [[SUBF]], %subreg.sub_32
; AIX64-NEXT: [[RLDICL:%[0-9]+]]:g8rc_and_g8rc_nox0 = RLDICL killed [[INSERT_SUBREG]], 0, 32
@@ -379,21 +393,30 @@ define i32 @test4(i32 %inp) {
; AIX64-NEXT: [[LDtoc1:%[0-9]+]]:g8rc = LDtoc target-flags(ppc-tlsgd) @tls_var, $x2 :: (load (s64) from got)
; AIX64-NEXT: [[TLSGDAIX8_:%[0-9]+]]:g8rc = TLSGDAIX8 killed [[LDtoc1]], killed [[LDtoc]]
; AIX64-NEXT: [[COPY2:%[0-9]+]]:gprc = COPY [[TLSGDAIX8_]].sub_32
- ; AIX64-NEXT: [[ADD4_:%[0-9]+]]:gprc = ADD4 killed [[COPY2]], [[ISEL]]
+ ; AIX64-NEXT: [[ADD4_:%[0-9]+]]:gprc = ADD4 killed [[COPY2]], [[PHI]]
; AIX64-NEXT: [[DEF1:%[0-9]+]]:g8rc = IMPLICIT_DEF
; AIX64-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:g8rc = INSERT_SUBREG [[DEF1]], killed [[ADD4_]], %subreg.sub_32
; AIX64-NEXT: $x3 = COPY [[INSERT_SUBREG1]]
; AIX64-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
+ ;
; AIX32-LABEL: name: test4
; AIX32: bb.0.entry:
- ; AIX32-NEXT: successors: %bb.1(0x80000000)
+ ; AIX32-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
; AIX32-NEXT: liveins: $r3
; AIX32-NEXT: {{ $}}
; AIX32-NEXT: [[COPY:%[0-9]+]]:gprc_and_gprc_nor0 = COPY $r3
; AIX32-NEXT: [[CMPWI:%[0-9]+]]:crrc = CMPWI [[COPY]], 1
; AIX32-NEXT: [[LI:%[0-9]+]]:gprc_and_gprc_nor0 = LI 1
- ; AIX32-NEXT: [[ISEL:%[0-9]+]]:gprc = ISEL [[COPY]], [[LI]], [[CMPWI]].sub_lt
- ; AIX32-NEXT: [[SUBF:%[0-9]+]]:gprc_and_gprc_nor0 = SUBF [[ISEL]], [[COPY]]
+ ; AIX32-NEXT: BCC 12, [[CMPWI]], %bb.4
+ ; AIX32-NEXT: {{ $}}
+ ; AIX32-NEXT: bb.3.entry:
+ ; AIX32-NEXT: successors: %bb.4(0x80000000)
+ ; AIX32-NEXT: {{ $}}
+ ; AIX32-NEXT: bb.4.entry:
+ ; AIX32-NEXT: successors: %bb.1(0x80000000)
+ ; AIX32-NEXT: {{ $}}
+ ; AIX32-NEXT: [[PHI:%[0-9]+]]:gprc = PHI [[LI]], %bb.3, [[COPY]], %bb.0
+ ; AIX32-NEXT: [[SUBF:%[0-9]+]]:gprc_and_gprc_nor0 = SUBF [[PHI]], [[COPY]]
; AIX32-NEXT: [[ADDI:%[0-9]+]]:gprc = ADDI killed [[SUBF]], 1
; AIX32-NEXT: MTCTRloop killed [[ADDI]], implicit-def dead $ctr
; AIX32-NEXT: {{ $}}
@@ -408,9 +431,10 @@ define i32 @test4(i32 %inp) {
; AIX32-NEXT: [[LWZtoc:%[0-9]+]]:gprc = LWZtoc target-flags(ppc-tlsgdm) @tls_var, $r2 :: (load (s32) from got)
; AIX32-NEXT: [[LWZtoc1:%[0-9]+]]:gprc = LWZtoc target-flags(ppc-tlsgd) @tls_var, $r2 :: (load (s32) from got)
; AIX32-NEXT: [[TLSGDAIX:%[0-9]+]]:gprc = TLSGDAIX killed [[LWZtoc1]], killed [[LWZtoc]]
- ; AIX32-NEXT: [[ADD4_:%[0-9]+]]:gprc = ADD4 killed [[TLSGDAIX]], [[ISEL]]
+ ; AIX32-NEXT: [[ADD4_:%[0-9]+]]:gprc = ADD4 killed [[TLSGDAIX]], [[PHI]]
; AIX32-NEXT: $r3 = COPY [[ADD4_]]
; AIX32-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
+ ;
; LE64-LABEL: name: test4
; LE64: bb.0.entry:
; LE64-NEXT: successors: %bb.1(0x80000000)
diff --git a/llvm/test/CodeGen/PowerPC/expand-isel-to-branch.ll b/llvm/test/CodeGen/PowerPC/expand-isel-to-branch.ll
index 6f3e9f78b317..f46225c6137f 100644
--- a/llvm/test/CodeGen/PowerPC/expand-isel-to-branch.ll
+++ b/llvm/test/CodeGen/PowerPC/expand-isel-to-branch.ll
@@ -6,13 +6,13 @@ define noundef signext i32 @ham(ptr nocapture noundef %arg) #0 {
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: lwz 4, 0(3)
; CHECK-NEXT: cmpwi 4, 750
-; CHECK-NEXT: addi 5, 4, 1
+; CHECK-NEXT: blt 0, L..BB0_2
+; CHECK-NEXT: # %bb.1: # %bb
; CHECK-NEXT: li 4, 1
-; CHECK-NEXT: bc 12, 0, L..BB0_1
-; CHECK-NEXT: b L..BB0_2
-; CHECK-NEXT: L..BB0_1: # %bb
-; CHECK-NEXT: addi 4, 5, 0
-; CHECK-NEXT: L..BB0_2: # %bb
+; CHECK-NEXT: b L..BB0_3
+; CHECK-NEXT: L..BB0_2:
+; CHECK-NEXT: addi 4, 4, 1
+; CHECK-NEXT: L..BB0_3: # %bb
; CHECK-NEXT: stw 4, 0(3)
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: blr
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-fcmp-spe.ll b/llvm/test/CodeGen/PowerPC/fp-strict-fcmp-spe.ll
index 6aae299786cc..c20d319f2ac7 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-fcmp-spe.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-fcmp-spe.ll
@@ -7,7 +7,7 @@ define i32 @test_f32_oeq_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-NEXT: efscmpeq cr0, r5, r6
; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"oeq", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -20,7 +20,7 @@ define i32 @test_f32_ogt_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-NEXT: efscmpgt cr0, r5, r6
; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"ogt", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -31,13 +31,15 @@ define i32 @test_f32_oge_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_oge_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpeq cr0, r6, r6
-; SPE-NEXT: efscmpeq cr1, r5, r5
-; SPE-NEXT: crand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: efscmplt cr0, r5, r6
-; SPE-NEXT: crandc 4*cr5+lt, 4*cr5+lt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bc 4, gt, .LBB2_3
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efscmpeq cr0, r5, r5
+; SPE-NEXT: bc 4, gt, .LBB2_3
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efscmplt cr0, r5, r6
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: .LBB2_3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"oge", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -50,7 +52,7 @@ define i32 @test_f32_olt_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-NEXT: efscmplt cr0, r5, r6
; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"olt", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -61,13 +63,15 @@ define i32 @test_f32_ole_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_ole_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpeq cr0, r6, r6
-; SPE-NEXT: efscmpeq cr1, r5, r5
-; SPE-NEXT: crand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: efscmpgt cr0, r5, r6
-; SPE-NEXT: crandc 4*cr5+lt, 4*cr5+lt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bc 4, gt, .LBB4_3
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efscmpeq cr0, r5, r5
+; SPE-NEXT: bc 4, gt, .LBB4_3
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efscmpgt cr0, r5, r6
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: .LBB4_3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"ole", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -78,11 +82,12 @@ define i32 @test_f32_one_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_one_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmplt cr0, r5, r6
-; SPE-NEXT: efscmpgt cr1, r5, r6
-; SPE-NEXT: cror 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efscmpgt cr0, r5, r6
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"one", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -93,11 +98,12 @@ define i32 @test_f32_ord_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_ord_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpeq cr0, r6, r6
-; SPE-NEXT: efscmpeq cr1, r5, r5
-; SPE-NEXT: crand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bc 4, gt, .LBB6_2
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efscmpeq cr0, r5, r5
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: .LBB6_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"ord", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -108,12 +114,14 @@ define i32 @test_f32_ueq_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_ueq_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmplt cr0, r5, r6
-; SPE-NEXT: efscmpgt cr1, r5, r6
-; SPE-NEXT: cror 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bc 12, 4*cr5+lt, .LBB7_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB7_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB7_3
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: efscmpgt cr0, r5, r6
+; SPE-NEXT: bc 12, gt, .LBB7_3
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB7_3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"ueq", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -124,13 +132,15 @@ define i32 @test_f32_ugt_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_ugt_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpeq cr0, r5, r5
-; SPE-NEXT: efscmpeq cr1, r6, r6
-; SPE-NEXT: crnand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: efscmpgt cr0, r5, r6
-; SPE-NEXT: cror 4*cr5+lt, gt, 4*cr5+lt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 4, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efscmpeq cr0, r6, r6
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efscmpgt cr0, r5, r6
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: # %bb.3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"ugt", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -141,10 +151,11 @@ define i32 @test_f32_uge_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_uge_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmplt cr0, r5, r6
-; SPE-NEXT: bc 12, gt, .LBB9_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB9_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB9_2
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB9_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"uge", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -155,13 +166,15 @@ define i32 @test_f32_ult_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_ult_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpeq cr0, r5, r5
-; SPE-NEXT: efscmpeq cr1, r6, r6
-; SPE-NEXT: crnand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: efscmplt cr0, r5, r6
-; SPE-NEXT: cror 4*cr5+lt, gt, 4*cr5+lt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 4, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efscmpeq cr0, r6, r6
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efscmplt cr0, r5, r6
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: # %bb.3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"ult", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -172,10 +185,11 @@ define i32 @test_f32_ule_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_ule_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpgt cr0, r5, r6
-; SPE-NEXT: bc 12, gt, .LBB11_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB11_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB11_2
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB11_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"ule", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -186,10 +200,11 @@ define i32 @test_f32_une_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_une_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpeq cr0, r5, r6
-; SPE-NEXT: bc 12, gt, .LBB12_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB12_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB12_2
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB12_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"une", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -200,11 +215,12 @@ define i32 @test_f32_uno_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
; SPE-LABEL: test_f32_uno_s:
; SPE: # %bb.0:
; SPE-NEXT: efscmpeq cr0, r5, r5
-; SPE-NEXT: efscmpeq cr1, r6, r6
-; SPE-NEXT: crnand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 4, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efscmpeq cr0, r6, r6
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f32(float %f1, float %f2, metadata !"uno", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -219,7 +235,7 @@ define i32 @test_f64_oeq_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: efdcmpeq cr0, r5, r7
; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"oeq", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -234,7 +250,7 @@ define i32 @test_f64_ogt_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: efdcmpgt cr0, r5, r7
; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"ogt", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -247,13 +263,15 @@ define i32 @test_f64_oge_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: evmergelo r6, r7, r8
; SPE-NEXT: efdcmpeq cr0, r6, r6
-; SPE-NEXT: efdcmpeq cr1, r5, r5
-; SPE-NEXT: efdcmplt cr5, r5, r6
-; SPE-NEXT: crand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: crandc 4*cr5+lt, 4*cr5+lt, 4*cr5+gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bc 4, gt, .LBB16_3
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efdcmpeq cr0, r5, r5
+; SPE-NEXT: bc 4, gt, .LBB16_3
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efdcmplt cr0, r5, r6
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: .LBB16_3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"oge", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -268,7 +286,7 @@ define i32 @test_f64_olt_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: efdcmplt cr0, r5, r7
; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"olt", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -281,13 +299,15 @@ define i32 @test_f64_ole_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: evmergelo r6, r7, r8
; SPE-NEXT: efdcmpeq cr0, r6, r6
-; SPE-NEXT: efdcmpeq cr1, r5, r5
-; SPE-NEXT: efdcmpgt cr5, r5, r6
-; SPE-NEXT: crand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: crandc 4*cr5+lt, 4*cr5+lt, 4*cr5+gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bc 4, gt, .LBB18_3
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efdcmpeq cr0, r5, r5
+; SPE-NEXT: bc 4, gt, .LBB18_3
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efdcmpgt cr0, r5, r6
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: .LBB18_3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"ole", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -300,11 +320,12 @@ define i32 @test_f64_one_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmplt cr0, r5, r7
-; SPE-NEXT: efdcmpgt cr1, r5, r7
-; SPE-NEXT: cror 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 12, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efdcmpgt cr0, r5, r7
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"one", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -317,11 +338,12 @@ define i32 @test_f64_ord_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: evmergelo r6, r7, r8
; SPE-NEXT: efdcmpeq cr0, r6, r6
-; SPE-NEXT: efdcmpeq cr1, r5, r5
-; SPE-NEXT: crand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bc 4, gt, .LBB20_2
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efdcmpeq cr0, r5, r5
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: .LBB20_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"ord", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -334,12 +356,14 @@ define i32 @test_f64_ueq_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmplt cr0, r5, r7
-; SPE-NEXT: efdcmpgt cr1, r5, r7
-; SPE-NEXT: cror 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bc 12, 4*cr5+lt, .LBB21_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB21_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB21_3
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: efdcmpgt cr0, r5, r7
+; SPE-NEXT: bc 12, gt, .LBB21_3
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB21_3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"ueq", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -352,13 +376,15 @@ define i32 @test_f64_ugt_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmpeq cr0, r5, r5
-; SPE-NEXT: efdcmpeq cr1, r7, r7
-; SPE-NEXT: efdcmpgt cr5, r5, r7
-; SPE-NEXT: crnand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: cror 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 4, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efdcmpeq cr0, r7, r7
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efdcmpgt cr0, r5, r7
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: # %bb.3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"ugt", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -371,10 +397,11 @@ define i32 @test_f64_uge_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmplt cr0, r5, r7
-; SPE-NEXT: bc 12, gt, .LBB23_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB23_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB23_2
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB23_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"uge", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -387,13 +414,15 @@ define i32 @test_f64_ult_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmpeq cr0, r5, r5
-; SPE-NEXT: efdcmpeq cr1, r7, r7
-; SPE-NEXT: efdcmplt cr5, r5, r7
-; SPE-NEXT: crnand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: cror 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 4, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efdcmpeq cr0, r7, r7
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: efdcmplt cr0, r5, r7
+; SPE-NEXT: bclr 12, gt, 0
+; SPE-NEXT: # %bb.3:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"ult", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -406,10 +435,11 @@ define i32 @test_f64_ule_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmpgt cr0, r5, r7
-; SPE-NEXT: bc 12, gt, .LBB25_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB25_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB25_2
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB25_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"ule", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -422,10 +452,11 @@ define i32 @test_f64_une_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmpeq cr0, r5, r7
-; SPE-NEXT: bc 12, gt, .LBB26_1
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB26_1:
-; SPE-NEXT: addi r3, r4, 0
+; SPE-NEXT: bc 12, gt, .LBB26_2
+; SPE-NEXT: # %bb.1:
+; SPE-NEXT: mr r4, r3
+; SPE-NEXT: .LBB26_2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"une", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
@@ -438,11 +469,12 @@ define i32 @test_f64_uno_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
; SPE-NEXT: evmergelo r7, r7, r8
; SPE-NEXT: evmergelo r5, r5, r6
; SPE-NEXT: efdcmpeq cr0, r5, r5
-; SPE-NEXT: efdcmpeq cr1, r7, r7
-; SPE-NEXT: crnand 4*cr5+lt, 4*cr1+gt, gt
-; SPE-NEXT: bclr 12, 4*cr5+lt, 0
+; SPE-NEXT: bclr 4, gt, 0
; SPE-NEXT: # %bb.1:
-; SPE-NEXT: ori r3, r4, 0
+; SPE-NEXT: efdcmpeq cr0, r7, r7
+; SPE-NEXT: bclr 4, gt, 0
+; SPE-NEXT: # %bb.2:
+; SPE-NEXT: mr r3, r4
; SPE-NEXT: blr
%cond = call i1 @llvm.experimental.constrained.fcmps.f64(double %f1, double %f2, metadata !"uno", metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
diff --git a/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll b/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
index 9cc42cf74b7f..11460349c90f 100644
--- a/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
@@ -66,61 +66,60 @@ define float @fooul(float %X) #0 {
; PPC64-LABEL: fooul:
; PPC64: # %bb.0: # %entry
; PPC64-NEXT: addis 3, 2, .LCPI2_0@toc@ha
-; PPC64-NEXT: li 4, 1
; PPC64-NEXT: lfs 0, .LCPI2_0@toc@l(3)
-; PPC64-NEXT: rldic 4, 4, 63, 0
; PPC64-NEXT: fsubs 2, 1, 0
; PPC64-NEXT: fcmpu 0, 1, 0
; PPC64-NEXT: fctidz 2, 2
; PPC64-NEXT: stfd 2, -8(1)
; PPC64-NEXT: fctidz 2, 1
; PPC64-NEXT: stfd 2, -16(1)
+; PPC64-NEXT: blt 0, .LBB2_2
+; PPC64-NEXT: # %bb.1: # %entry
; PPC64-NEXT: ld 3, -8(1)
-; PPC64-NEXT: ld 5, -16(1)
+; PPC64-NEXT: li 4, 1
+; PPC64-NEXT: rldic 4, 4, 63, 0
; PPC64-NEXT: xor 3, 3, 4
-; PPC64-NEXT: bc 12, 0, .LBB2_1
-; PPC64-NEXT: b .LBB2_2
-; PPC64-NEXT: .LBB2_1: # %entry
-; PPC64-NEXT: addi 3, 5, 0
-; PPC64-NEXT: .LBB2_2: # %entry
+; PPC64-NEXT: b .LBB2_3
+; PPC64-NEXT: .LBB2_2:
+; PPC64-NEXT: ld 3, -16(1)
+; PPC64-NEXT: .LBB2_3: # %entry
; PPC64-NEXT: sradi 4, 3, 53
-; PPC64-NEXT: rldicl 5, 3, 63, 1
; PPC64-NEXT: addi 4, 4, 1
-; PPC64-NEXT: clrldi 6, 3, 63
; PPC64-NEXT: cmpldi 4, 1
+; PPC64-NEXT: bgt 0, .LBB2_5
+; PPC64-NEXT: # %bb.4: # %entry
+; PPC64-NEXT: mr 4, 3
+; PPC64-NEXT: b .LBB2_6
+; PPC64-NEXT: .LBB2_5:
; PPC64-NEXT: clrldi 4, 3, 53
-; PPC64-NEXT: or 6, 6, 5
-; PPC64-NEXT: clrldi 7, 6, 53
; PPC64-NEXT: addi 4, 4, 2047
-; PPC64-NEXT: addi 7, 7, 2047
; PPC64-NEXT: or 4, 4, 3
-; PPC64-NEXT: or 5, 7, 5
-; PPC64-NEXT: rldicl 7, 3, 10, 54
; PPC64-NEXT: rldicr 4, 4, 0, 52
-; PPC64-NEXT: addi 7, 7, 1
-; PPC64-NEXT: bc 12, 1, .LBB2_4
-; PPC64-NEXT: # %bb.3: # %entry
-; PPC64-NEXT: ori 4, 3, 0
-; PPC64-NEXT: b .LBB2_4
-; PPC64-NEXT: .LBB2_4: # %entry
-; PPC64-NEXT: rldicl 5, 5, 53, 11
-; PPC64-NEXT: std 4, -32(1)
-; PPC64-NEXT: rldicl 4, 5, 11, 1
-; PPC64-NEXT: cmpldi 7, 1
-; PPC64-NEXT: bc 12, 1, .LBB2_6
-; PPC64-NEXT: # %bb.5: # %entry
-; PPC64-NEXT: ori 4, 6, 0
-; PPC64-NEXT: b .LBB2_6
; PPC64-NEXT: .LBB2_6: # %entry
+; PPC64-NEXT: rldicl 5, 3, 10, 54
+; PPC64-NEXT: clrldi 6, 3, 63
+; PPC64-NEXT: std 4, -32(1)
+; PPC64-NEXT: addi 5, 5, 1
+; PPC64-NEXT: cmpldi 5, 1
+; PPC64-NEXT: rldicl 5, 3, 63, 1
+; PPC64-NEXT: or 4, 6, 5
+; PPC64-NEXT: ble 0, .LBB2_8
+; PPC64-NEXT: # %bb.7:
+; PPC64-NEXT: clrldi 4, 4, 53
+; PPC64-NEXT: addi 4, 4, 2047
+; PPC64-NEXT: or 4, 4, 5
+; PPC64-NEXT: rldicl 4, 4, 53, 11
+; PPC64-NEXT: rldicl 4, 4, 11, 1
+; PPC64-NEXT: .LBB2_8: # %entry
; PPC64-NEXT: cmpdi 3, 0
; PPC64-NEXT: std 4, -24(1)
-; PPC64-NEXT: bc 12, 0, .LBB2_8
-; PPC64-NEXT: # %bb.7: # %entry
+; PPC64-NEXT: bc 12, 0, .LBB2_10
+; PPC64-NEXT: # %bb.9: # %entry
; PPC64-NEXT: lfd 0, -32(1)
; PPC64-NEXT: fcfid 0, 0
; PPC64-NEXT: frsp 1, 0
; PPC64-NEXT: blr
-; PPC64-NEXT: .LBB2_8:
+; PPC64-NEXT: .LBB2_10:
; PPC64-NEXT: lfd 0, -24(1)
; PPC64-NEXT: fcfid 0, 0
; PPC64-NEXT: frsp 0, 0
@@ -148,34 +147,34 @@ define double @fooudl(double %X) #0 {
; PPC64-LABEL: fooudl:
; PPC64: # %bb.0: # %entry
; PPC64-NEXT: addis 3, 2, .LCPI3_0@toc@ha
-; PPC64-NEXT: li 4, 1
; PPC64-NEXT: lfs 0, .LCPI3_0@toc@l(3)
-; PPC64-NEXT: rldic 4, 4, 63, 0
; PPC64-NEXT: fsub 2, 1, 0
; PPC64-NEXT: fcmpu 0, 1, 0
; PPC64-NEXT: fctidz 2, 2
; PPC64-NEXT: stfd 2, -8(1)
; PPC64-NEXT: fctidz 2, 1
; PPC64-NEXT: stfd 2, -16(1)
+; PPC64-NEXT: blt 0, .LBB3_2
+; PPC64-NEXT: # %bb.1: # %entry
; PPC64-NEXT: ld 3, -8(1)
-; PPC64-NEXT: ld 5, -16(1)
+; PPC64-NEXT: li 4, 1
+; PPC64-NEXT: rldic 4, 4, 63, 0
; PPC64-NEXT: xor 3, 3, 4
-; PPC64-NEXT: li 4, 1107
-; PPC64-NEXT: rldic 4, 4, 52, 1
-; PPC64-NEXT: bc 12, 0, .LBB3_1
-; PPC64-NEXT: b .LBB3_2
-; PPC64-NEXT: .LBB3_1: # %entry
-; PPC64-NEXT: addi 3, 5, 0
-; PPC64-NEXT: .LBB3_2: # %entry
-; PPC64-NEXT: rldicl 5, 3, 32, 32
+; PPC64-NEXT: b .LBB3_3
+; PPC64-NEXT: .LBB3_2:
+; PPC64-NEXT: ld 3, -16(1)
+; PPC64-NEXT: .LBB3_3: # %entry
+; PPC64-NEXT: li 5, 1107
+; PPC64-NEXT: rldicl 4, 3, 32, 32
+; PPC64-NEXT: rldic 5, 5, 52, 1
; PPC64-NEXT: clrldi 3, 3, 32
-; PPC64-NEXT: or 4, 5, 4
-; PPC64-NEXT: addis 5, 2, .LCPI3_1@toc@ha
+; PPC64-NEXT: or 4, 4, 5
+; PPC64-NEXT: li 5, 1075
; PPC64-NEXT: std 4, -24(1)
-; PPC64-NEXT: li 4, 1075
-; PPC64-NEXT: rldic 4, 4, 52, 1
+; PPC64-NEXT: addis 4, 2, .LCPI3_1@toc@ha
+; PPC64-NEXT: lfd 0, .LCPI3_1@toc@l(4)
+; PPC64-NEXT: rldic 4, 5, 52, 1
; PPC64-NEXT: or 3, 3, 4
-; PPC64-NEXT: lfd 0, .LCPI3_1@toc@l(5)
; PPC64-NEXT: std 3, -32(1)
; PPC64-NEXT: lfd 1, -24(1)
; PPC64-NEXT: lfd 2, -32(1)
@@ -269,12 +268,10 @@ define double @si1_to_f64(i1 %X) #0 {
; PPC64-LABEL: si1_to_f64:
; PPC64: # %bb.0: # %entry
; PPC64-NEXT: andi. 3, 3, 1
-; PPC64-NEXT: li 4, -1
+; PPC64-NEXT: li 3, -1
+; PPC64-NEXT: bc 12, 1, .LBB6_2
+; PPC64-NEXT: # %bb.1: # %entry
; PPC64-NEXT: li 3, 0
-; PPC64-NEXT: bc 12, 1, .LBB6_1
-; PPC64-NEXT: b .LBB6_2
-; PPC64-NEXT: .LBB6_1: # %entry
-; PPC64-NEXT: addi 3, 4, 0
; PPC64-NEXT: .LBB6_2: # %entry
; PPC64-NEXT: std 3, -8(1)
; PPC64-NEXT: lfd 0, -8(1)
diff --git a/llvm/test/CodeGen/PowerPC/fptoui-be-crash.ll b/llvm/test/CodeGen/PowerPC/fptoui-be-crash.ll
index fc93f893b1d5..004d77eb3393 100644
--- a/llvm/test/CodeGen/PowerPC/fptoui-be-crash.ll
+++ b/llvm/test/CodeGen/PowerPC/fptoui-be-crash.ll
@@ -5,67 +5,65 @@ define dso_local void @calc_buffer() local_unnamed_addr #0 {
; CHECK-LABEL: calc_buffer:
; CHECK: # %bb.0:
; CHECK-NEXT: ld r3, 0(r3)
-; CHECK-NEXT: sradi r5, r3, 53
-; CHECK-NEXT: rldicl r6, r3, 63, 1
-; CHECK-NEXT: clrldi r7, r3, 63
+; CHECK-NEXT: sradi r4, r3, 53
+; CHECK-NEXT: addi r4, r4, 1
+; CHECK-NEXT: cmpldi r4, 1
+; CHECK-NEXT: bgt cr0, .LBB0_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mr r4, r3
+; CHECK-NEXT: b .LBB0_3
+; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: clrldi r4, r3, 53
-; CHECK-NEXT: addi r5, r5, 1
-; CHECK-NEXT: or r7, r7, r6
-; CHECK-NEXT: cmpldi r5, 1
-; CHECK-NEXT: clrldi r5, r7, 53
; CHECK-NEXT: addi r4, r4, 2047
-; CHECK-NEXT: addi r5, r5, 2047
-; CHECK-NEXT: or r5, r5, r6
-; CHECK-NEXT: rldicl r6, r3, 10, 54
; CHECK-NEXT: or r4, r4, r3
-; CHECK-NEXT: addi r6, r6, 1
-; CHECK-NEXT: rldicl r5, r5, 53, 11
-; CHECK-NEXT: cmpldi cr1, r6, 1
; CHECK-NEXT: rldicr r4, r4, 0, 52
-; CHECK-NEXT: rldicl r5, r5, 11, 1
-; CHECK-NEXT: bc 12, gt, .LBB0_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ori r4, r3, 0
-; CHECK-NEXT: b .LBB0_2
-; CHECK-NEXT: .LBB0_2:
-; CHECK-NEXT: bc 12, 4*cr1+gt, .LBB0_4
-; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: ori r5, r7, 0
-; CHECK-NEXT: b .LBB0_4
-; CHECK-NEXT: .LBB0_4:
-; CHECK-NEXT: cmpdi r3, 0
+; CHECK-NEXT: .LBB0_3:
+; CHECK-NEXT: rldicl r6, r3, 10, 54
; CHECK-NEXT: std r4, -32(r1)
-; CHECK-NEXT: std r5, -24(r1)
-; CHECK-NEXT: bc 12, lt, .LBB0_6
-; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: rldicl r5, r3, 63, 1
+; CHECK-NEXT: clrldi r4, r3, 63
+; CHECK-NEXT: addi r6, r6, 1
+; CHECK-NEXT: cmpldi r6, 1
+; CHECK-NEXT: or r4, r4, r5
+; CHECK-NEXT: ble cr0, .LBB0_5
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: clrldi r4, r4, 53
+; CHECK-NEXT: addi r4, r4, 2047
+; CHECK-NEXT: or r4, r4, r5
+; CHECK-NEXT: rldicl r4, r4, 53, 11
+; CHECK-NEXT: rldicl r4, r4, 11, 1
+; CHECK-NEXT: .LBB0_5:
+; CHECK-NEXT: cmpdi r3, 0
+; CHECK-NEXT: std r4, -24(r1)
+; CHECK-NEXT: bc 12, lt, .LBB0_7
+; CHECK-NEXT: # %bb.6:
; CHECK-NEXT: lfd f0, -32(r1)
; CHECK-NEXT: fcfid f0, f0
; CHECK-NEXT: frsp f0, f0
-; CHECK-NEXT: b .LBB0_7
-; CHECK-NEXT: .LBB0_6:
+; CHECK-NEXT: b .LBB0_8
+; CHECK-NEXT: .LBB0_7:
; CHECK-NEXT: lfd f0, -24(r1)
; CHECK-NEXT: fcfid f0, f0
; CHECK-NEXT: frsp f0, f0
; CHECK-NEXT: fadds f0, f0, f0
-; CHECK-NEXT: .LBB0_7:
+; CHECK-NEXT: .LBB0_8:
; CHECK-NEXT: addis r3, r2, .LCPI0_0@toc@ha
-; CHECK-NEXT: li r4, 1
; CHECK-NEXT: lfs f1, .LCPI0_0@toc@l(r3)
-; CHECK-NEXT: rldic r4, r4, 63, 0
; CHECK-NEXT: fsubs f2, f0, f1
; CHECK-NEXT: fctidz f2, f2
; CHECK-NEXT: stfd f2, -8(r1)
; CHECK-NEXT: fctidz f2, f0
+; CHECK-NEXT: fcmpu cr0, f0, f1
; CHECK-NEXT: stfd f2, -16(r1)
+; CHECK-NEXT: blt cr0, .LBB0_10
+; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: ld r3, -8(r1)
-; CHECK-NEXT: ld r5, -16(r1)
-; CHECK-NEXT: fcmpu cr0, f0, f1
+; CHECK-NEXT: li r4, 1
+; CHECK-NEXT: rldic r4, r4, 63, 0
; CHECK-NEXT: xor r3, r3, r4
-; CHECK-NEXT: bc 12, lt, .LBB0_8
-; CHECK-NEXT: b .LBB0_9
-; CHECK-NEXT: .LBB0_8:
-; CHECK-NEXT: addi r3, r5, 0
-; CHECK-NEXT: .LBB0_9:
+; CHECK-NEXT: std r3, 0(r3)
+; CHECK-NEXT: .LBB0_10:
+; CHECK-NEXT: ld r3, -16(r1)
; CHECK-NEXT: std r3, 0(r3)
%load_initial = load i64, ptr poison, align 8
%conv39 = uitofp i64 %load_initial to float
diff --git a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
index 72f8af9dfed5..12078adbbc2f 100644
--- a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
@@ -74,47 +74,27 @@ define i32 @rotl_i32(i32 %x, i32 %z) {
}
define i64 @rotl_i64(i64 %x, i64 %z) {
-; CHECK32_32-LABEL: rotl_i64:
-; CHECK32_32: # %bb.0:
-; CHECK32_32-NEXT: andi. 5, 6, 32
-; CHECK32_32-NEXT: clrlwi 5, 6, 27
-; CHECK32_32-NEXT: subfic 6, 5, 32
-; CHECK32_32-NEXT: bc 12, 2, .LBB4_2
-; CHECK32_32-NEXT: # %bb.1:
-; CHECK32_32-NEXT: ori 7, 3, 0
-; CHECK32_32-NEXT: ori 3, 4, 0
-; CHECK32_32-NEXT: b .LBB4_3
-; CHECK32_32-NEXT: .LBB4_2:
-; CHECK32_32-NEXT: addi 7, 4, 0
-; CHECK32_32-NEXT: .LBB4_3:
-; CHECK32_32-NEXT: srw 4, 7, 6
-; CHECK32_32-NEXT: slw 8, 3, 5
-; CHECK32_32-NEXT: srw 6, 3, 6
-; CHECK32_32-NEXT: slw 5, 7, 5
-; CHECK32_32-NEXT: or 3, 8, 4
-; CHECK32_32-NEXT: or 4, 5, 6
-; CHECK32_32-NEXT: blr
-;
-; CHECK32_64-LABEL: rotl_i64:
-; CHECK32_64: # %bb.0:
-; CHECK32_64-NEXT: andi. 5, 6, 32
-; CHECK32_64-NEXT: clrlwi 5, 6, 27
-; CHECK32_64-NEXT: bc 12, 2, .LBB4_2
-; CHECK32_64-NEXT: # %bb.1:
-; CHECK32_64-NEXT: ori 7, 3, 0
-; CHECK32_64-NEXT: ori 3, 4, 0
-; CHECK32_64-NEXT: b .LBB4_3
-; CHECK32_64-NEXT: .LBB4_2:
-; CHECK32_64-NEXT: addi 7, 4, 0
-; CHECK32_64-NEXT: .LBB4_3:
-; CHECK32_64-NEXT: subfic 6, 5, 32
-; CHECK32_64-NEXT: srw 4, 7, 6
-; CHECK32_64-NEXT: slw 8, 3, 5
-; CHECK32_64-NEXT: srw 6, 3, 6
-; CHECK32_64-NEXT: slw 5, 7, 5
-; CHECK32_64-NEXT: or 3, 8, 4
-; CHECK32_64-NEXT: or 4, 5, 6
-; CHECK32_64-NEXT: blr
+; CHECK32-LABEL: rotl_i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: andi. 5, 6, 32
+; CHECK32-NEXT: mr 5, 3
+; CHECK32-NEXT: bne 0, .LBB4_2
+; CHECK32-NEXT: # %bb.1:
+; CHECK32-NEXT: mr 5, 4
+; CHECK32-NEXT: .LBB4_2:
+; CHECK32-NEXT: clrlwi 6, 6, 27
+; CHECK32-NEXT: subfic 8, 6, 32
+; CHECK32-NEXT: srw 7, 5, 8
+; CHECK32-NEXT: bne 0, .LBB4_4
+; CHECK32-NEXT: # %bb.3:
+; CHECK32-NEXT: mr 4, 3
+; CHECK32-NEXT: .LBB4_4:
+; CHECK32-NEXT: slw 3, 4, 6
+; CHECK32-NEXT: srw 4, 4, 8
+; CHECK32-NEXT: slw 5, 5, 6
+; CHECK32-NEXT: or 3, 3, 7
+; CHECK32-NEXT: or 4, 5, 4
+; CHECK32-NEXT: blr
;
; CHECK64-LABEL: rotl_i64:
; CHECK64: # %bb.0:
@@ -224,47 +204,27 @@ define i32 @rotr_i32(i32 %x, i32 %z) {
}
define i64 @rotr_i64(i64 %x, i64 %z) {
-; CHECK32_32-LABEL: rotr_i64:
-; CHECK32_32: # %bb.0:
-; CHECK32_32-NEXT: andi. 5, 6, 32
-; CHECK32_32-NEXT: clrlwi 5, 6, 27
-; CHECK32_32-NEXT: subfic 6, 5, 32
-; CHECK32_32-NEXT: bc 12, 2, .LBB11_2
-; CHECK32_32-NEXT: # %bb.1:
-; CHECK32_32-NEXT: ori 7, 4, 0
-; CHECK32_32-NEXT: b .LBB11_3
-; CHECK32_32-NEXT: .LBB11_2:
-; CHECK32_32-NEXT: addi 7, 3, 0
-; CHECK32_32-NEXT: addi 3, 4, 0
-; CHECK32_32-NEXT: .LBB11_3:
-; CHECK32_32-NEXT: srw 4, 7, 5
-; CHECK32_32-NEXT: slw 8, 3, 6
-; CHECK32_32-NEXT: srw 5, 3, 5
-; CHECK32_32-NEXT: slw 6, 7, 6
-; CHECK32_32-NEXT: or 3, 8, 4
-; CHECK32_32-NEXT: or 4, 6, 5
-; CHECK32_32-NEXT: blr
-;
-; CHECK32_64-LABEL: rotr_i64:
-; CHECK32_64: # %bb.0:
-; CHECK32_64-NEXT: andi. 5, 6, 32
-; CHECK32_64-NEXT: clrlwi 5, 6, 27
-; CHECK32_64-NEXT: bc 12, 2, .LBB11_2
-; CHECK32_64-NEXT: # %bb.1:
-; CHECK32_64-NEXT: ori 7, 4, 0
-; CHECK32_64-NEXT: b .LBB11_3
-; CHECK32_64-NEXT: .LBB11_2:
-; CHECK32_64-NEXT: addi 7, 3, 0
-; CHECK32_64-NEXT: addi 3, 4, 0
-; CHECK32_64-NEXT: .LBB11_3:
-; CHECK32_64-NEXT: subfic 6, 5, 32
-; CHECK32_64-NEXT: srw 4, 7, 5
-; CHECK32_64-NEXT: slw 8, 3, 6
-; CHECK32_64-NEXT: srw 5, 3, 5
-; CHECK32_64-NEXT: slw 6, 7, 6
-; CHECK32_64-NEXT: or 3, 8, 4
-; CHECK32_64-NEXT: or 4, 6, 5
-; CHECK32_64-NEXT: blr
+; CHECK32-LABEL: rotr_i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: andi. 5, 6, 32
+; CHECK32-NEXT: mr 5, 3
+; CHECK32-NEXT: beq 0, .LBB11_2
+; CHECK32-NEXT: # %bb.1:
+; CHECK32-NEXT: mr 5, 4
+; CHECK32-NEXT: .LBB11_2:
+; CHECK32-NEXT: clrlwi 7, 6, 27
+; CHECK32-NEXT: srw 6, 5, 7
+; CHECK32-NEXT: beq 0, .LBB11_4
+; CHECK32-NEXT: # %bb.3:
+; CHECK32-NEXT: mr 4, 3
+; CHECK32-NEXT: .LBB11_4:
+; CHECK32-NEXT: subfic 3, 7, 32
+; CHECK32-NEXT: srw 7, 4, 7
+; CHECK32-NEXT: slw 4, 4, 3
+; CHECK32-NEXT: slw 5, 5, 3
+; CHECK32-NEXT: or 3, 4, 6
+; CHECK32-NEXT: or 4, 5, 7
+; CHECK32-NEXT: blr
;
; CHECK64-LABEL: rotr_i64:
; CHECK64: # %bb.0:
diff --git a/llvm/test/CodeGen/PowerPC/funnel-shift.ll b/llvm/test/CodeGen/PowerPC/funnel-shift.ll
index c766c950f0a5..be95233656f4 100644
--- a/llvm/test/CodeGen/PowerPC/funnel-shift.ll
+++ b/llvm/test/CodeGen/PowerPC/funnel-shift.ll
@@ -32,50 +32,31 @@ define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) {
}
define i64 @fshl_i64(i64 %x, i64 %y, i64 %z) {
-; CHECK32_32-LABEL: fshl_i64:
-; CHECK32_32: # %bb.0:
-; CHECK32_32-NEXT: andi. 7, 8, 32
-; CHECK32_32-NEXT: clrlwi 7, 8, 27
-; CHECK32_32-NEXT: subfic 8, 7, 32
-; CHECK32_32-NEXT: bc 12, 2, .LBB1_2
-; CHECK32_32-NEXT: # %bb.1:
-; CHECK32_32-NEXT: ori 9, 5, 0
-; CHECK32_32-NEXT: ori 3, 4, 0
-; CHECK32_32-NEXT: ori 4, 6, 0
-; CHECK32_32-NEXT: b .LBB1_3
-; CHECK32_32-NEXT: .LBB1_2:
-; CHECK32_32-NEXT: addi 9, 4, 0
-; CHECK32_32-NEXT: addi 4, 5, 0
-; CHECK32_32-NEXT: .LBB1_3:
-; CHECK32_32-NEXT: srw 5, 9, 8
-; CHECK32_32-NEXT: slw 3, 3, 7
-; CHECK32_32-NEXT: srw 4, 4, 8
-; CHECK32_32-NEXT: slw 6, 9, 7
-; CHECK32_32-NEXT: or 3, 3, 5
-; CHECK32_32-NEXT: or 4, 6, 4
-; CHECK32_32-NEXT: blr
-;
-; CHECK32_64-LABEL: fshl_i64:
-; CHECK32_64: # %bb.0:
-; CHECK32_64-NEXT: andi. 7, 8, 32
-; CHECK32_64-NEXT: clrlwi 7, 8, 27
-; CHECK32_64-NEXT: bc 12, 2, .LBB1_2
-; CHECK32_64-NEXT: # %bb.1:
-; CHECK32_64-NEXT: ori 9, 5, 0
-; CHECK32_64-NEXT: ori 3, 4, 0
-; CHECK32_64-NEXT: ori 5, 6, 0
-; CHECK32_64-NEXT: b .LBB1_3
-; CHECK32_64-NEXT: .LBB1_2:
-; CHECK32_64-NEXT: addi 9, 4, 0
-; CHECK32_64-NEXT: .LBB1_3:
-; CHECK32_64-NEXT: subfic 8, 7, 32
-; CHECK32_64-NEXT: srw 4, 9, 8
-; CHECK32_64-NEXT: slw 3, 3, 7
-; CHECK32_64-NEXT: srw 5, 5, 8
-; CHECK32_64-NEXT: slw 6, 9, 7
-; CHECK32_64-NEXT: or 3, 3, 4
-; CHECK32_64-NEXT: or 4, 6, 5
-; CHECK32_64-NEXT: blr
+; CHECK32-LABEL: fshl_i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: andi. 7, 8, 32
+; CHECK32-NEXT: mr 7, 5
+; CHECK32-NEXT: bne 0, .LBB1_2
+; CHECK32-NEXT: # %bb.1:
+; CHECK32-NEXT: mr 7, 4
+; CHECK32-NEXT: .LBB1_2:
+; CHECK32-NEXT: clrlwi 8, 8, 27
+; CHECK32-NEXT: subfic 9, 8, 32
+; CHECK32-NEXT: srw 10, 7, 9
+; CHECK32-NEXT: bne 0, .LBB1_4
+; CHECK32-NEXT: # %bb.3:
+; CHECK32-NEXT: mr 4, 3
+; CHECK32-NEXT: .LBB1_4:
+; CHECK32-NEXT: slw 3, 4, 8
+; CHECK32-NEXT: or 3, 3, 10
+; CHECK32-NEXT: bne 0, .LBB1_6
+; CHECK32-NEXT: # %bb.5:
+; CHECK32-NEXT: mr 6, 5
+; CHECK32-NEXT: .LBB1_6:
+; CHECK32-NEXT: srw 4, 6, 9
+; CHECK32-NEXT: slw 5, 7, 8
+; CHECK32-NEXT: or 4, 5, 4
+; CHECK32-NEXT: blr
;
; CHECK64-LABEL: fshl_i64:
; CHECK64: # %bb.0:
@@ -92,113 +73,177 @@ define i64 @fshl_i64(i64 %x, i64 %y, i64 %z) {
define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
; CHECK32_32-LABEL: fshl_i128:
; CHECK32_32: # %bb.0:
-; CHECK32_32-NEXT: lwz 11, 20(1)
-; CHECK32_32-NEXT: andi. 12, 11, 64
+; CHECK32_32-NEXT: stwu 1, -32(1)
+; CHECK32_32-NEXT: lwz 12, 52(1)
+; CHECK32_32-NEXT: stw 29, 20(1) # 4-byte Folded Spill
+; CHECK32_32-NEXT: andi. 11, 12, 64
; CHECK32_32-NEXT: mcrf 1, 0
-; CHECK32_32-NEXT: andi. 12, 11, 32
-; CHECK32_32-NEXT: clrlwi 11, 11, 27
-; CHECK32_32-NEXT: bc 12, 6, .LBB2_2
+; CHECK32_32-NEXT: mr 11, 6
+; CHECK32_32-NEXT: stw 30, 24(1) # 4-byte Folded Spill
+; CHECK32_32-NEXT: bne 0, .LBB2_2
; CHECK32_32-NEXT: # %bb.1:
-; CHECK32_32-NEXT: ori 4, 6, 0
-; CHECK32_32-NEXT: ori 12, 7, 0
-; CHECK32_32-NEXT: ori 3, 5, 0
-; CHECK32_32-NEXT: ori 5, 8, 0
-; CHECK32_32-NEXT: ori 6, 9, 0
-; CHECK32_32-NEXT: ori 7, 10, 0
-; CHECK32_32-NEXT: b .LBB2_3
+; CHECK32_32-NEXT: mr 11, 4
; CHECK32_32-NEXT: .LBB2_2:
-; CHECK32_32-NEXT: addi 12, 5, 0
-; CHECK32_32-NEXT: addi 5, 6, 0
-; CHECK32_32-NEXT: addi 6, 7, 0
-; CHECK32_32-NEXT: addi 7, 8, 0
-; CHECK32_32-NEXT: .LBB2_3:
-; CHECK32_32-NEXT: subfic 8, 11, 32
-; CHECK32_32-NEXT: bc 12, 2, .LBB2_5
-; CHECK32_32-NEXT: # %bb.4:
-; CHECK32_32-NEXT: ori 9, 12, 0
-; CHECK32_32-NEXT: ori 3, 4, 0
-; CHECK32_32-NEXT: ori 4, 5, 0
-; CHECK32_32-NEXT: ori 5, 6, 0
-; CHECK32_32-NEXT: ori 6, 7, 0
-; CHECK32_32-NEXT: b .LBB2_6
-; CHECK32_32-NEXT: .LBB2_5:
-; CHECK32_32-NEXT: addi 9, 4, 0
-; CHECK32_32-NEXT: addi 4, 12, 0
+; CHECK32_32-NEXT: mr 30, 7
+; CHECK32_32-NEXT: bne 1, .LBB2_4
+; CHECK32_32-NEXT: # %bb.3:
+; CHECK32_32-NEXT: mr 30, 5
+; CHECK32_32-NEXT: .LBB2_4:
+; CHECK32_32-NEXT: andi. 4, 12, 32
+; CHECK32_32-NEXT: mr 4, 30
+; CHECK32_32-NEXT: beq 0, .LBB2_18
+; CHECK32_32-NEXT: # %bb.5:
+; CHECK32_32-NEXT: beq 1, .LBB2_19
; CHECK32_32-NEXT: .LBB2_6:
-; CHECK32_32-NEXT: srw 7, 9, 8
-; CHECK32_32-NEXT: slw 3, 3, 11
-; CHECK32_32-NEXT: srw 10, 4, 8
-; CHECK32_32-NEXT: slw 9, 9, 11
-; CHECK32_32-NEXT: srw 12, 5, 8
-; CHECK32_32-NEXT: slw 0, 4, 11
-; CHECK32_32-NEXT: srw 6, 6, 8
-; CHECK32_32-NEXT: slw 8, 5, 11
-; CHECK32_32-NEXT: or 3, 3, 7
-; CHECK32_32-NEXT: or 4, 9, 10
-; CHECK32_32-NEXT: or 5, 0, 12
-; CHECK32_32-NEXT: or 6, 8, 6
+; CHECK32_32-NEXT: beq 0, .LBB2_20
+; CHECK32_32-NEXT: .LBB2_7:
+; CHECK32_32-NEXT: mr 5, 8
+; CHECK32_32-NEXT: beq 1, .LBB2_21
+; CHECK32_32-NEXT: .LBB2_8:
+; CHECK32_32-NEXT: mr 3, 5
+; CHECK32_32-NEXT: beq 0, .LBB2_22
+; CHECK32_32-NEXT: .LBB2_9:
+; CHECK32_32-NEXT: clrlwi 6, 12, 27
+; CHECK32_32-NEXT: bne 1, .LBB2_11
+; CHECK32_32-NEXT: .LBB2_10:
+; CHECK32_32-NEXT: mr 9, 7
+; CHECK32_32-NEXT: .LBB2_11:
+; CHECK32_32-NEXT: subfic 7, 6, 32
+; CHECK32_32-NEXT: mr 12, 9
+; CHECK32_32-NEXT: bne 0, .LBB2_13
+; CHECK32_32-NEXT: # %bb.12:
+; CHECK32_32-NEXT: mr 12, 5
+; CHECK32_32-NEXT: .LBB2_13:
+; CHECK32_32-NEXT: srw 5, 4, 7
+; CHECK32_32-NEXT: slw 11, 11, 6
+; CHECK32_32-NEXT: srw 0, 3, 7
+; CHECK32_32-NEXT: slw 4, 4, 6
+; CHECK32_32-NEXT: srw 30, 12, 7
+; CHECK32_32-NEXT: slw 29, 3, 6
+; CHECK32_32-NEXT: bne 1, .LBB2_15
+; CHECK32_32-NEXT: # %bb.14:
+; CHECK32_32-NEXT: mr 10, 8
+; CHECK32_32-NEXT: .LBB2_15:
+; CHECK32_32-NEXT: or 3, 11, 5
+; CHECK32_32-NEXT: or 4, 4, 0
+; CHECK32_32-NEXT: or 5, 29, 30
+; CHECK32_32-NEXT: bne 0, .LBB2_17
+; CHECK32_32-NEXT: # %bb.16:
+; CHECK32_32-NEXT: mr 10, 9
+; CHECK32_32-NEXT: .LBB2_17:
+; CHECK32_32-NEXT: srw 7, 10, 7
+; CHECK32_32-NEXT: slw 6, 12, 6
+; CHECK32_32-NEXT: or 6, 6, 7
+; CHECK32_32-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
+; CHECK32_32-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
+; CHECK32_32-NEXT: addi 1, 1, 32
; CHECK32_32-NEXT: blr
+; CHECK32_32-NEXT: .LBB2_18:
+; CHECK32_32-NEXT: mr 4, 11
+; CHECK32_32-NEXT: bne 1, .LBB2_6
+; CHECK32_32-NEXT: .LBB2_19:
+; CHECK32_32-NEXT: mr 5, 3
+; CHECK32_32-NEXT: bne 0, .LBB2_7
+; CHECK32_32-NEXT: .LBB2_20:
+; CHECK32_32-NEXT: mr 11, 5
+; CHECK32_32-NEXT: mr 5, 8
+; CHECK32_32-NEXT: bne 1, .LBB2_8
+; CHECK32_32-NEXT: .LBB2_21:
+; CHECK32_32-NEXT: mr 5, 6
+; CHECK32_32-NEXT: mr 3, 5
+; CHECK32_32-NEXT: bne 0, .LBB2_9
+; CHECK32_32-NEXT: .LBB2_22:
+; CHECK32_32-NEXT: mr 3, 30
+; CHECK32_32-NEXT: clrlwi 6, 12, 27
+; CHECK32_32-NEXT: beq 1, .LBB2_10
+; CHECK32_32-NEXT: b .LBB2_11
;
; CHECK32_64-LABEL: fshl_i128:
; CHECK32_64: # %bb.0:
-; CHECK32_64-NEXT: stwu 1, -16(1)
-; CHECK32_64-NEXT: lwz 11, 36(1)
-; CHECK32_64-NEXT: andi. 12, 11, 64
-; CHECK32_64-NEXT: stw 30, 8(1) # 4-byte Folded Spill
+; CHECK32_64-NEXT: stwu 1, -32(1)
+; CHECK32_64-NEXT: lwz 12, 52(1)
+; CHECK32_64-NEXT: andi. 11, 12, 64
+; CHECK32_64-NEXT: stw 29, 20(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mcrf 1, 0
-; CHECK32_64-NEXT: clrlwi 12, 11, 27
-; CHECK32_64-NEXT: andi. 11, 11, 32
-; CHECK32_64-NEXT: bc 12, 6, .LBB2_2
+; CHECK32_64-NEXT: mr 11, 6
+; CHECK32_64-NEXT: stw 30, 24(1) # 4-byte Folded Spill
+; CHECK32_64-NEXT: bne 0, .LBB2_2
; CHECK32_64-NEXT: # %bb.1:
-; CHECK32_64-NEXT: ori 4, 6, 0
-; CHECK32_64-NEXT: ori 30, 7, 0
-; CHECK32_64-NEXT: ori 3, 5, 0
-; CHECK32_64-NEXT: ori 7, 9, 0
-; CHECK32_64-NEXT: b .LBB2_3
+; CHECK32_64-NEXT: mr 11, 4
; CHECK32_64-NEXT: .LBB2_2:
-; CHECK32_64-NEXT: addi 30, 5, 0
-; CHECK32_64-NEXT: .LBB2_3:
-; CHECK32_64-NEXT: bc 12, 2, .LBB2_5
-; CHECK32_64-NEXT: # %bb.4:
-; CHECK32_64-NEXT: ori 5, 30, 0
-; CHECK32_64-NEXT: ori 3, 4, 0
-; CHECK32_64-NEXT: b .LBB2_6
-; CHECK32_64-NEXT: .LBB2_5:
-; CHECK32_64-NEXT: addi 5, 4, 0
+; CHECK32_64-NEXT: mr 30, 7
+; CHECK32_64-NEXT: bne 1, .LBB2_4
+; CHECK32_64-NEXT: # %bb.3:
+; CHECK32_64-NEXT: mr 30, 5
+; CHECK32_64-NEXT: .LBB2_4:
+; CHECK32_64-NEXT: andi. 4, 12, 32
+; CHECK32_64-NEXT: mr 4, 30
+; CHECK32_64-NEXT: beq 0, .LBB2_18
+; CHECK32_64-NEXT: # %bb.5:
+; CHECK32_64-NEXT: beq 1, .LBB2_19
; CHECK32_64-NEXT: .LBB2_6:
-; CHECK32_64-NEXT: bc 12, 6, .LBB2_8
-; CHECK32_64-NEXT: # %bb.7:
-; CHECK32_64-NEXT: ori 4, 8, 0
-; CHECK32_64-NEXT: ori 8, 10, 0
-; CHECK32_64-NEXT: b .LBB2_9
+; CHECK32_64-NEXT: beq 0, .LBB2_20
+; CHECK32_64-NEXT: .LBB2_7:
+; CHECK32_64-NEXT: mr 5, 8
+; CHECK32_64-NEXT: beq 1, .LBB2_21
; CHECK32_64-NEXT: .LBB2_8:
-; CHECK32_64-NEXT: addi 4, 6, 0
+; CHECK32_64-NEXT: mr 3, 5
+; CHECK32_64-NEXT: beq 0, .LBB2_22
; CHECK32_64-NEXT: .LBB2_9:
-; CHECK32_64-NEXT: subfic 11, 12, 32
-; CHECK32_64-NEXT: bc 12, 2, .LBB2_11
-; CHECK32_64-NEXT: # %bb.10:
-; CHECK32_64-NEXT: ori 0, 4, 0
-; CHECK32_64-NEXT: ori 4, 7, 0
-; CHECK32_64-NEXT: ori 7, 8, 0
-; CHECK32_64-NEXT: b .LBB2_12
+; CHECK32_64-NEXT: clrlwi 6, 12, 27
+; CHECK32_64-NEXT: bne 1, .LBB2_11
+; CHECK32_64-NEXT: .LBB2_10:
+; CHECK32_64-NEXT: mr 9, 7
; CHECK32_64-NEXT: .LBB2_11:
-; CHECK32_64-NEXT: addi 0, 30, 0
-; CHECK32_64-NEXT: .LBB2_12:
-; CHECK32_64-NEXT: srw 6, 5, 11
-; CHECK32_64-NEXT: lwz 30, 8(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: slw 3, 3, 12
-; CHECK32_64-NEXT: srw 9, 0, 11
-; CHECK32_64-NEXT: slw 5, 5, 12
-; CHECK32_64-NEXT: srw 10, 4, 11
-; CHECK32_64-NEXT: slw 0, 0, 12
-; CHECK32_64-NEXT: srw 7, 7, 11
-; CHECK32_64-NEXT: slw 8, 4, 12
-; CHECK32_64-NEXT: or 3, 3, 6
-; CHECK32_64-NEXT: or 4, 5, 9
-; CHECK32_64-NEXT: or 5, 0, 10
-; CHECK32_64-NEXT: or 6, 8, 7
-; CHECK32_64-NEXT: addi 1, 1, 16
+; CHECK32_64-NEXT: subfic 7, 6, 32
+; CHECK32_64-NEXT: mr 12, 9
+; CHECK32_64-NEXT: bne 0, .LBB2_13
+; CHECK32_64-NEXT: # %bb.12:
+; CHECK32_64-NEXT: mr 12, 5
+; CHECK32_64-NEXT: .LBB2_13:
+; CHECK32_64-NEXT: srw 5, 4, 7
+; CHECK32_64-NEXT: slw 11, 11, 6
+; CHECK32_64-NEXT: srw 0, 3, 7
+; CHECK32_64-NEXT: slw 4, 4, 6
+; CHECK32_64-NEXT: srw 30, 12, 7
+; CHECK32_64-NEXT: slw 29, 3, 6
+; CHECK32_64-NEXT: bne 1, .LBB2_15
+; CHECK32_64-NEXT: # %bb.14:
+; CHECK32_64-NEXT: mr 10, 8
+; CHECK32_64-NEXT: .LBB2_15:
+; CHECK32_64-NEXT: or 3, 11, 5
+; CHECK32_64-NEXT: or 4, 4, 0
+; CHECK32_64-NEXT: or 5, 29, 30
+; CHECK32_64-NEXT: bne 0, .LBB2_17
+; CHECK32_64-NEXT: # %bb.16:
+; CHECK32_64-NEXT: mr 10, 9
+; CHECK32_64-NEXT: .LBB2_17:
+; CHECK32_64-NEXT: srw 7, 10, 7
+; CHECK32_64-NEXT: slw 6, 12, 6
+; CHECK32_64-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
+; CHECK32_64-NEXT: or 6, 6, 7
+; CHECK32_64-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
+; CHECK32_64-NEXT: addi 1, 1, 32
; CHECK32_64-NEXT: blr
+; CHECK32_64-NEXT: .LBB2_18:
+; CHECK32_64-NEXT: mr 4, 11
+; CHECK32_64-NEXT: bne 1, .LBB2_6
+; CHECK32_64-NEXT: .LBB2_19:
+; CHECK32_64-NEXT: mr 5, 3
+; CHECK32_64-NEXT: bne 0, .LBB2_7
+; CHECK32_64-NEXT: .LBB2_20:
+; CHECK32_64-NEXT: mr 11, 5
+; CHECK32_64-NEXT: mr 5, 8
+; CHECK32_64-NEXT: bne 1, .LBB2_8
+; CHECK32_64-NEXT: .LBB2_21:
+; CHECK32_64-NEXT: mr 5, 6
+; CHECK32_64-NEXT: mr 3, 5
+; CHECK32_64-NEXT: bne 0, .LBB2_9
+; CHECK32_64-NEXT: .LBB2_22:
+; CHECK32_64-NEXT: mr 3, 30
+; CHECK32_64-NEXT: clrlwi 6, 12, 27
+; CHECK32_64-NEXT: beq 1, .LBB2_10
+; CHECK32_64-NEXT: b .LBB2_11
;
; CHECK64-LABEL: fshl_i128:
; CHECK64: # %bb.0:
@@ -235,11 +280,11 @@ define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_32-NEXT: .cfi_offset r29, -12
; CHECK32_32-NEXT: .cfi_offset r30, -8
; CHECK32_32-NEXT: stw 27, 12(1) # 4-byte Folded Spill
-; CHECK32_32-NEXT: mr 27, 3
+; CHECK32_32-NEXT: mr 27, 5
; CHECK32_32-NEXT: stw 28, 16(1) # 4-byte Folded Spill
-; CHECK32_32-NEXT: mr 28, 4
+; CHECK32_32-NEXT: mr 28, 3
; CHECK32_32-NEXT: stw 29, 20(1) # 4-byte Folded Spill
-; CHECK32_32-NEXT: mr 29, 5
+; CHECK32_32-NEXT: mr 29, 4
; CHECK32_32-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 30, 6
; CHECK32_32-NEXT: clrlwi 3, 7, 27
@@ -247,29 +292,31 @@ define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_32-NEXT: li 5, 0
; CHECK32_32-NEXT: li 6, 37
; CHECK32_32-NEXT: bl __umoddi3
-; CHECK32_32-NEXT: rotlwi 3, 30, 27
-; CHECK32_32-NEXT: slwi 5, 30, 27
-; CHECK32_32-NEXT: andi. 6, 4, 32
-; CHECK32_32-NEXT: rlwimi 3, 29, 27, 0, 4
-; CHECK32_32-NEXT: clrlwi 4, 4, 27
-; CHECK32_32-NEXT: subfic 6, 4, 32
-; CHECK32_32-NEXT: bc 12, 2, .LBB3_2
+; CHECK32_32-NEXT: rotlwi 5, 30, 27
+; CHECK32_32-NEXT: rlwimi 5, 27, 27, 0, 4
+; CHECK32_32-NEXT: andi. 3, 4, 32
+; CHECK32_32-NEXT: mr 6, 5
+; CHECK32_32-NEXT: bne 0, .LBB3_2
; CHECK32_32-NEXT: # %bb.1:
-; CHECK32_32-NEXT: ori 7, 3, 0
-; CHECK32_32-NEXT: ori 8, 28, 0
-; CHECK32_32-NEXT: ori 3, 5, 0
-; CHECK32_32-NEXT: b .LBB3_3
+; CHECK32_32-NEXT: mr 6, 29
; CHECK32_32-NEXT: .LBB3_2:
-; CHECK32_32-NEXT: addi 7, 28, 0
-; CHECK32_32-NEXT: addi 8, 27, 0
-; CHECK32_32-NEXT: .LBB3_3:
+; CHECK32_32-NEXT: clrlwi 4, 4, 27
+; CHECK32_32-NEXT: subfic 7, 4, 32
+; CHECK32_32-NEXT: srw 3, 6, 7
+; CHECK32_32-NEXT: bne 0, .LBB3_4
+; CHECK32_32-NEXT: # %bb.3:
+; CHECK32_32-NEXT: mr 29, 28
+; CHECK32_32-NEXT: .LBB3_4:
+; CHECK32_32-NEXT: slw 8, 29, 4
+; CHECK32_32-NEXT: or 3, 8, 3
+; CHECK32_32-NEXT: beq 0, .LBB3_6
+; CHECK32_32-NEXT: # %bb.5:
+; CHECK32_32-NEXT: slwi 5, 30, 27
+; CHECK32_32-NEXT: .LBB3_6:
+; CHECK32_32-NEXT: srw 5, 5, 7
+; CHECK32_32-NEXT: slw 4, 6, 4
+; CHECK32_32-NEXT: or 4, 4, 5
; CHECK32_32-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
-; CHECK32_32-NEXT: srw 5, 7, 6
-; CHECK32_32-NEXT: slw 8, 8, 4
-; CHECK32_32-NEXT: srw 6, 3, 6
-; CHECK32_32-NEXT: slw 4, 7, 4
-; CHECK32_32-NEXT: or 3, 8, 5
-; CHECK32_32-NEXT: or 4, 4, 6
; CHECK32_32-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
@@ -290,53 +337,46 @@ define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_64-NEXT: .cfi_offset r29, -12
; CHECK32_64-NEXT: .cfi_offset r30, -8
; CHECK32_64-NEXT: stw 27, 12(1) # 4-byte Folded Spill
-; CHECK32_64-NEXT: mr 27, 3
-; CHECK32_64-NEXT: clrlwi 3, 7, 27
+; CHECK32_64-NEXT: mr 27, 5
+; CHECK32_64-NEXT: li 5, 0
; CHECK32_64-NEXT: stw 28, 16(1) # 4-byte Folded Spill
-; CHECK32_64-NEXT: mr 28, 4
-; CHECK32_64-NEXT: mr 4, 8
+; CHECK32_64-NEXT: mr 28, 3
+; CHECK32_64-NEXT: clrlwi 3, 7, 27
; CHECK32_64-NEXT: stw 29, 20(1) # 4-byte Folded Spill
-; CHECK32_64-NEXT: mr 29, 5
-; CHECK32_64-NEXT: li 5, 0
+; CHECK32_64-NEXT: mr 29, 4
+; CHECK32_64-NEXT: mr 4, 8
; CHECK32_64-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 30, 6
; CHECK32_64-NEXT: li 6, 37
; CHECK32_64-NEXT: bl __umoddi3
-; CHECK32_64-NEXT: rotlwi 3, 30, 27
-; CHECK32_64-NEXT: andi. 5, 4, 32
-; CHECK32_64-NEXT: bc 12, 2, .LBB3_2
+; CHECK32_64-NEXT: rotlwi 5, 30, 27
+; CHECK32_64-NEXT: andi. 3, 4, 32
+; CHECK32_64-NEXT: rlwimi 5, 27, 27, 0, 4
+; CHECK32_64-NEXT: mr 6, 5
+; CHECK32_64-NEXT: bne 0, .LBB3_2
; CHECK32_64-NEXT: # %bb.1:
-; CHECK32_64-NEXT: ori 8, 28, 0
-; CHECK32_64-NEXT: b .LBB3_3
+; CHECK32_64-NEXT: mr 6, 29
; CHECK32_64-NEXT: .LBB3_2:
-; CHECK32_64-NEXT: addi 8, 27, 0
-; CHECK32_64-NEXT: .LBB3_3:
-; CHECK32_64-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: rlwimi 3, 29, 27, 0, 4
; CHECK32_64-NEXT: clrlwi 4, 4, 27
-; CHECK32_64-NEXT: bc 12, 2, .LBB3_5
-; CHECK32_64-NEXT: # %bb.4:
-; CHECK32_64-NEXT: ori 7, 3, 0
-; CHECK32_64-NEXT: b .LBB3_6
-; CHECK32_64-NEXT: .LBB3_5:
-; CHECK32_64-NEXT: addi 7, 28, 0
-; CHECK32_64-NEXT: .LBB3_6:
+; CHECK32_64-NEXT: subfic 7, 4, 32
+; CHECK32_64-NEXT: srw 3, 6, 7
+; CHECK32_64-NEXT: bne 0, .LBB3_4
+; CHECK32_64-NEXT: # %bb.3:
+; CHECK32_64-NEXT: mr 29, 28
+; CHECK32_64-NEXT: .LBB3_4:
+; CHECK32_64-NEXT: slw 8, 29, 4
+; CHECK32_64-NEXT: or 3, 8, 3
+; CHECK32_64-NEXT: beq 0, .LBB3_6
+; CHECK32_64-NEXT: # %bb.5:
; CHECK32_64-NEXT: slwi 5, 30, 27
+; CHECK32_64-NEXT: .LBB3_6:
+; CHECK32_64-NEXT: srw 5, 5, 7
+; CHECK32_64-NEXT: slw 4, 6, 4
; CHECK32_64-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: bc 12, 2, .LBB3_8
-; CHECK32_64-NEXT: # %bb.7:
-; CHECK32_64-NEXT: ori 3, 5, 0
-; CHECK32_64-NEXT: b .LBB3_8
-; CHECK32_64-NEXT: .LBB3_8:
-; CHECK32_64-NEXT: subfic 6, 4, 32
-; CHECK32_64-NEXT: slw 8, 8, 4
+; CHECK32_64-NEXT: or 4, 4, 5
; CHECK32_64-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: srw 9, 7, 6
-; CHECK32_64-NEXT: srw 5, 3, 6
-; CHECK32_64-NEXT: slw 4, 7, 4
-; CHECK32_64-NEXT: or 3, 8, 9
; CHECK32_64-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: or 4, 4, 5
+; CHECK32_64-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: lwz 0, 36(1)
; CHECK32_64-NEXT: addi 1, 1, 32
; CHECK32_64-NEXT: mtlr 0
@@ -453,50 +493,31 @@ define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) {
}
define i64 @fshr_i64(i64 %x, i64 %y, i64 %z) {
-; CHECK32_32-LABEL: fshr_i64:
-; CHECK32_32: # %bb.0:
-; CHECK32_32-NEXT: andi. 7, 8, 32
-; CHECK32_32-NEXT: clrlwi 7, 8, 27
-; CHECK32_32-NEXT: subfic 8, 7, 32
-; CHECK32_32-NEXT: bc 12, 2, .LBB10_2
-; CHECK32_32-NEXT: # %bb.1:
-; CHECK32_32-NEXT: ori 9, 4, 0
-; CHECK32_32-NEXT: ori 4, 5, 0
-; CHECK32_32-NEXT: b .LBB10_3
-; CHECK32_32-NEXT: .LBB10_2:
-; CHECK32_32-NEXT: addi 9, 5, 0
-; CHECK32_32-NEXT: addi 3, 4, 0
-; CHECK32_32-NEXT: addi 4, 6, 0
-; CHECK32_32-NEXT: .LBB10_3:
-; CHECK32_32-NEXT: srw 5, 9, 7
-; CHECK32_32-NEXT: slw 3, 3, 8
-; CHECK32_32-NEXT: srw 4, 4, 7
-; CHECK32_32-NEXT: slw 6, 9, 8
-; CHECK32_32-NEXT: or 3, 3, 5
-; CHECK32_32-NEXT: or 4, 6, 4
-; CHECK32_32-NEXT: blr
-;
-; CHECK32_64-LABEL: fshr_i64:
-; CHECK32_64: # %bb.0:
-; CHECK32_64-NEXT: andi. 7, 8, 32
-; CHECK32_64-NEXT: clrlwi 7, 8, 27
-; CHECK32_64-NEXT: bc 12, 2, .LBB10_2
-; CHECK32_64-NEXT: # %bb.1:
-; CHECK32_64-NEXT: ori 9, 4, 0
-; CHECK32_64-NEXT: b .LBB10_3
-; CHECK32_64-NEXT: .LBB10_2:
-; CHECK32_64-NEXT: addi 9, 5, 0
-; CHECK32_64-NEXT: addi 3, 4, 0
-; CHECK32_64-NEXT: addi 5, 6, 0
-; CHECK32_64-NEXT: .LBB10_3:
-; CHECK32_64-NEXT: subfic 8, 7, 32
-; CHECK32_64-NEXT: srw 4, 9, 7
-; CHECK32_64-NEXT: slw 3, 3, 8
-; CHECK32_64-NEXT: srw 5, 5, 7
-; CHECK32_64-NEXT: slw 6, 9, 8
-; CHECK32_64-NEXT: or 3, 3, 4
-; CHECK32_64-NEXT: or 4, 6, 5
-; CHECK32_64-NEXT: blr
+; CHECK32-LABEL: fshr_i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: andi. 7, 8, 32
+; CHECK32-NEXT: mr 7, 5
+; CHECK32-NEXT: beq 0, .LBB10_2
+; CHECK32-NEXT: # %bb.1:
+; CHECK32-NEXT: mr 7, 4
+; CHECK32-NEXT: .LBB10_2:
+; CHECK32-NEXT: clrlwi 8, 8, 27
+; CHECK32-NEXT: srw 10, 7, 8
+; CHECK32-NEXT: beq 0, .LBB10_4
+; CHECK32-NEXT: # %bb.3:
+; CHECK32-NEXT: mr 4, 3
+; CHECK32-NEXT: .LBB10_4:
+; CHECK32-NEXT: subfic 9, 8, 32
+; CHECK32-NEXT: slw 3, 4, 9
+; CHECK32-NEXT: or 3, 3, 10
+; CHECK32-NEXT: beq 0, .LBB10_6
+; CHECK32-NEXT: # %bb.5:
+; CHECK32-NEXT: mr 6, 5
+; CHECK32-NEXT: .LBB10_6:
+; CHECK32-NEXT: srw 4, 6, 8
+; CHECK32-NEXT: slw 5, 7, 9
+; CHECK32-NEXT: or 4, 5, 4
+; CHECK32-NEXT: blr
;
; CHECK64-LABEL: fshr_i64:
; CHECK64: # %bb.0:
@@ -525,11 +546,11 @@ define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_32-NEXT: .cfi_offset r29, -12
; CHECK32_32-NEXT: .cfi_offset r30, -8
; CHECK32_32-NEXT: stw 27, 12(1) # 4-byte Folded Spill
-; CHECK32_32-NEXT: mr 27, 3
+; CHECK32_32-NEXT: mr 27, 5
; CHECK32_32-NEXT: stw 28, 16(1) # 4-byte Folded Spill
-; CHECK32_32-NEXT: mr 28, 4
+; CHECK32_32-NEXT: mr 28, 3
; CHECK32_32-NEXT: stw 29, 20(1) # 4-byte Folded Spill
-; CHECK32_32-NEXT: mr 29, 5
+; CHECK32_32-NEXT: mr 29, 4
; CHECK32_32-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 30, 6
; CHECK32_32-NEXT: clrlwi 3, 7, 27
@@ -537,30 +558,32 @@ define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_32-NEXT: li 5, 0
; CHECK32_32-NEXT: li 6, 37
; CHECK32_32-NEXT: bl __umoddi3
-; CHECK32_32-NEXT: rotlwi 3, 30, 27
-; CHECK32_32-NEXT: addi 4, 4, 27
-; CHECK32_32-NEXT: slwi 5, 30, 27
-; CHECK32_32-NEXT: rlwimi 3, 29, 27, 0, 4
-; CHECK32_32-NEXT: andi. 6, 4, 32
-; CHECK32_32-NEXT: clrlwi 4, 4, 27
-; CHECK32_32-NEXT: subfic 6, 4, 32
-; CHECK32_32-NEXT: bc 12, 2, .LBB11_2
+; CHECK32_32-NEXT: rotlwi 5, 30, 27
+; CHECK32_32-NEXT: addi 3, 4, 27
+; CHECK32_32-NEXT: andi. 4, 3, 32
+; CHECK32_32-NEXT: rlwimi 5, 27, 27, 0, 4
+; CHECK32_32-NEXT: mr 4, 5
+; CHECK32_32-NEXT: beq 0, .LBB11_2
; CHECK32_32-NEXT: # %bb.1:
-; CHECK32_32-NEXT: ori 7, 28, 0
-; CHECK32_32-NEXT: ori 8, 27, 0
-; CHECK32_32-NEXT: b .LBB11_3
+; CHECK32_32-NEXT: mr 4, 29
; CHECK32_32-NEXT: .LBB11_2:
-; CHECK32_32-NEXT: addi 7, 3, 0
-; CHECK32_32-NEXT: addi 8, 28, 0
-; CHECK32_32-NEXT: addi 3, 5, 0
-; CHECK32_32-NEXT: .LBB11_3:
+; CHECK32_32-NEXT: clrlwi 6, 3, 27
+; CHECK32_32-NEXT: srw 3, 4, 6
+; CHECK32_32-NEXT: beq 0, .LBB11_4
+; CHECK32_32-NEXT: # %bb.3:
+; CHECK32_32-NEXT: mr 29, 28
+; CHECK32_32-NEXT: .LBB11_4:
+; CHECK32_32-NEXT: subfic 7, 6, 32
+; CHECK32_32-NEXT: slw 8, 29, 7
+; CHECK32_32-NEXT: or 3, 8, 3
+; CHECK32_32-NEXT: bne 0, .LBB11_6
+; CHECK32_32-NEXT: # %bb.5:
+; CHECK32_32-NEXT: slwi 5, 30, 27
+; CHECK32_32-NEXT: .LBB11_6:
+; CHECK32_32-NEXT: srw 5, 5, 6
+; CHECK32_32-NEXT: slw 4, 4, 7
+; CHECK32_32-NEXT: or 4, 4, 5
; CHECK32_32-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
-; CHECK32_32-NEXT: srw 5, 7, 4
-; CHECK32_32-NEXT: slw 8, 8, 6
-; CHECK32_32-NEXT: srw 4, 3, 4
-; CHECK32_32-NEXT: slw 6, 7, 6
-; CHECK32_32-NEXT: or 3, 8, 5
-; CHECK32_32-NEXT: or 4, 6, 4
; CHECK32_32-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
@@ -581,49 +604,47 @@ define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_64-NEXT: .cfi_offset r29, -12
; CHECK32_64-NEXT: .cfi_offset r30, -8
; CHECK32_64-NEXT: stw 27, 12(1) # 4-byte Folded Spill
-; CHECK32_64-NEXT: mr 27, 3
-; CHECK32_64-NEXT: clrlwi 3, 7, 27
+; CHECK32_64-NEXT: mr 27, 5
+; CHECK32_64-NEXT: li 5, 0
; CHECK32_64-NEXT: stw 28, 16(1) # 4-byte Folded Spill
-; CHECK32_64-NEXT: mr 28, 4
-; CHECK32_64-NEXT: mr 4, 8
+; CHECK32_64-NEXT: mr 28, 3
+; CHECK32_64-NEXT: clrlwi 3, 7, 27
; CHECK32_64-NEXT: stw 29, 20(1) # 4-byte Folded Spill
-; CHECK32_64-NEXT: mr 29, 5
-; CHECK32_64-NEXT: li 5, 0
+; CHECK32_64-NEXT: mr 29, 4
+; CHECK32_64-NEXT: mr 4, 8
; CHECK32_64-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 30, 6
; CHECK32_64-NEXT: li 6, 37
; CHECK32_64-NEXT: bl __umoddi3
-; CHECK32_64-NEXT: addi 4, 4, 27
-; CHECK32_64-NEXT: rotlwi 3, 30, 27
-; CHECK32_64-NEXT: andi. 5, 4, 32
-; CHECK32_64-NEXT: rlwimi 3, 29, 27, 0, 4
-; CHECK32_64-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: bc 12, 2, .LBB11_2
+; CHECK32_64-NEXT: rotlwi 5, 30, 27
+; CHECK32_64-NEXT: addi 3, 4, 27
+; CHECK32_64-NEXT: andi. 4, 3, 32
+; CHECK32_64-NEXT: rlwimi 5, 27, 27, 0, 4
+; CHECK32_64-NEXT: mr 4, 5
+; CHECK32_64-NEXT: beq 0, .LBB11_2
; CHECK32_64-NEXT: # %bb.1:
-; CHECK32_64-NEXT: ori 7, 28, 0
-; CHECK32_64-NEXT: ori 8, 27, 0
-; CHECK32_64-NEXT: b .LBB11_3
+; CHECK32_64-NEXT: mr 4, 29
; CHECK32_64-NEXT: .LBB11_2:
-; CHECK32_64-NEXT: addi 7, 3, 0
-; CHECK32_64-NEXT: addi 8, 28, 0
-; CHECK32_64-NEXT: .LBB11_3:
-; CHECK32_64-NEXT: clrlwi 4, 4, 27
-; CHECK32_64-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: slwi 5, 30, 27
-; CHECK32_64-NEXT: subfic 6, 4, 32
-; CHECK32_64-NEXT: bc 12, 2, .LBB11_4
-; CHECK32_64-NEXT: b .LBB11_5
+; CHECK32_64-NEXT: clrlwi 6, 3, 27
+; CHECK32_64-NEXT: srw 3, 4, 6
+; CHECK32_64-NEXT: beq 0, .LBB11_4
+; CHECK32_64-NEXT: # %bb.3:
+; CHECK32_64-NEXT: mr 29, 28
; CHECK32_64-NEXT: .LBB11_4:
-; CHECK32_64-NEXT: addi 3, 5, 0
-; CHECK32_64-NEXT: .LBB11_5:
-; CHECK32_64-NEXT: srw 9, 7, 4
-; CHECK32_64-NEXT: slw 8, 8, 6
+; CHECK32_64-NEXT: subfic 7, 6, 32
+; CHECK32_64-NEXT: slw 8, 29, 7
+; CHECK32_64-NEXT: or 3, 8, 3
+; CHECK32_64-NEXT: bne 0, .LBB11_6
+; CHECK32_64-NEXT: # %bb.5:
+; CHECK32_64-NEXT: slwi 5, 30, 27
+; CHECK32_64-NEXT: .LBB11_6:
+; CHECK32_64-NEXT: srw 5, 5, 6
+; CHECK32_64-NEXT: slw 4, 4, 7
; CHECK32_64-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: srw 4, 3, 4
-; CHECK32_64-NEXT: slw 5, 7, 6
+; CHECK32_64-NEXT: or 4, 4, 5
+; CHECK32_64-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
+; CHECK32_64-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
-; CHECK32_64-NEXT: or 3, 8, 9
-; CHECK32_64-NEXT: or 4, 5, 4
; CHECK32_64-NEXT: lwz 0, 36(1)
; CHECK32_64-NEXT: addi 1, 1, 32
; CHECK32_64-NEXT: mtlr 0
diff --git a/llvm/test/CodeGen/PowerPC/i1-to-double.ll b/llvm/test/CodeGen/PowerPC/i1-to-double.ll
index 0b8cdef87053..df5ed27056be 100644
--- a/llvm/test/CodeGen/PowerPC/i1-to-double.ll
+++ b/llvm/test/CodeGen/PowerPC/i1-to-double.ll
@@ -4,16 +4,16 @@
define double @test(i1 %X) {
; CHECK-LABEL: test:
; CHECK: # %bb.0:
-; CHECK-NEXT: li 4, .LCPI0_0@l
; CHECK-NEXT: andi. 3, 3, 1
-; CHECK-NEXT: addis 3, 4, .LCPI0_0@ha
-; CHECK-NEXT: li 4, .LCPI0_1@l
-; CHECK-NEXT: addis 4, 4, .LCPI0_1@ha
-; CHECK-NEXT: bc 12, 1, .LBB0_1
-; CHECK-NEXT: b .LBB0_2
-; CHECK-NEXT: .LBB0_1:
-; CHECK-NEXT: addi 3, 4, 0
+; CHECK-NEXT: bc 12, 1, .LBB0_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li 3, .LCPI0_0@l
+; CHECK-NEXT: addis 3, 3, .LCPI0_0@ha
+; CHECK-NEXT: lfs 1, 0(3)
+; CHECK-NEXT: blr
; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: li 3, .LCPI0_1@l
+; CHECK-NEXT: addis 3, 3, .LCPI0_1@ha
; CHECK-NEXT: lfs 1, 0(3)
; CHECK-NEXT: blr
%Y = uitofp i1 %X to double
@@ -27,17 +27,17 @@ define double @test(i1 %X) {
define double @u1tofp(i1 %i, double %d) #0 {
; CHECK-LABEL: u1tofp:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: li 4, .LCPI1_0@l
; CHECK-NEXT: andi. 3, 3, 1
-; CHECK-NEXT: addis 3, 4, .LCPI1_0@ha
-; CHECK-NEXT: li 4, .LCPI1_1@l
-; CHECK-NEXT: addis 4, 4, .LCPI1_1@ha
-; CHECK-NEXT: bc 12, 1, .LBB1_1
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1: # %entry
-; CHECK-NEXT: addi 3, 4, 0
-; CHECK-NEXT: .LBB1_2: # %entry
; CHECK-NEXT: fmr 0, 1
+; CHECK-NEXT: bc 12, 1, .LBB1_2
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: li 3, .LCPI1_0@l
+; CHECK-NEXT: addis 3, 3, .LCPI1_0@ha
+; CHECK-NEXT: b .LBB1_3
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: li 3, .LCPI1_1@l
+; CHECK-NEXT: addis 3, 3, .LCPI1_1@ha
+; CHECK-NEXT: .LBB1_3: # %entry
; CHECK-NEXT: lfs 1, 0(3)
; CHECK-NEXT: lis 3, foo@ha
; CHECK-NEXT: stfd 0, foo@l(3)
@@ -51,17 +51,17 @@ entry:
define double @s1tofp(i1 %i, double %d) #0 {
; CHECK-LABEL: s1tofp:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: li 4, .LCPI2_0@l
; CHECK-NEXT: andi. 3, 3, 1
-; CHECK-NEXT: addis 3, 4, .LCPI2_0@ha
-; CHECK-NEXT: li 4, .LCPI2_1@l
-; CHECK-NEXT: addis 4, 4, .LCPI2_1@ha
-; CHECK-NEXT: bc 12, 1, .LBB2_1
-; CHECK-NEXT: b .LBB2_2
-; CHECK-NEXT: .LBB2_1: # %entry
-; CHECK-NEXT: addi 3, 4, 0
-; CHECK-NEXT: .LBB2_2: # %entry
; CHECK-NEXT: fmr 0, 1
+; CHECK-NEXT: bc 12, 1, .LBB2_2
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: li 3, .LCPI2_0@l
+; CHECK-NEXT: addis 3, 3, .LCPI2_0@ha
+; CHECK-NEXT: b .LBB2_3
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: li 3, .LCPI2_1@l
+; CHECK-NEXT: addis 3, 3, .LCPI2_1@ha
+; CHECK-NEXT: .LBB2_3: # %entry
; CHECK-NEXT: lfs 1, 0(3)
; CHECK-NEXT: lis 3, foo@ha
; CHECK-NEXT: stfd 0, foo@l(3)
diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
index 033728500abc..42972fe069df 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
@@ -1383,19 +1383,18 @@ define i32 @test_fptoui_ppc_i32_ppc_fp128(ppc_fp128 %first) #0 {
; PC64-NEXT: nop
; PC64-NEXT: mffs 0
; PC64-NEXT: mtfsb1 31
-; PC64-NEXT: lis 4, -32768
-; PC64-NEXT: bc 12, 8, .LBB31_3
-; PC64-NEXT: b .LBB31_4
-; PC64-NEXT: .LBB31_3: # %entry
-; PC64-NEXT: li 4, 0
-; PC64-NEXT: .LBB31_4: # %entry
+; PC64-NEXT: li 3, 0
; PC64-NEXT: mtfsb0 30
; PC64-NEXT: fadd 1, 2, 1
; PC64-NEXT: mtfsf 1, 0
; PC64-NEXT: fctiwz 0, 1
; PC64-NEXT: stfd 0, 120(1)
-; PC64-NEXT: lwz 3, 124(1)
-; PC64-NEXT: xor 3, 3, 4
+; PC64-NEXT: bc 12, 8, .LBB31_4
+; PC64-NEXT: # %bb.3: # %entry
+; PC64-NEXT: lis 3, -32768
+; PC64-NEXT: .LBB31_4: # %entry
+; PC64-NEXT: lwz 4, 124(1)
+; PC64-NEXT: xor 3, 4, 3
; PC64-NEXT: addi 1, 1, 128
; PC64-NEXT: ld 0, 16(1)
; PC64-NEXT: lwz 12, 8(1)
diff --git a/llvm/test/CodeGen/PowerPC/pr43976.ll b/llvm/test/CodeGen/PowerPC/pr43976.ll
index 5c29a1508315..9cc49efd78eb 100644
--- a/llvm/test/CodeGen/PowerPC/pr43976.ll
+++ b/llvm/test/CodeGen/PowerPC/pr43976.ll
@@ -10,25 +10,25 @@ define dso_local signext i32 @b() local_unnamed_addr #0 {
; CHECK-NEXT: stdu r1, -144(r1)
; CHECK-NEXT: std r0, 160(r1)
; CHECK-NEXT: addis r3, r2, a@toc@ha
-; CHECK-NEXT: li r4, 1
; CHECK-NEXT: lfd f0, a@toc@l(r3)
; CHECK-NEXT: addis r3, r2, .LCPI0_0@toc@ha
-; CHECK-NEXT: rldic r4, r4, 63, 0
; CHECK-NEXT: lfs f1, .LCPI0_0@toc@l(r3)
; CHECK-NEXT: fsub f2, f0, f1
; CHECK-NEXT: fctidz f2, f2
; CHECK-NEXT: stfd f2, 128(r1)
; CHECK-NEXT: fctidz f2, f0
+; CHECK-NEXT: fcmpu cr0, f0, f1
; CHECK-NEXT: stfd f2, 120(r1)
+; CHECK-NEXT: blt cr0, .LBB0_2
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ld r3, 128(r1)
-; CHECK-NEXT: ld r5, 120(r1)
-; CHECK-NEXT: fcmpu cr0, f0, f1
+; CHECK-NEXT: li r4, 1
+; CHECK-NEXT: rldic r4, r4, 63, 0
; CHECK-NEXT: xor r3, r3, r4
-; CHECK-NEXT: bc 12, lt, .LBB0_1
-; CHECK-NEXT: b .LBB0_2
-; CHECK-NEXT: .LBB0_1: # %entry
-; CHECK-NEXT: addi r3, r5, 0
-; CHECK-NEXT: .LBB0_2: # %entry
+; CHECK-NEXT: b .LBB0_3
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: ld r3, 120(r1)
+; CHECK-NEXT: .LBB0_3: # %entry
; CHECK-NEXT: std r3, 112(r1)
; CHECK-NEXT: addis r3, r2, .LCPI0_1@toc@ha
; CHECK-NEXT: lfd f0, 112(r1)
diff --git a/llvm/test/CodeGen/PowerPC/pr49509.ll b/llvm/test/CodeGen/PowerPC/pr49509.ll
index 7b6248c60ab4..48fe65e48e1f 100644
--- a/llvm/test/CodeGen/PowerPC/pr49509.ll
+++ b/llvm/test/CodeGen/PowerPC/pr49509.ll
@@ -23,32 +23,23 @@ define void @test() {
; CHECK-NEXT: lbz 3, 0(3)
; CHECK-NEXT: and 5, 5, 6
; CHECK-NEXT: and 4, 4, 7
-; CHECK-NEXT: and 4, 4, 5
+; CHECK-NEXT: and 5, 4, 5
; CHECK-NEXT: cmpwi 3, 0
-; CHECK-NEXT: lis 3, 256
-; CHECK-NEXT: lis 7, 512
-; CHECK-NEXT: bc 12, 2, .LBB0_4
-; CHECK-NEXT: b .LBB0_5
-; CHECK-NEXT: .LBB0_4: # %bb66
; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: cmpwi 1, 5, -1
+; CHECK-NEXT: li 4, 0
+; CHECK-NEXT: bc 12, 2, .LBB0_5
+; CHECK-NEXT: # %bb.4: # %bb66
+; CHECK-NEXT: lis 4, 256
; CHECK-NEXT: .LBB0_5: # %bb66
-; CHECK-NEXT: cmpwi 1, 4, -1
-; CHECK-NEXT: cmpwi 5, 4, -1
-; CHECK-NEXT: li 6, 0
-; CHECK-NEXT: bc 12, 6, .LBB0_6
-; CHECK-NEXT: b .LBB0_7
-; CHECK-NEXT: .LBB0_6: # %bb66
-; CHECK-NEXT: addi 3, 7, 0
+; CHECK-NEXT: cmpwi 5, 5, -1
+; CHECK-NEXT: lis 5, 512
+; CHECK-NEXT: beq 5, .LBB0_7
+; CHECK-NEXT: # %bb.6: # %bb66
+; CHECK-NEXT: mr 5, 4
; CHECK-NEXT: .LBB0_7: # %bb66
-; CHECK-NEXT: cror 20, 22, 2
-; CHECK-NEXT: stw 3, 0(3)
-; CHECK-NEXT: bc 12, 20, .LBB0_9
-; CHECK-NEXT: # %bb.8: # %bb66
-; CHECK-NEXT: ori 3, 6, 0
-; CHECK-NEXT: b .LBB0_10
-; CHECK-NEXT: .LBB0_9: # %bb66
-; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: .LBB0_10: # %bb66
+; CHECK-NEXT: cror 20, 6, 2
+; CHECK-NEXT: stw 5, 0(3)
; CHECK-NEXT: stw 3, 0(3)
; CHECK-NEXT: blr
bb:
diff --git a/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll b/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll
index 514f96b22035..9e29c6fc9821 100644
--- a/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll
+++ b/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll
@@ -13,7 +13,7 @@
; CHECK: addic 29, 0, 20
; Save CR through R12 using R29 as the stack pointer (aligned base pointer).
; CHECK: mfcr 12
-; CHECK: stw 12, -24(29)
+; CHECK: stw 12, -28(29)
target datalayout = "E-m:e-p:32:32-i64:64-n32"
target triple = "powerpc-unknown-freebsd"
diff --git a/llvm/test/CodeGen/PowerPC/select-cc-no-isel.ll b/llvm/test/CodeGen/PowerPC/select-cc-no-isel.ll
index 345f3804c0c4..1e9dd0caf0ad 100644
--- a/llvm/test/CodeGen/PowerPC/select-cc-no-isel.ll
+++ b/llvm/test/CodeGen/PowerPC/select-cc-no-isel.ll
@@ -7,6 +7,7 @@
define signext i32 @foo(ptr nocapture noundef %dummy) #0 {
; CHECK-LABEL: name: foo
; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.1, %bb.2
; CHECK-NEXT: liveins: $x3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY $x3
@@ -14,14 +15,20 @@ define signext i32 @foo(ptr nocapture noundef %dummy) #0 {
; CHECK-NEXT: [[ADDI:%[0-9]+]]:gprc_and_gprc_nor0 = nsw ADDI [[LWZ]], 1
; CHECK-NEXT: [[CMPWI:%[0-9]+]]:crrc = CMPWI [[LWZ]], 750
; CHECK-NEXT: [[LI:%[0-9]+]]:gprc_and_gprc_nor0 = LI 1
- ; CHECK-NEXT: [[ISEL:%[0-9]+]]:gprc = ISEL [[ADDI]], [[LI]], [[CMPWI]].sub_lt
- ; CHECK-NEXT: STW killed [[ISEL]], 0, [[COPY]] :: (store (s32) into %ir.dummy)
+ ; CHECK-NEXT: BCC 12, [[CMPWI]], %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.entry:
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.entry:
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gprc = PHI [[LI]], %bb.1, [[ADDI]], %bb.0
+ ; CHECK-NEXT: STW killed [[PHI]], 0, [[COPY]] :: (store (s32) into %ir.dummy)
; CHECK-NEXT: [[LI8_:%[0-9]+]]:g8rc = LI8 0
; CHECK-NEXT: $x3 = COPY [[LI8_]]
; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
;
; CHECK-32-LABEL: name: foo
; CHECK-32: bb.0.entry:
+ ; CHECK-32-NEXT: successors: %bb.1, %bb.2
; CHECK-32-NEXT: liveins: $r3
; CHECK-32-NEXT: {{ $}}
; CHECK-32-NEXT: [[COPY:%[0-9]+]]:gprc_and_gprc_nor0 = COPY $r3
@@ -29,8 +36,13 @@ define signext i32 @foo(ptr nocapture noundef %dummy) #0 {
; CHECK-32-NEXT: [[ADDI:%[0-9]+]]:gprc_and_gprc_nor0 = nsw ADDI [[LWZ]], 1
; CHECK-32-NEXT: [[CMPWI:%[0-9]+]]:crrc = CMPWI [[LWZ]], 750
; CHECK-32-NEXT: [[LI:%[0-9]+]]:gprc_and_gprc_nor0 = LI 1
- ; CHECK-32-NEXT: [[ISEL:%[0-9]+]]:gprc = ISEL [[ADDI]], [[LI]], [[CMPWI]].sub_lt
- ; CHECK-32-NEXT: STW killed [[ISEL]], 0, [[COPY]] :: (store (s32) into %ir.dummy)
+ ; CHECK-32-NEXT: BCC 12, [[CMPWI]], %bb.2
+ ; CHECK-32-NEXT: {{ $}}
+ ; CHECK-32-NEXT: bb.1.entry:
+ ; CHECK-32-NEXT: {{ $}}
+ ; CHECK-32-NEXT: bb.2.entry:
+ ; CHECK-32-NEXT: [[PHI:%[0-9]+]]:gprc = PHI [[LI]], %bb.1, [[ADDI]], %bb.0
+ ; CHECK-32-NEXT: STW killed [[PHI]], 0, [[COPY]] :: (store (s32) into %ir.dummy)
; CHECK-32-NEXT: [[LI1:%[0-9]+]]:gprc = LI 0
; CHECK-32-NEXT: $r3 = COPY [[LI1]]
; CHECK-32-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
diff --git a/llvm/test/CodeGen/PowerPC/select.ll b/llvm/test/CodeGen/PowerPC/select.ll
index 49d55c7df524..289f83c475ff 100644
--- a/llvm/test/CodeGen/PowerPC/select.ll
+++ b/llvm/test/CodeGen/PowerPC/select.ll
@@ -17,13 +17,11 @@ define i64 @f0(i64 %x) {
;
; CHECK-32-LABEL: f0:
; CHECK-32: # %bb.0:
-; CHECK-32-NEXT: li r4, 125
-; CHECK-32-NEXT: li r5, -3
; CHECK-32-NEXT: cmpwi r3, 0
-; CHECK-32-NEXT: bc 12, lt, .LBB0_1
-; CHECK-32-NEXT: b .LBB0_2
-; CHECK-32-NEXT: .LBB0_1:
-; CHECK-32-NEXT: addi r4, r5, 0
+; CHECK-32-NEXT: li r4, -3
+; CHECK-32-NEXT: blt cr0, .LBB0_2
+; CHECK-32-NEXT: # %bb.1:
+; CHECK-32-NEXT: li r4, 125
; CHECK-32-NEXT: .LBB0_2:
; CHECK-32-NEXT: srawi r3, r3, 31
; CHECK-32-NEXT: blr
@@ -43,13 +41,11 @@ define i64 @f1(i64 %x) {
;
; CHECK-32-LABEL: f1:
; CHECK-32: # %bb.0:
-; CHECK-32-NEXT: li r4, 512
; CHECK-32-NEXT: cmpwi r3, 0
-; CHECK-32-NEXT: li r3, 64
-; CHECK-32-NEXT: bc 12, lt, .LBB1_1
-; CHECK-32-NEXT: b .LBB1_2
-; CHECK-32-NEXT: .LBB1_1:
-; CHECK-32-NEXT: addi r4, r3, 0
+; CHECK-32-NEXT: li r4, 64
+; CHECK-32-NEXT: blt cr0, .LBB1_2
+; CHECK-32-NEXT: # %bb.1:
+; CHECK-32-NEXT: li r4, 512
; CHECK-32-NEXT: .LBB1_2:
; CHECK-32-NEXT: li r3, 0
; CHECK-32-NEXT: blr
@@ -69,14 +65,11 @@ define i64 @f2(i64 %x) {
; CHECK-32-LABEL: f2:
; CHECK-32: # %bb.0:
; CHECK-32-NEXT: or. r3, r4, r3
-; CHECK-32-NEXT: li r3, 1024
+; CHECK-32-NEXT: li r4, 0
; CHECK-32-NEXT: bc 12, eq, .LBB2_2
; CHECK-32-NEXT: # %bb.1:
-; CHECK-32-NEXT: ori r4, r3, 0
-; CHECK-32-NEXT: b .LBB2_3
+; CHECK-32-NEXT: li r4, 1024
; CHECK-32-NEXT: .LBB2_2:
-; CHECK-32-NEXT: li r4, 0
-; CHECK-32-NEXT: .LBB2_3:
; CHECK-32-NEXT: li r3, 0
; CHECK-32-NEXT: blr
%c = icmp eq i64 %x, 0
@@ -93,15 +86,17 @@ define i64 @f3(i64 %x, i64 %y) {
;
; CHECK-32-LABEL: f3:
; CHECK-32: # %bb.0:
-; CHECK-32-NEXT: or. r3, r4, r3
-; CHECK-32-NEXT: bc 12, eq, .LBB3_2
+; CHECK-32-NEXT: mr r7, r4
+; CHECK-32-NEXT: or. r3, r7, r3
+; CHECK-32-NEXT: li r4, 0
+; CHECK-32-NEXT: li r3, 0
+; CHECK-32-NEXT: beq cr0, .LBB3_2
; CHECK-32-NEXT: # %bb.1:
-; CHECK-32-NEXT: ori r3, r5, 0
-; CHECK-32-NEXT: ori r4, r6, 0
-; CHECK-32-NEXT: blr
+; CHECK-32-NEXT: mr r3, r5
; CHECK-32-NEXT: .LBB3_2:
-; CHECK-32-NEXT: li r3, 0
-; CHECK-32-NEXT: li r4, 0
+; CHECK-32-NEXT: beqlr cr0
+; CHECK-32-NEXT: # %bb.3:
+; CHECK-32-NEXT: mr r4, r6
; CHECK-32-NEXT: blr
%c = icmp eq i64 %x, 0
%r = select i1 %c, i64 0, i64 %y
@@ -140,14 +135,18 @@ define i64 @f4_sge_0(i64 %x) {
;
; CHECK-32-LABEL: f4_sge_0:
; CHECK-32: # %bb.0:
-; CHECK-32-NEXT: subfic r5, r4, 0
-; CHECK-32-NEXT: subfze r6, r3
+; CHECK-32-NEXT: mr r5, r4
+; CHECK-32-NEXT: subfic r4, r4, 0
+; CHECK-32-NEXT: mr r6, r3
; CHECK-32-NEXT: cmpwi r3, -1
-; CHECK-32-NEXT: bc 12, gt, .LBB5_1
-; CHECK-32-NEXT: blr
-; CHECK-32-NEXT: .LBB5_1:
-; CHECK-32-NEXT: addi r3, r6, 0
-; CHECK-32-NEXT: addi r4, r5, 0
+; CHECK-32-NEXT: subfze r3, r3
+; CHECK-32-NEXT: bgt cr0, .LBB5_2
+; CHECK-32-NEXT: # %bb.1:
+; CHECK-32-NEXT: mr r3, r6
+; CHECK-32-NEXT: .LBB5_2:
+; CHECK-32-NEXT: bgtlr cr0
+; CHECK-32-NEXT: # %bb.3:
+; CHECK-32-NEXT: mr r4, r5
; CHECK-32-NEXT: blr
%c = icmp sge i64 %x, 0
%x.neg = sub i64 0, %x
@@ -191,14 +190,17 @@ define i64 @f4_sle_0(i64 %x) {
; CHECK-32-NEXT: cmpwi cr1, r3, 0
; CHECK-32-NEXT: crandc 4*cr5+lt, 4*cr1+lt, eq
; CHECK-32-NEXT: cmpwi cr1, r4, 0
-; CHECK-32-NEXT: subfic r5, r4, 0
; CHECK-32-NEXT: crand 4*cr5+gt, eq, 4*cr1+eq
+; CHECK-32-NEXT: subfic r5, r4, 0
; CHECK-32-NEXT: cror 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
; CHECK-32-NEXT: subfze r6, r3
-; CHECK-32-NEXT: bclr 12, 4*cr5+lt, 0
+; CHECK-32-NEXT: bc 12, 4*cr5+lt, .LBB7_2
; CHECK-32-NEXT: # %bb.1:
-; CHECK-32-NEXT: ori r3, r6, 0
-; CHECK-32-NEXT: ori r4, r5, 0
+; CHECK-32-NEXT: mr r3, r6
+; CHECK-32-NEXT: .LBB7_2:
+; CHECK-32-NEXT: bclr 12, 4*cr5+lt, 0
+; CHECK-32-NEXT: # %bb.3:
+; CHECK-32-NEXT: mr r4, r5
; CHECK-32-NEXT: blr
%c = icmp sle i64 %x, 0
%x.neg = sub i64 0, %x
@@ -238,16 +240,20 @@ define i64 @f5(i64 %x, i64 %y) {
;
; CHECK-32-LABEL: f5:
; CHECK-32: # %bb.0:
-; CHECK-32-NEXT: li r7, 0
; CHECK-32-NEXT: or. r3, r4, r3
-; CHECK-32-NEXT: bc 12, eq, .LBB9_2
+; CHECK-32-NEXT: mr r3, r5
+; CHECK-32-NEXT: bne cr0, .LBB9_3
; CHECK-32-NEXT: # %bb.1:
-; CHECK-32-NEXT: ori r3, r7, 0
-; CHECK-32-NEXT: ori r4, r7, 0
-; CHECK-32-NEXT: blr
+; CHECK-32-NEXT: bne cr0, .LBB9_4
; CHECK-32-NEXT: .LBB9_2:
-; CHECK-32-NEXT: addi r3, r5, 0
-; CHECK-32-NEXT: addi r4, r6, 0
+; CHECK-32-NEXT: mr r4, r6
+; CHECK-32-NEXT: blr
+; CHECK-32-NEXT: .LBB9_3:
+; CHECK-32-NEXT: li r3, 0
+; CHECK-32-NEXT: beq cr0, .LBB9_2
+; CHECK-32-NEXT: .LBB9_4:
+; CHECK-32-NEXT: li r6, 0
+; CHECK-32-NEXT: mr r4, r6
; CHECK-32-NEXT: blr
%c = icmp eq i64 %x, 0
%r = select i1 %c, i64 %y, i64 0
@@ -264,14 +270,11 @@ define i32 @f5_i32(i32 %x, i32 %y) {
;
; CHECK-32-LABEL: f5_i32:
; CHECK-32: # %bb.0:
-; CHECK-32-NEXT: li r5, 0
; CHECK-32-NEXT: cmplwi r3, 0
-; CHECK-32-NEXT: bc 12, eq, .LBB10_2
+; CHECK-32-NEXT: mr r3, r4
+; CHECK-32-NEXT: beqlr cr0
; CHECK-32-NEXT: # %bb.1:
-; CHECK-32-NEXT: ori r3, r5, 0
-; CHECK-32-NEXT: blr
-; CHECK-32-NEXT: .LBB10_2:
-; CHECK-32-NEXT: addi r3, r4, 0
+; CHECK-32-NEXT: li r3, 0
; CHECK-32-NEXT: blr
%c = icmp eq i32 %x, 0
%r = select i1 %c, i32 %y, i32 0
diff --git a/llvm/test/CodeGen/PowerPC/select_const.ll b/llvm/test/CodeGen/PowerPC/select_const.ll
index 606cfe228878..ca4be83cc16a 100644
--- a/llvm/test/CodeGen/PowerPC/select_const.ll
+++ b/llvm/test/CodeGen/PowerPC/select_const.ll
@@ -198,12 +198,10 @@ define i32 @select_C1_C2(i1 %cond) {
; NO_ISEL-LABEL: select_C1_C2:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 421
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 42
-; NO_ISEL-NEXT: li 4, 421
-; NO_ISEL-NEXT: bc 12, 1, .LBB18_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB18_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i32 421, i32 42
ret i32 %sel
@@ -221,12 +219,10 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
; NO_ISEL-LABEL: select_C1_C2_zeroext:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 421
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 42
-; NO_ISEL-NEXT: li 4, 421
-; NO_ISEL-NEXT: bc 12, 1, .LBB19_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB19_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i32 421, i32 42
ret i32 %sel
@@ -244,12 +240,10 @@ define i32 @select_C1_C2_signext(i1 signext %cond) {
; NO_ISEL-LABEL: select_C1_C2_signext:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 421
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 42
-; NO_ISEL-NEXT: li 4, 421
-; NO_ISEL-NEXT: bc 12, 1, .LBB20_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB20_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i32 421, i32 42
ret i32 %sel
@@ -269,12 +263,10 @@ define i8 @sel_constants_add_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_add_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 1
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 28
-; NO_ISEL-NEXT: li 4, 1
-; NO_ISEL-NEXT: bc 12, 1, .LBB21_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB21_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = add i8 %sel, 5
@@ -293,12 +285,10 @@ define i8 @sel_constants_sub_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_sub_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, -9
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 18
-; NO_ISEL-NEXT: li 4, -9
-; NO_ISEL-NEXT: bc 12, 1, .LBB22_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB22_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = sub i8 %sel, 5
@@ -317,12 +307,10 @@ define i8 @sel_constants_sub_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_sub_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 9
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 2
-; NO_ISEL-NEXT: li 4, 9
-; NO_ISEL-NEXT: bc 12, 1, .LBB23_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB23_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 3
%bo = sub i8 5, %sel
@@ -341,12 +329,10 @@ define i8 @sel_constants_mul_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_mul_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, -20
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 115
-; NO_ISEL-NEXT: li 4, -20
-; NO_ISEL-NEXT: bc 12, 1, .LBB24_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB24_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = mul i8 %sel, 5
@@ -364,11 +350,10 @@ define i8 @sel_constants_sdiv_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_sdiv_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
-; NO_ISEL-NEXT: li 3, 4
-; NO_ISEL-NEXT: bc 12, 1, .LBB25_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB25_1:
; NO_ISEL-NEXT: li 3, 0
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
+; NO_ISEL-NEXT: li 3, 4
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = sdiv i8 %sel, 5
@@ -386,11 +371,10 @@ define i8 @sdiv_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: sdiv_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
-; NO_ISEL-NEXT: li 3, 5
-; NO_ISEL-NEXT: bc 12, 1, .LBB26_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB26_1:
; NO_ISEL-NEXT: li 3, 0
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
+; NO_ISEL-NEXT: li 3, 5
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 121, i8 23
%bo = sdiv i8 120, %sel
@@ -409,12 +393,10 @@ define i8 @sel_constants_udiv_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_udiv_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 50
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 4
-; NO_ISEL-NEXT: li 4, 50
-; NO_ISEL-NEXT: bc 12, 1, .LBB27_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB27_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = udiv i8 %sel, 5
@@ -432,11 +414,10 @@ define i8 @udiv_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: udiv_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
-; NO_ISEL-NEXT: li 3, 5
-; NO_ISEL-NEXT: bc 12, 1, .LBB28_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB28_1:
; NO_ISEL-NEXT: li 3, 0
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
+; NO_ISEL-NEXT: li 3, 5
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = udiv i8 120, %sel
@@ -455,12 +436,10 @@ define i8 @sel_constants_srem_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_srem_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, -4
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 3
-; NO_ISEL-NEXT: li 4, -4
-; NO_ISEL-NEXT: bc 12, 1, .LBB29_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB29_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = srem i8 %sel, 5
@@ -479,12 +458,10 @@ define i8 @srem_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: srem_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 120
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 5
-; NO_ISEL-NEXT: li 4, 120
-; NO_ISEL-NEXT: bc 12, 1, .LBB30_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB30_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 121, i8 23
%bo = srem i8 120, %sel
@@ -514,12 +491,10 @@ define i8 @urem_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: urem_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 120
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 5
-; NO_ISEL-NEXT: li 4, 120
-; NO_ISEL-NEXT: bc 12, 1, .LBB32_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB32_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = urem i8 120, %sel
@@ -549,12 +524,10 @@ define i8 @sel_constants_or_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_or_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, -3
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 23
-; NO_ISEL-NEXT: li 4, -3
-; NO_ISEL-NEXT: bc 12, 1, .LBB34_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB34_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = or i8 %sel, 5
@@ -573,12 +546,10 @@ define i8 @sel_constants_xor_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_xor_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, -7
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 18
-; NO_ISEL-NEXT: li 4, -7
-; NO_ISEL-NEXT: bc 12, 1, .LBB35_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB35_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = xor i8 %sel, 5
@@ -597,12 +568,10 @@ define i8 @sel_constants_shl_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_shl_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, -128
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, -32
-; NO_ISEL-NEXT: li 4, -128
-; NO_ISEL-NEXT: bc 12, 1, .LBB36_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB36_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = shl i8 %sel, 5
@@ -634,12 +603,10 @@ define i8 @sel_constants_lshr_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_lshr_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 7
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: li 3, 0
-; NO_ISEL-NEXT: li 4, 7
-; NO_ISEL-NEXT: bc 12, 1, .LBB38_1
-; NO_ISEL-NEXT: blr
-; NO_ISEL-NEXT: .LBB38_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
%bo = lshr i8 %sel, 5
@@ -699,15 +666,15 @@ define double @sel_constants_fadd_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_fadd_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: bc 12, 1, .LBB42_2
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: addis 3, 2, .LCPI42_0@toc@ha
-; NO_ISEL-NEXT: addis 4, 2, .LCPI42_1@toc@ha
; NO_ISEL-NEXT: addi 3, 3, .LCPI42_0@toc@l
-; NO_ISEL-NEXT: addi 4, 4, .LCPI42_1@toc@l
-; NO_ISEL-NEXT: bc 12, 1, .LBB42_1
-; NO_ISEL-NEXT: b .LBB42_2
-; NO_ISEL-NEXT: .LBB42_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: lfd 1, 0(3)
+; NO_ISEL-NEXT: blr
; NO_ISEL-NEXT: .LBB42_2:
+; NO_ISEL-NEXT: addis 3, 2, .LCPI42_1@toc@ha
+; NO_ISEL-NEXT: addi 3, 3, .LCPI42_1@toc@l
; NO_ISEL-NEXT: lfd 1, 0(3)
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, double -4.0, double 23.3
@@ -730,15 +697,15 @@ define double @sel_constants_fsub_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_fsub_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: bc 12, 1, .LBB43_2
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: addis 3, 2, .LCPI43_0@toc@ha
-; NO_ISEL-NEXT: addis 4, 2, .LCPI43_1@toc@ha
; NO_ISEL-NEXT: addi 3, 3, .LCPI43_0@toc@l
-; NO_ISEL-NEXT: addi 4, 4, .LCPI43_1@toc@l
-; NO_ISEL-NEXT: bc 12, 1, .LBB43_1
-; NO_ISEL-NEXT: b .LBB43_2
-; NO_ISEL-NEXT: .LBB43_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: lfd 1, 0(3)
+; NO_ISEL-NEXT: blr
; NO_ISEL-NEXT: .LBB43_2:
+; NO_ISEL-NEXT: addis 3, 2, .LCPI43_1@toc@ha
+; NO_ISEL-NEXT: addi 3, 3, .LCPI43_1@toc@l
; NO_ISEL-NEXT: lfd 1, 0(3)
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, double -4.0, double 23.3
@@ -761,15 +728,15 @@ define double @fsub_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: fsub_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: bc 12, 1, .LBB44_2
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: addis 3, 2, .LCPI44_0@toc@ha
-; NO_ISEL-NEXT: addis 4, 2, .LCPI44_1@toc@ha
; NO_ISEL-NEXT: addi 3, 3, .LCPI44_0@toc@l
-; NO_ISEL-NEXT: addi 4, 4, .LCPI44_1@toc@l
-; NO_ISEL-NEXT: bc 12, 1, .LBB44_1
-; NO_ISEL-NEXT: b .LBB44_2
-; NO_ISEL-NEXT: .LBB44_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: lfd 1, 0(3)
+; NO_ISEL-NEXT: blr
; NO_ISEL-NEXT: .LBB44_2:
+; NO_ISEL-NEXT: addis 3, 2, .LCPI44_1@toc@ha
+; NO_ISEL-NEXT: addi 3, 3, .LCPI44_1@toc@l
; NO_ISEL-NEXT: lfd 1, 0(3)
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, double -4.0, double 23.3
@@ -792,15 +759,15 @@ define double @sel_constants_fmul_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_fmul_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: bc 12, 1, .LBB45_2
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: addis 3, 2, .LCPI45_0@toc@ha
-; NO_ISEL-NEXT: addis 4, 2, .LCPI45_1@toc@ha
; NO_ISEL-NEXT: addi 3, 3, .LCPI45_0@toc@l
-; NO_ISEL-NEXT: addi 4, 4, .LCPI45_1@toc@l
-; NO_ISEL-NEXT: bc 12, 1, .LBB45_1
-; NO_ISEL-NEXT: b .LBB45_2
-; NO_ISEL-NEXT: .LBB45_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: lfd 1, 0(3)
+; NO_ISEL-NEXT: blr
; NO_ISEL-NEXT: .LBB45_2:
+; NO_ISEL-NEXT: addis 3, 2, .LCPI45_1@toc@ha
+; NO_ISEL-NEXT: addi 3, 3, .LCPI45_1@toc@l
; NO_ISEL-NEXT: lfd 1, 0(3)
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, double -4.0, double 23.3
@@ -823,15 +790,15 @@ define double @sel_constants_fdiv_constant(i1 %cond) {
; NO_ISEL-LABEL: sel_constants_fdiv_constant:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: bc 12, 1, .LBB46_2
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: addis 3, 2, .LCPI46_0@toc@ha
-; NO_ISEL-NEXT: addis 4, 2, .LCPI46_1@toc@ha
; NO_ISEL-NEXT: addi 3, 3, .LCPI46_0@toc@l
-; NO_ISEL-NEXT: addi 4, 4, .LCPI46_1@toc@l
-; NO_ISEL-NEXT: bc 12, 1, .LBB46_1
-; NO_ISEL-NEXT: b .LBB46_2
-; NO_ISEL-NEXT: .LBB46_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: lfd 1, 0(3)
+; NO_ISEL-NEXT: blr
; NO_ISEL-NEXT: .LBB46_2:
+; NO_ISEL-NEXT: addis 3, 2, .LCPI46_1@toc@ha
+; NO_ISEL-NEXT: addi 3, 3, .LCPI46_1@toc@l
; NO_ISEL-NEXT: lfd 1, 0(3)
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, double -4.0, double 23.3
@@ -854,15 +821,15 @@ define double @fdiv_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: fdiv_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: bc 12, 1, .LBB47_2
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: addis 3, 2, .LCPI47_0@toc@ha
-; NO_ISEL-NEXT: addis 4, 2, .LCPI47_1@toc@ha
; NO_ISEL-NEXT: addi 3, 3, .LCPI47_0@toc@l
-; NO_ISEL-NEXT: addi 4, 4, .LCPI47_1@toc@l
-; NO_ISEL-NEXT: bc 12, 1, .LBB47_1
-; NO_ISEL-NEXT: b .LBB47_2
-; NO_ISEL-NEXT: .LBB47_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: lfd 1, 0(3)
+; NO_ISEL-NEXT: blr
; NO_ISEL-NEXT: .LBB47_2:
+; NO_ISEL-NEXT: addis 3, 2, .LCPI47_1@toc@ha
+; NO_ISEL-NEXT: addi 3, 3, .LCPI47_1@toc@l
; NO_ISEL-NEXT: lfd 1, 0(3)
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, double -4.0, double 23.3
@@ -905,15 +872,15 @@ define double @frem_constant_sel_constants(i1 %cond) {
; NO_ISEL-LABEL: frem_constant_sel_constants:
; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: bc 12, 1, .LBB49_2
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: addis 3, 2, .LCPI49_0@toc@ha
-; NO_ISEL-NEXT: addis 4, 2, .LCPI49_1@toc@ha
; NO_ISEL-NEXT: addi 3, 3, .LCPI49_0@toc@l
-; NO_ISEL-NEXT: addi 4, 4, .LCPI49_1@toc@l
-; NO_ISEL-NEXT: bc 12, 1, .LBB49_1
-; NO_ISEL-NEXT: b .LBB49_2
-; NO_ISEL-NEXT: .LBB49_1:
-; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: lfd 1, 0(3)
+; NO_ISEL-NEXT: blr
; NO_ISEL-NEXT: .LBB49_2:
+; NO_ISEL-NEXT: addis 3, 2, .LCPI49_1@toc@ha
+; NO_ISEL-NEXT: addi 3, 3, .LCPI49_1@toc@l
; NO_ISEL-NEXT: lfd 1, 0(3)
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, double -4.0, double 23.3
diff --git a/llvm/test/CodeGen/PowerPC/smulfixsat.ll b/llvm/test/CodeGen/PowerPC/smulfixsat.ll
index 9e371d499da3..b65c99d68090 100644
--- a/llvm/test/CodeGen/PowerPC/smulfixsat.ll
+++ b/llvm/test/CodeGen/PowerPC/smulfixsat.ll
@@ -10,12 +10,11 @@ define i32 @func1(i32 %x, i32 %y) nounwind {
; CHECK-NEXT: mullw 3, 3, 4
; CHECK-NEXT: srawi 4, 3, 31
; CHECK-NEXT: cmplw 5, 4
-; CHECK-NEXT: srawi 4, 5, 31
-; CHECK-NEXT: xori 4, 4, 65535
-; CHECK-NEXT: xoris 4, 4, 32767
-; CHECK-NEXT: bclr 12, 2, 0
+; CHECK-NEXT: beqlr 0
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ori 3, 4, 0
+; CHECK-NEXT: srawi 3, 5, 31
+; CHECK-NEXT: xori 3, 3, 65535
+; CHECK-NEXT: xoris 3, 3, 32767
; CHECK-NEXT: blr
%tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 0)
ret i32 %tmp
@@ -24,23 +23,22 @@ define i32 @func1(i32 %x, i32 %y) nounwind {
define i32 @func2(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func2:
; CHECK: # %bb.0:
-; CHECK-NEXT: mulhw. 6, 3, 4
-; CHECK-NEXT: lis 5, 32767
+; CHECK-NEXT: mulhw. 5, 3, 4
+; CHECK-NEXT: bgt 0, .LBB1_2
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mullw 3, 3, 4
-; CHECK-NEXT: rotlwi 3, 3, 31
-; CHECK-NEXT: ori 4, 5, 65535
-; CHECK-NEXT: rlwimi 3, 6, 31, 0, 0
-; CHECK-NEXT: bc 12, 1, .LBB1_1
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1:
-; CHECK-NEXT: addi 3, 4, 0
+; CHECK-NEXT: rotlwi 4, 3, 31
+; CHECK-NEXT: rlwimi 4, 5, 31, 0, 0
+; CHECK-NEXT: b .LBB1_3
; CHECK-NEXT: .LBB1_2:
-; CHECK-NEXT: cmpwi 6, -1
-; CHECK-NEXT: lis 4, -32768
-; CHECK-NEXT: bc 12, 0, .LBB1_3
-; CHECK-NEXT: blr
+; CHECK-NEXT: lis 3, 32767
+; CHECK-NEXT: ori 4, 3, 65535
; CHECK-NEXT: .LBB1_3:
-; CHECK-NEXT: addi 3, 4, 0
+; CHECK-NEXT: cmpwi 5, -1
+; CHECK-NEXT: lis 3, -32768
+; CHECK-NEXT: bltlr 0
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: mr 3, 4
; CHECK-NEXT: blr
%tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 1)
ret i32 %tmp
diff --git a/llvm/test/CodeGen/PowerPC/spe.ll b/llvm/test/CodeGen/PowerPC/spe.ll
index 4bfc413a5a2a..b9df47d6d645 100644
--- a/llvm/test/CodeGen/PowerPC/spe.ll
+++ b/llvm/test/CodeGen/PowerPC/spe.ll
@@ -252,15 +252,13 @@ define i1 @test_fcmpuno(float %a, float %b) #0 {
; CHECK-LABEL: test_fcmpuno:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: efscmpeq 0, 3, 3
-; CHECK-NEXT: efscmpeq 1, 4, 4
-; CHECK-NEXT: li 5, 1
-; CHECK-NEXT: crand 20, 5, 1
-; CHECK-NEXT: bc 12, 20, .LBB12_2
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: bc 4, 1, .LBB12_2
; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: ori 3, 5, 0
-; CHECK-NEXT: blr
+; CHECK-NEXT: efscmpeq 0, 4, 4
+; CHECK-NEXT: bclr 12, 1, 0
; CHECK-NEXT: .LBB12_2: # %entry
-; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
entry:
%r = fcmp uno float %a, %b
@@ -270,16 +268,15 @@ define i1 @test_fcmpuno(float %a, float %b) #0 {
define i1 @test_fcmpord(float %a, float %b) #0 {
; CHECK-LABEL: test_fcmpord:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: efscmpeq 0, 4, 4
-; CHECK-NEXT: efscmpeq 1, 3, 3
-; CHECK-NEXT: li 5, 1
-; CHECK-NEXT: crnand 20, 5, 1
-; CHECK-NEXT: bc 12, 20, .LBB13_2
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: ori 3, 5, 0
-; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB13_2: # %entry
; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: bclr 4, 1, 0
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: efscmpeq 0, 5, 5
+; CHECK-NEXT: bclr 4, 1, 0
+; CHECK-NEXT: # %bb.2: # %entry
+; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
entry:
%r = fcmp ord float %a, %b
@@ -289,16 +286,15 @@ define i1 @test_fcmpord(float %a, float %b) #0 {
define i1 @test_fcmpueq(float %a, float %b) #0 {
; CHECK-LABEL: test_fcmpueq:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: efscmpgt 0, 3, 4
-; CHECK-NEXT: efscmplt 1, 3, 4
-; CHECK-NEXT: li 5, 1
-; CHECK-NEXT: cror 20, 5, 1
-; CHECK-NEXT: bc 12, 20, .LBB14_2
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: ori 3, 5, 0
-; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB14_2: # %entry
; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: bclr 12, 1, 0
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: efscmplt 0, 5, 4
+; CHECK-NEXT: bclr 12, 1, 0
+; CHECK-NEXT: # %bb.2: # %entry
+; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
entry:
%r = fcmp ueq float %a, %b
@@ -308,16 +304,15 @@ define i1 @test_fcmpueq(float %a, float %b) #0 {
define i1 @test_fcmpne(float %a, float %b) #0 {
; CHECK-LABEL: test_fcmpne:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: efscmplt 0, 3, 4
-; CHECK-NEXT: efscmpgt 1, 3, 4
-; CHECK-NEXT: li 5, 1
-; CHECK-NEXT: crnor 20, 5, 1
-; CHECK-NEXT: bc 12, 20, .LBB15_2
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: bc 12, 1, .LBB15_2
; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: ori 3, 5, 0
-; CHECK-NEXT: blr
+; CHECK-NEXT: efscmpgt 0, 5, 4
+; CHECK-NEXT: bclr 4, 1, 0
; CHECK-NEXT: .LBB15_2: # %entry
-; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
entry:
%r = fcmp one float %a, %b
@@ -389,18 +384,18 @@ ret:
define i1 @test_fcmpult(float %a, float %b) #0 {
; CHECK-LABEL: test_fcmpult:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: efscmpeq 0, 3, 3
-; CHECK-NEXT: efscmpeq 1, 4, 4
-; CHECK-NEXT: crnand 20, 5, 1
-; CHECK-NEXT: efscmplt 0, 3, 4
-; CHECK-NEXT: li 5, 1
-; CHECK-NEXT: crnor 20, 1, 20
-; CHECK-NEXT: bc 12, 20, .LBB18_2
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: ori 3, 5, 0
-; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB18_2: # %entry
; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: bc 4, 1, .LBB18_3
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: efscmpeq 0, 4, 4
+; CHECK-NEXT: bc 4, 1, .LBB18_3
+; CHECK-NEXT: # %bb.2: # %entry
+; CHECK-NEXT: efscmplt 0, 5, 4
+; CHECK-NEXT: bclr 4, 1, 0
+; CHECK-NEXT: .LBB18_3: # %entry
+; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
entry:
%r = fcmp ult float %a, %b
@@ -747,16 +742,14 @@ define i1 @test_dcmpuno(double %a, double %b) #0 {
; SPE: # %bb.0: # %entry
; SPE-NEXT: evmergelo 5, 5, 6
; SPE-NEXT: evmergelo 3, 3, 4
-; SPE-NEXT: li 7, 1
; SPE-NEXT: efdcmpeq 0, 3, 3
-; SPE-NEXT: efdcmpeq 1, 5, 5
-; SPE-NEXT: crand 20, 5, 1
-; SPE-NEXT: bc 12, 20, .LBB35_2
+; SPE-NEXT: li 3, 0
+; SPE-NEXT: bc 4, 1, .LBB35_2
; SPE-NEXT: # %bb.1: # %entry
-; SPE-NEXT: ori 3, 7, 0
-; SPE-NEXT: blr
+; SPE-NEXT: efdcmpeq 0, 5, 5
+; SPE-NEXT: bclr 12, 1, 0
; SPE-NEXT: .LBB35_2: # %entry
-; SPE-NEXT: li 3, 0
+; SPE-NEXT: li 3, 1
; SPE-NEXT: blr
;
; EFPU2-LABEL: test_dcmpuno:
@@ -780,18 +773,16 @@ define i1 @test_dcmpuno(double %a, double %b) #0 {
define i1 @test_dcmpord(double %a, double %b) #0 {
; SPE-LABEL: test_dcmpord:
; SPE: # %bb.0: # %entry
-; SPE-NEXT: evmergelo 3, 3, 4
-; SPE-NEXT: evmergelo 4, 5, 6
-; SPE-NEXT: li 7, 1
-; SPE-NEXT: efdcmpeq 0, 4, 4
-; SPE-NEXT: efdcmpeq 1, 3, 3
-; SPE-NEXT: crnand 20, 5, 1
-; SPE-NEXT: bc 12, 20, .LBB36_2
-; SPE-NEXT: # %bb.1: # %entry
-; SPE-NEXT: ori 3, 7, 0
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB36_2: # %entry
+; SPE-NEXT: evmergelo 4, 3, 4
+; SPE-NEXT: evmergelo 3, 5, 6
+; SPE-NEXT: efdcmpeq 0, 3, 3
; SPE-NEXT: li 3, 0
+; SPE-NEXT: bclr 4, 1, 0
+; SPE-NEXT: # %bb.1: # %entry
+; SPE-NEXT: efdcmpeq 0, 4, 4
+; SPE-NEXT: bclr 4, 1, 0
+; SPE-NEXT: # %bb.2: # %entry
+; SPE-NEXT: li 3, 1
; SPE-NEXT: blr
;
; EFPU2-LABEL: test_dcmpord:
@@ -1173,17 +1164,15 @@ define i1 @test_dcmpne(double %a, double %b) #0 {
; SPE-LABEL: test_dcmpne:
; SPE: # %bb.0: # %entry
; SPE-NEXT: evmergelo 5, 5, 6
-; SPE-NEXT: evmergelo 3, 3, 4
-; SPE-NEXT: li 7, 1
-; SPE-NEXT: efdcmplt 0, 3, 5
-; SPE-NEXT: efdcmpgt 1, 3, 5
-; SPE-NEXT: crnor 20, 5, 1
-; SPE-NEXT: bc 12, 20, .LBB43_2
+; SPE-NEXT: evmergelo 4, 3, 4
+; SPE-NEXT: li 3, 0
+; SPE-NEXT: efdcmplt 0, 4, 5
+; SPE-NEXT: bc 12, 1, .LBB43_2
; SPE-NEXT: # %bb.1: # %entry
-; SPE-NEXT: ori 3, 7, 0
-; SPE-NEXT: blr
+; SPE-NEXT: efdcmpgt 0, 4, 5
+; SPE-NEXT: bclr 4, 1, 0
; SPE-NEXT: .LBB43_2: # %entry
-; SPE-NEXT: li 3, 0
+; SPE-NEXT: li 3, 1
; SPE-NEXT: blr
;
; EFPU2-LABEL: test_dcmpne:
@@ -1208,20 +1197,19 @@ define i1 @test_dcmpne(double %a, double %b) #0 {
; EFPU2-NEXT: mr 5, 29
; EFPU2-NEXT: mr 6, 30
; EFPU2-NEXT: bl __eqdf2
+; EFPU2-NEXT: mr 4, 3
+; EFPU2-NEXT: li 3, 0
+; EFPU2-NEXT: bc 4, 10, .LBB43_3
+; EFPU2-NEXT: # %bb.1: # %entry
+; EFPU2-NEXT: cmpwi 4, 0
+; EFPU2-NEXT: bc 12, 2, .LBB43_3
+; EFPU2-NEXT: # %bb.2: # %entry
+; EFPU2-NEXT: li 3, 1
+; EFPU2-NEXT: .LBB43_3: # %entry
; EFPU2-NEXT: lwz 30, 40(1) # 4-byte Folded Reload
-; EFPU2-NEXT: cmpwi 3, 0
; EFPU2-NEXT: lwz 29, 36(1) # 4-byte Folded Reload
-; EFPU2-NEXT: li 4, 1
; EFPU2-NEXT: lwz 28, 32(1) # 4-byte Folded Reload
-; EFPU2-NEXT: crorc 20, 2, 10
; EFPU2-NEXT: lwz 12, 24(1)
-; EFPU2-NEXT: bc 12, 20, .LBB43_2
-; EFPU2-NEXT: # %bb.1: # %entry
-; EFPU2-NEXT: ori 3, 4, 0
-; EFPU2-NEXT: b .LBB43_3
-; EFPU2-NEXT: .LBB43_2: # %entry
-; EFPU2-NEXT: li 3, 0
-; EFPU2-NEXT: .LBB43_3: # %entry
; EFPU2-NEXT: lwz 27, 28(1) # 4-byte Folded Reload
; EFPU2-NEXT: mtcrf 32, 12 # cr2
; EFPU2-NEXT: lwz 0, 52(1)
@@ -1404,20 +1392,19 @@ ret:
define i1 @test_dcmpge(double %a, double %b) #0 {
; SPE-LABEL: test_dcmpge:
; SPE: # %bb.0: # %entry
-; SPE-NEXT: evmergelo 3, 3, 4
-; SPE-NEXT: evmergelo 4, 5, 6
-; SPE-NEXT: li 7, 1
-; SPE-NEXT: efdcmpeq 0, 4, 4
-; SPE-NEXT: efdcmpeq 1, 3, 3
-; SPE-NEXT: efdcmplt 5, 3, 4
-; SPE-NEXT: crand 20, 5, 1
-; SPE-NEXT: crorc 20, 21, 20
-; SPE-NEXT: bc 12, 20, .LBB47_2
-; SPE-NEXT: # %bb.1: # %entry
-; SPE-NEXT: ori 3, 7, 0
-; SPE-NEXT: blr
-; SPE-NEXT: .LBB47_2: # %entry
+; SPE-NEXT: evmergelo 4, 3, 4
+; SPE-NEXT: evmergelo 5, 5, 6
; SPE-NEXT: li 3, 0
+; SPE-NEXT: efdcmpeq 0, 5, 5
+; SPE-NEXT: bclr 4, 1, 0
+; SPE-NEXT: # %bb.1: # %entry
+; SPE-NEXT: efdcmpeq 0, 4, 4
+; SPE-NEXT: bclr 4, 1, 0
+; SPE-NEXT: # %bb.2: # %entry
+; SPE-NEXT: efdcmplt 0, 4, 5
+; SPE-NEXT: bclr 12, 1, 0
+; SPE-NEXT: # %bb.3: # %entry
+; SPE-NEXT: li 3, 1
; SPE-NEXT: blr
;
; EFPU2-LABEL: test_dcmpge:
@@ -1507,10 +1494,13 @@ define double @test_dselect(double %a, double %b, i1 %c) #0 {
; EFPU2-LABEL: test_dselect:
; EFPU2: # %bb.0: # %entry
; EFPU2-NEXT: andi. 7, 7, 1
-; EFPU2-NEXT: bclr 12, 1, 0
+; EFPU2-NEXT: bc 12, 1, .LBB49_2
; EFPU2-NEXT: # %bb.1: # %entry
-; EFPU2-NEXT: ori 3, 5, 0
-; EFPU2-NEXT: ori 4, 6, 0
+; EFPU2-NEXT: mr 3, 5
+; EFPU2-NEXT: .LBB49_2: # %entry
+; EFPU2-NEXT: bclr 12, 1, 0
+; EFPU2-NEXT: # %bb.3: # %entry
+; EFPU2-NEXT: mr 4, 6
; EFPU2-NEXT: blr
entry:
%r = select i1 %c, double %a, double %b
diff --git a/llvm/test/CodeGen/PowerPC/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/PowerPC/srem-seteq-illegal-types.ll
index 65068d14e160..b0cc89d1828e 100644
--- a/llvm/test/CodeGen/PowerPC/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/PowerPC/srem-seteq-illegal-types.ll
@@ -11,15 +11,13 @@ define i1 @test_srem_odd(i29 %X) nounwind {
; PPC-NEXT: addi 3, 3, 24493
; PPC-NEXT: lis 4, 82
; PPC-NEXT: addis 3, 3, 41
-; PPC-NEXT: ori 4, 4, 48987
; PPC-NEXT: clrlwi 3, 3, 3
+; PPC-NEXT: ori 4, 4, 48987
; PPC-NEXT: cmplw 3, 4
+; PPC-NEXT: li 3, 1
+; PPC-NEXT: bclr 12, 0, 0
+; PPC-NEXT: # %bb.1:
; PPC-NEXT: li 3, 0
-; PPC-NEXT: li 4, 1
-; PPC-NEXT: bc 12, 0, .LBB0_1
-; PPC-NEXT: blr
-; PPC-NEXT: .LBB0_1:
-; PPC-NEXT: addi 3, 4, 0
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_srem_odd:
@@ -45,21 +43,20 @@ define i1 @test_srem_odd(i29 %X) nounwind {
define i1 @test_srem_even(i4 %X) nounwind {
; PPC-LABEL: test_srem_even:
; PPC: # %bb.0:
-; PPC-NEXT: slwi 5, 3, 28
-; PPC-NEXT: srawi 5, 5, 28
-; PPC-NEXT: mulli 5, 5, 3
-; PPC-NEXT: rlwinm 6, 5, 25, 31, 31
-; PPC-NEXT: srwi 5, 5, 4
-; PPC-NEXT: add 5, 5, 6
-; PPC-NEXT: mulli 5, 5, 6
-; PPC-NEXT: sub 3, 3, 5
+; PPC-NEXT: slwi 4, 3, 28
+; PPC-NEXT: srawi 4, 4, 28
+; PPC-NEXT: mulli 4, 4, 3
+; PPC-NEXT: rlwinm 5, 4, 25, 31, 31
+; PPC-NEXT: srwi 4, 4, 4
+; PPC-NEXT: add 4, 4, 5
+; PPC-NEXT: mulli 4, 4, 6
+; PPC-NEXT: sub 3, 3, 4
; PPC-NEXT: clrlwi 3, 3, 28
-; PPC-NEXT: li 4, 0
; PPC-NEXT: cmpwi 3, 1
; PPC-NEXT: li 3, 1
; PPC-NEXT: bclr 12, 2, 0
; PPC-NEXT: # %bb.1:
-; PPC-NEXT: ori 3, 4, 0
+; PPC-NEXT: li 3, 0
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_srem_even:
diff --git a/llvm/test/CodeGen/PowerPC/umulfixsat.ll b/llvm/test/CodeGen/PowerPC/umulfixsat.ll
index bc41da99e3db..081c461c7b63 100644
--- a/llvm/test/CodeGen/PowerPC/umulfixsat.ll
+++ b/llvm/test/CodeGen/PowerPC/umulfixsat.ll
@@ -6,12 +6,13 @@ declare i32 @llvm.umul.fix.sat.i32(i32, i32, i32)
define i32 @func1(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func1:
; CHECK: # %bb.0:
+; CHECK-NEXT: mulhwu. 5, 3, 4
; CHECK-NEXT: li 5, -1
-; CHECK-NEXT: mulhwu. 6, 3, 4
-; CHECK-NEXT: mullw 3, 3, 4
-; CHECK-NEXT: bclr 12, 2, 0
+; CHECK-NEXT: bne 0, .LBB0_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ori 3, 5, 0
+; CHECK-NEXT: mullw 5, 3, 4
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: blr
%tmp = call i32 @llvm.umul.fix.sat.i32(i32 %x, i32 %y, i32 0)
ret i32 %tmp
@@ -21,15 +22,14 @@ define i32 @func2(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func2:
; CHECK: # %bb.0:
; CHECK-NEXT: mulhwu 6, 3, 4
-; CHECK-NEXT: li 5, -1
+; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: cmplwi 6, 1
-; CHECK-NEXT: mullw 3, 3, 4
+; CHECK-NEXT: li 3, -1
+; CHECK-NEXT: bgtlr 0
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mullw 3, 5, 4
; CHECK-NEXT: rotlwi 3, 3, 31
; CHECK-NEXT: rlwimi 3, 6, 31, 0, 0
-; CHECK-NEXT: bc 12, 1, .LBB1_1
-; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB1_1:
-; CHECK-NEXT: addi 3, 5, 0
; CHECK-NEXT: blr
%tmp = call i32 @llvm.umul.fix.sat.i32(i32 %x, i32 %y, i32 1)
ret i32 %tmp
diff --git a/llvm/test/CodeGen/PowerPC/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/PowerPC/umulo-128-legalisation-lowering.ll
index 48098e3a277c..120b5383bd5e 100644
--- a/llvm/test/CodeGen/PowerPC/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/PowerPC/umulo-128-legalisation-lowering.ll
@@ -32,102 +32,110 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
;
; PPC32-LABEL: muloti_test:
; PPC32: # %bb.0: # %start
-; PPC32-NEXT: stwu 1, -64(1)
-; PPC32-NEXT: stw 26, 40(1) # 4-byte Folded Spill
-; PPC32-NEXT: mulhwu. 26, 7, 6
-; PPC32-NEXT: mcrf 1, 0
-; PPC32-NEXT: stw 30, 56(1) # 4-byte Folded Spill
+; PPC32-NEXT: stwu 1, -80(1)
+; PPC32-NEXT: mr 11, 7
+; PPC32-NEXT: stw 26, 56(1) # 4-byte Folded Spill
+; PPC32-NEXT: mulhwu. 26, 11, 6
+; PPC32-NEXT: stw 24, 48(1) # 4-byte Folded Spill
; PPC32-NEXT: mfcr 12
-; PPC32-NEXT: cmpwi 7, 5, 0
-; PPC32-NEXT: cmpwi 2, 7, 0
+; PPC32-NEXT: stw 27, 60(1) # 4-byte Folded Spill
+; PPC32-NEXT: mcrf 1, 0
+; PPC32-NEXT: stw 19, 28(1) # 4-byte Folded Spill
+; PPC32-NEXT: mulhwu 27, 6, 10
+; PPC32-NEXT: stw 20, 32(1) # 4-byte Folded Spill
+; PPC32-NEXT: cmpwi 6, 11, 0
+; PPC32-NEXT: stw 21, 36(1) # 4-byte Folded Spill
+; PPC32-NEXT: li 7, 0
+; PPC32-NEXT: stw 22, 40(1) # 4-byte Folded Spill
; PPC32-NEXT: mulhwu. 26, 5, 8
+; PPC32-NEXT: stw 23, 44(1) # 4-byte Folded Spill
; PPC32-NEXT: mcrf 5, 0
-; PPC32-NEXT: stw 22, 24(1) # 4-byte Folded Spill
-; PPC32-NEXT: crnor 20, 30, 10
-; PPC32-NEXT: stw 23, 28(1) # 4-byte Folded Spill
-; PPC32-NEXT: cmpwi 7, 9, 0
-; PPC32-NEXT: mulhwu. 26, 3, 10
-; PPC32-NEXT: mcrf 6, 0
-; PPC32-NEXT: stw 29, 52(1) # 4-byte Folded Spill
-; PPC32-NEXT: cmpwi 2, 3, 0
-; PPC32-NEXT: stw 24, 32(1) # 4-byte Folded Spill
-; PPC32-NEXT: crnor 21, 30, 10
-; PPC32-NEXT: mulhwu. 26, 9, 4
-; PPC32-NEXT: stw 25, 36(1) # 4-byte Folded Spill
+; PPC32-NEXT: stw 25, 52(1) # 4-byte Folded Spill
+; PPC32-NEXT: cmpwi 5, 0
+; PPC32-NEXT: stw 28, 64(1) # 4-byte Folded Spill
+; PPC32-NEXT: mullw 24, 5, 10
+; PPC32-NEXT: stw 29, 68(1) # 4-byte Folded Spill
+; PPC32-NEXT: crnor 20, 2, 26
+; PPC32-NEXT: stw 30, 72(1) # 4-byte Folded Spill
+; PPC32-NEXT: cmpwi 3, 0
+; PPC32-NEXT: stw 12, 24(1)
+; PPC32-NEXT: mulhwu 30, 5, 10
+; PPC32-NEXT: cmpwi 6, 9, 0
+; PPC32-NEXT: crnor 21, 26, 2
; PPC32-NEXT: crorc 20, 20, 6
-; PPC32-NEXT: stw 27, 44(1) # 4-byte Folded Spill
-; PPC32-NEXT: crorc 21, 21, 26
-; PPC32-NEXT: stw 28, 48(1) # 4-byte Folded Spill
-; PPC32-NEXT: mulhwu 30, 6, 10
-; PPC32-NEXT: stw 12, 20(1)
; PPC32-NEXT: crorc 20, 20, 22
-; PPC32-NEXT: crorc 21, 21, 2
-; PPC32-NEXT: li 11, 0
-; PPC32-NEXT: mullw 26, 5, 10
-; PPC32-NEXT: addc 30, 26, 30
-; PPC32-NEXT: mulhwu 29, 5, 10
-; PPC32-NEXT: addze 29, 29
-; PPC32-NEXT: mullw 23, 5, 8
-; PPC32-NEXT: mullw 22, 7, 6
-; PPC32-NEXT: mulhwu 0, 6, 9
; PPC32-NEXT: mulhwu 12, 5, 9
-; PPC32-NEXT: mulhwu 27, 8, 6
-; PPC32-NEXT: mullw 25, 6, 9
-; PPC32-NEXT: mullw 24, 5, 9
-; PPC32-NEXT: mullw 5, 9, 4
-; PPC32-NEXT: add 9, 22, 23
-; PPC32-NEXT: add 9, 27, 9
-; PPC32-NEXT: cmplw 1, 9, 27
-; PPC32-NEXT: cror 20, 20, 4
-; PPC32-NEXT: mullw 23, 3, 10
-; PPC32-NEXT: add 26, 23, 5
-; PPC32-NEXT: addc 5, 25, 30
-; PPC32-NEXT: addze 0, 0
-; PPC32-NEXT: or. 3, 4, 3
-; PPC32-NEXT: mulhwu 28, 4, 10
+; PPC32-NEXT: mullw 26, 5, 9
+; PPC32-NEXT: mullw 22, 5, 8
+; PPC32-NEXT: addc 5, 24, 27
+; PPC32-NEXT: addze 30, 30
+; PPC32-NEXT: mullw 23, 6, 9
+; PPC32-NEXT: addc 5, 23, 5
+; PPC32-NEXT: mullw 21, 11, 6
+; PPC32-NEXT: add 27, 21, 22
+; PPC32-NEXT: mulhwu 28, 8, 6
+; PPC32-NEXT: add 27, 28, 27
+; PPC32-NEXT: cmplw 7, 27, 28
+; PPC32-NEXT: mulhwu. 23, 3, 10
+; PPC32-NEXT: mcrf 6, 0
+; PPC32-NEXT: cror 24, 20, 28
+; PPC32-NEXT: crorc 25, 21, 26
+; PPC32-NEXT: mulhwu 0, 6, 9
+; PPC32-NEXT: mullw 20, 9, 4
+; PPC32-NEXT: mulhwu. 9, 9, 4
; PPC32-NEXT: mcrf 1, 0
-; PPC32-NEXT: addc 3, 29, 0
-; PPC32-NEXT: add 26, 28, 26
-; PPC32-NEXT: cmplw 6, 26, 28
-; PPC32-NEXT: cror 21, 21, 24
-; PPC32-NEXT: mullw 30, 4, 10
-; PPC32-NEXT: or. 4, 8, 7
-; PPC32-NEXT: addze 4, 11
-; PPC32-NEXT: addc 7, 24, 3
-; PPC32-NEXT: crnor 22, 2, 6
-; PPC32-NEXT: mullw 27, 8, 6
-; PPC32-NEXT: adde 8, 12, 4
-; PPC32-NEXT: addc 3, 30, 27
-; PPC32-NEXT: adde 9, 26, 9
-; PPC32-NEXT: addc 4, 7, 3
-; PPC32-NEXT: adde 3, 8, 9
-; PPC32-NEXT: cror 21, 22, 21
-; PPC32-NEXT: cmplw 4, 7
-; PPC32-NEXT: cmplw 1, 3, 8
-; PPC32-NEXT: lwz 12, 20(1)
+; PPC32-NEXT: addze 9, 0
+; PPC32-NEXT: mullw 19, 3, 10
+; PPC32-NEXT: or. 3, 4, 3
+; PPC32-NEXT: mcrf 5, 0
+; PPC32-NEXT: addc 3, 30, 9
+; PPC32-NEXT: add 24, 19, 20
+; PPC32-NEXT: mulhwu 29, 4, 10
+; PPC32-NEXT: add 28, 29, 24
+; PPC32-NEXT: cmplw 2, 28, 29
+; PPC32-NEXT: crorc 20, 25, 6
+; PPC32-NEXT: cror 20, 20, 8
+; PPC32-NEXT: mullw 22, 4, 10
+; PPC32-NEXT: or. 4, 8, 11
+; PPC32-NEXT: addze 4, 7
+; PPC32-NEXT: crnor 21, 2, 22
; PPC32-NEXT: cror 20, 21, 20
-; PPC32-NEXT: crandc 21, 4, 6
-; PPC32-NEXT: crand 22, 6, 0
-; PPC32-NEXT: cror 21, 22, 21
-; PPC32-NEXT: crnor 20, 20, 21
-; PPC32-NEXT: li 7, 1
+; PPC32-NEXT: mullw 25, 8, 6
+; PPC32-NEXT: addc 8, 26, 3
+; PPC32-NEXT: adde 9, 12, 4
+; PPC32-NEXT: addc 3, 22, 25
+; PPC32-NEXT: adde 11, 28, 27
+; PPC32-NEXT: addc 4, 8, 3
+; PPC32-NEXT: adde 3, 9, 11
+; PPC32-NEXT: cmplw 1, 3, 9
+; PPC32-NEXT: cmplw 4, 8
+; PPC32-NEXT: crandc 22, 4, 6
; PPC32-NEXT: mullw 6, 6, 10
-; PPC32-NEXT: bc 12, 20, .LBB0_1
-; PPC32-NEXT: b .LBB0_2
-; PPC32-NEXT: .LBB0_1: # %start
-; PPC32-NEXT: li 7, 0
-; PPC32-NEXT: .LBB0_2: # %start
+; PPC32-NEXT: bc 12, 22, .LBB0_3
+; PPC32-NEXT: # %bb.1: # %start
+; PPC32-NEXT: crand 21, 6, 0
+; PPC32-NEXT: bc 12, 21, .LBB0_3
+; PPC32-NEXT: # %bb.2: # %start
+; PPC32-NEXT: cror 20, 20, 24
+; PPC32-NEXT: bc 4, 20, .LBB0_4
+; PPC32-NEXT: .LBB0_3: # %start
+; PPC32-NEXT: li 7, 1
+; PPC32-NEXT: .LBB0_4: # %start
+; PPC32-NEXT: lwz 12, 24(1)
+; PPC32-NEXT: lwz 30, 72(1) # 4-byte Folded Reload
; PPC32-NEXT: mtcrf 32, 12 # cr2
-; PPC32-NEXT: lwz 30, 56(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 29, 52(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 28, 48(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 27, 44(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 26, 40(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 25, 36(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 24, 32(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 23, 28(1) # 4-byte Folded Reload
-; PPC32-NEXT: lwz 22, 24(1) # 4-byte Folded Reload
-; PPC32-NEXT: addi 1, 1, 64
+; PPC32-NEXT: lwz 29, 68(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 28, 64(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 27, 60(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 26, 56(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 25, 52(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 24, 48(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 23, 44(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 22, 40(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 21, 36(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 20, 32(1) # 4-byte Folded Reload
+; PPC32-NEXT: lwz 19, 28(1) # 4-byte Folded Reload
+; PPC32-NEXT: addi 1, 1, 80
; PPC32-NEXT: blr
start:
%0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2
diff --git a/llvm/test/CodeGen/PowerPC/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/PowerPC/urem-seteq-illegal-types.ll
index f708da86444b..e5c5356ce50a 100644
--- a/llvm/test/CodeGen/PowerPC/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/PowerPC/urem-seteq-illegal-types.ll
@@ -7,12 +7,11 @@ define i1 @test_urem_odd(i13 %X) nounwind {
; PPC: # %bb.0:
; PPC-NEXT: mulli 3, 3, 3277
; PPC-NEXT: clrlwi 3, 3, 19
-; PPC-NEXT: li 4, 0
; PPC-NEXT: cmplwi 3, 1639
; PPC-NEXT: li 3, 1
; PPC-NEXT: bclr 12, 0, 0
; PPC-NEXT: # %bb.1:
-; PPC-NEXT: ori 3, 4, 0
+; PPC-NEXT: li 3, 0
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_urem_odd:
@@ -40,12 +39,10 @@ define i1 @test_urem_even(i27 %X) nounwind {
; PPC-NEXT: lis 3, 146
; PPC-NEXT: ori 3, 3, 18725
; PPC-NEXT: cmplw 4, 3
+; PPC-NEXT: li 3, 1
+; PPC-NEXT: bclr 12, 0, 0
+; PPC-NEXT: # %bb.1:
; PPC-NEXT: li 3, 0
-; PPC-NEXT: li 4, 1
-; PPC-NEXT: bc 12, 0, .LBB1_1
-; PPC-NEXT: blr
-; PPC-NEXT: .LBB1_1:
-; PPC-NEXT: addi 3, 4, 0
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_urem_even:
@@ -72,12 +69,11 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
; PPC: # %bb.0:
; PPC-NEXT: mulli 3, 3, 13
; PPC-NEXT: clrlwi 3, 3, 28
-; PPC-NEXT: li 4, 0
; PPC-NEXT: cmplwi 3, 3
; PPC-NEXT: li 3, 1
; PPC-NEXT: bclr 12, 1, 0
; PPC-NEXT: # %bb.1:
-; PPC-NEXT: ori 3, 4, 0
+; PPC-NEXT: li 3, 0
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_urem_odd_setne:
@@ -101,12 +97,11 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
; PPC: # %bb.0:
; PPC-NEXT: mulli 3, 3, 307
; PPC-NEXT: clrlwi 3, 3, 23
-; PPC-NEXT: li 4, 0
; PPC-NEXT: cmplwi 3, 1
; PPC-NEXT: li 3, 1
; PPC-NEXT: bclr 12, 1, 0
; PPC-NEXT: # %bb.1:
-; PPC-NEXT: ori 3, 4, 0
+; PPC-NEXT: li 3, 0
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_urem_negative_odd:
@@ -126,37 +121,33 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
; PPC-LABEL: test_urem_vec:
; PPC: # %bb.0:
-; PPC-NEXT: mulli 3, 3, 683
-; PPC-NEXT: rlwinm 7, 3, 31, 22, 31
-; PPC-NEXT: rlwimi 7, 3, 10, 21, 21
+; PPC-NEXT: mr 6, 3
+; PPC-NEXT: mulli 6, 6, 683
+; PPC-NEXT: rlwinm 7, 6, 31, 22, 31
+; PPC-NEXT: rlwimi 7, 6, 10, 21, 21
; PPC-NEXT: mulli 5, 5, 819
-; PPC-NEXT: li 6, 0
-; PPC-NEXT: cmplwi 7, 341
-; PPC-NEXT: mulli 3, 4, 1463
-; PPC-NEXT: addi 4, 5, -1638
-; PPC-NEXT: addi 3, 3, -1463
+; PPC-NEXT: addi 5, 5, -1638
+; PPC-NEXT: clrlwi 5, 5, 21
+; PPC-NEXT: mulli 4, 4, 1463
+; PPC-NEXT: addi 4, 4, -1463
; PPC-NEXT: clrlwi 4, 4, 21
-; PPC-NEXT: clrlwi 3, 3, 21
-; PPC-NEXT: cmplwi 1, 4, 1
-; PPC-NEXT: cmplwi 5, 3, 292
; PPC-NEXT: li 3, 1
+; PPC-NEXT: cmplwi 7, 341
+; PPC-NEXT: cmplwi 1, 5, 1
+; PPC-NEXT: cmplwi 5, 4, 292
+; PPC-NEXT: li 4, 1
; PPC-NEXT: bc 12, 21, .LBB4_2
; PPC-NEXT: # %bb.1:
-; PPC-NEXT: ori 4, 6, 0
-; PPC-NEXT: b .LBB4_3
+; PPC-NEXT: li 4, 0
; PPC-NEXT: .LBB4_2:
-; PPC-NEXT: addi 4, 3, 0
-; PPC-NEXT: .LBB4_3:
-; PPC-NEXT: bc 12, 5, .LBB4_5
-; PPC-NEXT: # %bb.4:
-; PPC-NEXT: ori 5, 6, 0
-; PPC-NEXT: b .LBB4_6
-; PPC-NEXT: .LBB4_5:
-; PPC-NEXT: addi 5, 3, 0
-; PPC-NEXT: .LBB4_6:
+; PPC-NEXT: li 5, 1
+; PPC-NEXT: bc 12, 5, .LBB4_4
+; PPC-NEXT: # %bb.3:
+; PPC-NEXT: li 5, 0
+; PPC-NEXT: .LBB4_4:
; PPC-NEXT: bclr 12, 1, 0
-; PPC-NEXT: # %bb.7:
-; PPC-NEXT: ori 3, 6, 0
+; PPC-NEXT: # %bb.5:
+; PPC-NEXT: li 3, 0
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_urem_vec:
@@ -244,16 +235,15 @@ define i1 @test_urem_oversized(i66 %X) nounwind {
; PPC-NEXT: cmplw 5, 11
; PPC-NEXT: cmplwi 1, 10, 13
; PPC-NEXT: rlwinm 3, 3, 31, 31, 31
-; PPC-NEXT: crand 20, 6, 0
-; PPC-NEXT: crandc 21, 4, 6
+; PPC-NEXT: crandc 20, 4, 6
+; PPC-NEXT: crand 21, 6, 0
; PPC-NEXT: rlwimi. 3, 6, 1, 30, 30
-; PPC-NEXT: cror 20, 20, 21
+; PPC-NEXT: cror 20, 21, 20
; PPC-NEXT: crnand 20, 2, 20
-; PPC-NEXT: li 3, 1
-; PPC-NEXT: bc 12, 20, .LBB5_1
-; PPC-NEXT: blr
-; PPC-NEXT: .LBB5_1:
; PPC-NEXT: li 3, 0
+; PPC-NEXT: bclr 12, 20, 0
+; PPC-NEXT: # %bb.1:
+; PPC-NEXT: li 3, 1
; PPC-NEXT: blr
;
; PPC64LE-LABEL: test_urem_oversized:
diff --git a/llvm/test/CodeGen/PowerPC/varargs.ll b/llvm/test/CodeGen/PowerPC/varargs.ll
index 2b686bf0d38d..d4aafac5c56e 100644
--- a/llvm/test/CodeGen/PowerPC/varargs.ll
+++ b/llvm/test/CodeGen/PowerPC/varargs.ll
@@ -7,31 +7,28 @@
define ptr @test1(ptr %foo) nounwind {
; P32-LABEL: test1:
; P32: # %bb.0:
-; P32-NEXT: lbz r4, 0(r3)
-; P32-NEXT: lwz r5, 4(r3)
-; P32-NEXT: lwz r6, 8(r3)
-; P32-NEXT: addi r7, r4, 1
-; P32-NEXT: stb r7, 0(r3)
-; P32-NEXT: addi r7, r5, 4
-; P32-NEXT: cmpwi r4, 8
-; P32-NEXT: slwi r4, r4, 2
-; P32-NEXT: add r4, r6, r4
-; P32-NEXT: bc 12, lt, .LBB0_2
+; P32-NEXT: lbz r5, 0(r3)
+; P32-NEXT: lwz r4, 4(r3)
+; P32-NEXT: addi r6, r5, 1
+; P32-NEXT: cmpwi r5, 8
+; P32-NEXT: stb r6, 0(r3)
+; P32-NEXT: mr r6, r4
+; P32-NEXT: bge cr0, .LBB0_3
; P32-NEXT: # %bb.1:
-; P32-NEXT: ori r6, r7, 0
-; P32-NEXT: b .LBB0_3
+; P32-NEXT: stw r6, 4(r3)
+; P32-NEXT: blt cr0, .LBB0_4
; P32-NEXT: .LBB0_2:
-; P32-NEXT: addi r6, r5, 0
+; P32-NEXT: lwz r3, 0(r4)
+; P32-NEXT: blr
; P32-NEXT: .LBB0_3:
+; P32-NEXT: addi r6, r4, 4
; P32-NEXT: stw r6, 4(r3)
-; P32-NEXT: bc 12, lt, .LBB0_5
-; P32-NEXT: # %bb.4:
-; P32-NEXT: ori r3, r5, 0
-; P32-NEXT: b .LBB0_6
-; P32-NEXT: .LBB0_5:
-; P32-NEXT: addi r3, r4, 0
-; P32-NEXT: .LBB0_6:
-; P32-NEXT: lwz r3, 0(r3)
+; P32-NEXT: bge cr0, .LBB0_2
+; P32-NEXT: .LBB0_4:
+; P32-NEXT: lwz r3, 8(r3)
+; P32-NEXT: slwi r4, r5, 2
+; P32-NEXT: add r4, r3, r4
+; P32-NEXT: lwz r3, 0(r4)
; P32-NEXT: blr
;
; P64-LABEL: test1:
diff --git a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll
index 16aa63cac0ab..f6fdb4ae2079 100644
--- a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -160,22 +160,22 @@ define void @ashr_8bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-LABEL: ashr_8bytes:
; LE-32BIT: # %bb.0:
; LE-32BIT-NEXT: lwz 4, 4(4)
-; LE-32BIT-NEXT: lwz 6, 4(3)
-; LE-32BIT-NEXT: lwz 3, 0(3)
+; LE-32BIT-NEXT: lwz 6, 0(3)
; LE-32BIT-NEXT: slwi 4, 4, 3
-; LE-32BIT-NEXT: subfic 7, 4, 32
-; LE-32BIT-NEXT: srw 6, 6, 4
-; LE-32BIT-NEXT: addi 8, 4, -32
-; LE-32BIT-NEXT: slw 7, 3, 7
-; LE-32BIT-NEXT: sraw 4, 3, 4
-; LE-32BIT-NEXT: sraw 3, 3, 8
-; LE-32BIT-NEXT: cmpwi 8, 1
-; LE-32BIT-NEXT: or 6, 6, 7
-; LE-32BIT-NEXT: bc 12, 0, .LBB5_1
-; LE-32BIT-NEXT: b .LBB5_2
-; LE-32BIT-NEXT: .LBB5_1:
-; LE-32BIT-NEXT: addi 3, 6, 0
+; LE-32BIT-NEXT: addi 7, 4, -32
+; LE-32BIT-NEXT: cmpwi 7, 0
+; LE-32BIT-NEXT: ble 0, .LBB5_2
+; LE-32BIT-NEXT: # %bb.1:
+; LE-32BIT-NEXT: sraw 3, 6, 7
+; LE-32BIT-NEXT: b .LBB5_3
; LE-32BIT-NEXT: .LBB5_2:
+; LE-32BIT-NEXT: lwz 3, 4(3)
+; LE-32BIT-NEXT: subfic 7, 4, 32
+; LE-32BIT-NEXT: slw 7, 6, 7
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: or 3, 3, 7
+; LE-32BIT-NEXT: .LBB5_3:
+; LE-32BIT-NEXT: sraw 4, 6, 4
; LE-32BIT-NEXT: stw 4, 0(5)
; LE-32BIT-NEXT: stw 3, 4(5)
; LE-32BIT-NEXT: blr
@@ -357,24 +357,24 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; BE-LABEL: ashr_16bytes:
; BE: # %bb.0:
; BE-NEXT: lwz 4, 12(4)
-; BE-NEXT: ld 6, 8(3)
-; BE-NEXT: ld 3, 0(3)
+; BE-NEXT: ld 6, 0(3)
; BE-NEXT: slwi 4, 4, 3
-; BE-NEXT: subfic 7, 4, 64
-; BE-NEXT: srd 6, 6, 4
-; BE-NEXT: addi 8, 4, -64
-; BE-NEXT: sld 7, 3, 7
-; BE-NEXT: cmpwi 8, 1
-; BE-NEXT: or 6, 6, 7
-; BE-NEXT: srad 7, 3, 8
-; BE-NEXT: srad 3, 3, 4
-; BE-NEXT: bc 12, 0, .LBB8_2
+; BE-NEXT: addi 7, 4, -64
+; BE-NEXT: cmpwi 7, 1
+; BE-NEXT: blt 0, .LBB8_2
; BE-NEXT: # %bb.1:
-; BE-NEXT: ori 6, 7, 0
-; BE-NEXT: b .LBB8_2
+; BE-NEXT: srad 3, 6, 7
+; BE-NEXT: b .LBB8_3
; BE-NEXT: .LBB8_2:
-; BE-NEXT: std 3, 0(5)
-; BE-NEXT: std 6, 8(5)
+; BE-NEXT: ld 3, 8(3)
+; BE-NEXT: subfic 7, 4, 64
+; BE-NEXT: sld 7, 6, 7
+; BE-NEXT: srd 3, 3, 4
+; BE-NEXT: or 3, 3, 7
+; BE-NEXT: .LBB8_3:
+; BE-NEXT: srad 4, 6, 4
+; BE-NEXT: std 3, 8(5)
+; BE-NEXT: std 4, 0(5)
; BE-NEXT: blr
;
; LE-32BIT-LABEL: ashr_16bytes:
diff --git a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll
index abfe6a953dd6..044ddf562294 100644
--- a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll
@@ -144,21 +144,21 @@ define void @ashr_8bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-LABEL: ashr_8bytes:
; LE-32BIT: # %bb.0:
; LE-32BIT-NEXT: lwz 4, 4(4)
-; LE-32BIT-NEXT: lwz 6, 4(3)
-; LE-32BIT-NEXT: lwz 3, 0(3)
-; LE-32BIT-NEXT: subfic 7, 4, 32
-; LE-32BIT-NEXT: srw 6, 6, 4
-; LE-32BIT-NEXT: addi 8, 4, -32
-; LE-32BIT-NEXT: slw 7, 3, 7
-; LE-32BIT-NEXT: sraw 4, 3, 4
-; LE-32BIT-NEXT: sraw 3, 3, 8
-; LE-32BIT-NEXT: cmpwi 8, 1
-; LE-32BIT-NEXT: or 6, 6, 7
-; LE-32BIT-NEXT: bc 12, 0, .LBB5_1
-; LE-32BIT-NEXT: b .LBB5_2
-; LE-32BIT-NEXT: .LBB5_1:
-; LE-32BIT-NEXT: addi 3, 6, 0
+; LE-32BIT-NEXT: lwz 6, 0(3)
+; LE-32BIT-NEXT: addi 7, 4, -32
+; LE-32BIT-NEXT: cmpwi 7, 0
+; LE-32BIT-NEXT: ble 0, .LBB5_2
+; LE-32BIT-NEXT: # %bb.1:
+; LE-32BIT-NEXT: sraw 3, 6, 7
+; LE-32BIT-NEXT: b .LBB5_3
; LE-32BIT-NEXT: .LBB5_2:
+; LE-32BIT-NEXT: lwz 3, 4(3)
+; LE-32BIT-NEXT: subfic 7, 4, 32
+; LE-32BIT-NEXT: slw 7, 6, 7
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: or 3, 3, 7
+; LE-32BIT-NEXT: .LBB5_3:
+; LE-32BIT-NEXT: sraw 4, 6, 4
; LE-32BIT-NEXT: stw 4, 0(5)
; LE-32BIT-NEXT: stw 3, 4(5)
; LE-32BIT-NEXT: blr
@@ -364,23 +364,23 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; BE-LABEL: ashr_16bytes:
; BE: # %bb.0:
; BE-NEXT: lwz 4, 12(4)
-; BE-NEXT: ld 6, 8(3)
-; BE-NEXT: ld 3, 0(3)
-; BE-NEXT: subfic 7, 4, 64
-; BE-NEXT: srd 6, 6, 4
-; BE-NEXT: addi 8, 4, -64
-; BE-NEXT: sld 7, 3, 7
-; BE-NEXT: cmpwi 8, 1
-; BE-NEXT: or 6, 6, 7
-; BE-NEXT: srad 7, 3, 8
-; BE-NEXT: srad 3, 3, 4
-; BE-NEXT: bc 12, 0, .LBB8_2
+; BE-NEXT: ld 6, 0(3)
+; BE-NEXT: addi 7, 4, -64
+; BE-NEXT: cmpwi 7, 1
+; BE-NEXT: blt 0, .LBB8_2
; BE-NEXT: # %bb.1:
-; BE-NEXT: ori 6, 7, 0
-; BE-NEXT: b .LBB8_2
+; BE-NEXT: srad 3, 6, 7
+; BE-NEXT: b .LBB8_3
; BE-NEXT: .LBB8_2:
-; BE-NEXT: std 3, 0(5)
-; BE-NEXT: std 6, 8(5)
+; BE-NEXT: ld 3, 8(3)
+; BE-NEXT: subfic 7, 4, 64
+; BE-NEXT: sld 7, 6, 7
+; BE-NEXT: srd 3, 3, 4
+; BE-NEXT: or 3, 3, 7
+; BE-NEXT: .LBB8_3:
+; BE-NEXT: srad 4, 6, 4
+; BE-NEXT: std 3, 8(5)
+; BE-NEXT: std 4, 0(5)
; BE-NEXT: blr
;
; LE-32BIT-LABEL: ashr_16bytes:
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
index 90d78779b764..18b66499b85f 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=NOZACAS,RV32IA %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ZACAS,RV32IA-ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=NOZACAS,RV64IA %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ZACAS,RV64IA-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas,+experimental-zabha -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ZACAS,RV64IA-ZABHA %s
; Test cmpxchg followed by a branch on the cmpxchg success value to see if the
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index 8df37bf40975..394dffa346ec 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -3,25 +3,25 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-WMO %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-WMO-ZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-TSO %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-TSO-ZACAS %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-WMO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas,+experimental-zabha -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZABHA,RV64IA-WMO-ZABHA %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-TSO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zacas,+experimental-zabha -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+zacas,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZABHA,RV64IA-TSO-ZABHA %s
define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index ee802507a02f..fe5300174061 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -12,22 +12,22 @@
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-NOZACAS,RV64IA-TSO,RV64IA-TSO-NOZACAS %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-WMO,RV32IA-WMO-ZACAS %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-TSO,RV32IA-TSO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-WMO,RV64IA-WMO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-TSO,RV64IA-TSO-ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO,RV64IA-WMO-ZABHA,RV64IA-WMO-ZABHA-NOZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO,RV64IA-TSO-ZABHA,RV64IA-TSO-ZABHA-NOZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO,RV64IA-WMO-ZABHA,RV64IA-WMO-ZABHA-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zabha,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zabha,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO,RV64IA-TSO-ZABHA,RV64IA-TSO-ZABHA-ZACAS %s
define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index 47807f78d176..bdf3b28d2d52 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -3,13 +3,13 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-NOZACAS %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-NOZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS %s
define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 13635a94d641..561b0f21dc37 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -111,7 +111,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFMIN %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFWMA %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zacas %s -o - | FileCheck --check-prefix=RV32ZACAS %s
+; RUN: llc -mtriple=riscv32 -mattr=+zacas %s -o - | FileCheck --check-prefix=RV32ZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV32ZALASR %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV32ZICFILP %s
@@ -240,7 +240,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFMIN %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFWMA %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV64ZAAMO %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zacas %s -o - | FileCheck --check-prefix=RV64ZACAS %s
+; RUN: llc -mtriple=riscv64 -mattr=+zacas %s -o - | FileCheck --check-prefix=RV64ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV64ZALASR %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV64ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV64ZICFILP %s
diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index 7c1792e2f101..cfdefec04600 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -1096,10 +1096,10 @@ declare double @llvm.maxnum.f64(double, double)
define double @test_fmadd_strategy(double %a0, double %a1, double %a2, double %a3, i64 %flag) {
; CHECK_LOCAL-LABEL: test_fmadd_strategy:
; CHECK_LOCAL: # %bb.0: # %entry
-; CHECK_LOCAL-NEXT: fmv.d fa5, fa0
; CHECK_LOCAL-NEXT: fsub.d fa4, fa0, fa1
-; CHECK_LOCAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_LOCAL-NEXT: andi a0, a0, 1
+; CHECK_LOCAL-NEXT: fmv.d fa5, fa0
+; CHECK_LOCAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_LOCAL-NEXT: beqz a0, .LBB76_2
; CHECK_LOCAL-NEXT: # %bb.1: # %entry
; CHECK_LOCAL-NEXT: fmul.d fa4, fa5, fa1
@@ -1110,10 +1110,10 @@ define double @test_fmadd_strategy(double %a0, double %a1, double %a2, double %a
;
; CHECK_GLOBAL-LABEL: test_fmadd_strategy:
; CHECK_GLOBAL: # %bb.0: # %entry
-; CHECK_GLOBAL-NEXT: fmv.d fa5, fa0
; CHECK_GLOBAL-NEXT: fsub.d fa4, fa0, fa1
-; CHECK_GLOBAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_GLOBAL-NEXT: andi a0, a0, 1
+; CHECK_GLOBAL-NEXT: fmv.d fa5, fa0
+; CHECK_GLOBAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_GLOBAL-NEXT: beqz a0, .LBB76_2
; CHECK_GLOBAL-NEXT: # %bb.1: # %entry
; CHECK_GLOBAL-NEXT: fmul.d fa5, fa5, fa1
diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll
index 2d5fce2ca497..15daf2c57790 100644
--- a/llvm/test/CodeGen/RISCV/pr69586.ll
+++ b/llvm/test/CodeGen/RISCV/pr69586.ll
@@ -146,19 +146,19 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: slli a6, a3, 10
; NOREMAT-NEXT: sd a6, 176(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a6, a0, a6
-; NOREMAT-NEXT: vle32.v v0, (a6)
+; NOREMAT-NEXT: vle32.v v6, (a6)
; NOREMAT-NEXT: vle32.v v20, (a6)
; NOREMAT-NEXT: li a6, 19
; NOREMAT-NEXT: slli a6, a6, 9
; NOREMAT-NEXT: sd a6, 168(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: li a7, 19
; NOREMAT-NEXT: add a6, a0, a6
-; NOREMAT-NEXT: vle32.v v2, (a6)
+; NOREMAT-NEXT: vle32.v v4, (a6)
; NOREMAT-NEXT: vle32.v v22, (a6)
; NOREMAT-NEXT: slli a5, a5, 11
; NOREMAT-NEXT: sd a5, 160(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a5, a0, a5
-; NOREMAT-NEXT: vle32.v v4, (a5)
+; NOREMAT-NEXT: vle32.v v2, (a5)
; NOREMAT-NEXT: vle32.v v12, (a5)
; NOREMAT-NEXT: li s10, 21
; NOREMAT-NEXT: slli a5, s10, 9
@@ -184,25 +184,25 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a5, a0, a5
; NOREMAT-NEXT: vle32.v v30, (a5)
; NOREMAT-NEXT: vle32.v v10, (a5)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v0
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v6
; NOREMAT-NEXT: li s3, 25
; NOREMAT-NEXT: slli a5, s3, 9
; NOREMAT-NEXT: sd a5, 128(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a5, a0, a5
-; NOREMAT-NEXT: vle32.v v0, (a5)
+; NOREMAT-NEXT: vle32.v v6, (a5)
; NOREMAT-NEXT: vle32.v v18, (a5)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v2
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v4
; NOREMAT-NEXT: slli a5, s2, 10
; NOREMAT-NEXT: sd a5, 120(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a5, a0, a5
-; NOREMAT-NEXT: vle32.v v2, (a5)
+; NOREMAT-NEXT: vle32.v v4, (a5)
; NOREMAT-NEXT: vle32.v v20, (a5)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v4
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v2
; NOREMAT-NEXT: li t5, 27
; NOREMAT-NEXT: slli a5, t5, 9
; NOREMAT-NEXT: sd a5, 112(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a5, a0, a5
-; NOREMAT-NEXT: vle32.v v4, (a5)
+; NOREMAT-NEXT: vle32.v v2, (a5)
; NOREMAT-NEXT: vle32.v v22, (a5)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v24
; NOREMAT-NEXT: slli a4, a4, 11
@@ -235,33 +235,33 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a4, a0, a4
; NOREMAT-NEXT: vle32.v v30, (a4)
; NOREMAT-NEXT: vle32.v v16, (a4)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v10, v0
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v10, v6
; NOREMAT-NEXT: lui a6, 4
; NOREMAT-NEXT: add a4, a0, a6
-; NOREMAT-NEXT: vle32.v v0, (a4)
+; NOREMAT-NEXT: vle32.v v6, (a4)
; NOREMAT-NEXT: vle32.v v8, (a4)
; NOREMAT-NEXT: csrr a4, vlenb
; NOREMAT-NEXT: slli a4, a4, 1
; NOREMAT-NEXT: add a4, sp, a4
; NOREMAT-NEXT: addi a4, a4, 288
; NOREMAT-NEXT: vs2r.v v8, (a4) # Unknown-size Folded Spill
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v2
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v4
; NOREMAT-NEXT: addiw a4, a6, 512
; NOREMAT-NEXT: sd a4, 72(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a4, a0, a4
-; NOREMAT-NEXT: vle32.v v2, (a4)
+; NOREMAT-NEXT: vle32.v v4, (a4)
; NOREMAT-NEXT: vle32.v v18, (a4)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v4
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v2
; NOREMAT-NEXT: slli a4, t0, 10
; NOREMAT-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a4, a0, a4
-; NOREMAT-NEXT: vle32.v v4, (a4)
+; NOREMAT-NEXT: vle32.v v2, (a4)
; NOREMAT-NEXT: vle32.v v20, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v24
; NOREMAT-NEXT: addiw a4, a6, 1536
; NOREMAT-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a4, a0, a4
-; NOREMAT-NEXT: vle32.v v6, (a4)
+; NOREMAT-NEXT: vle32.v v0, (a4)
; NOREMAT-NEXT: vle32.v v22, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v26
; NOREMAT-NEXT: slli a3, a3, 11
@@ -289,18 +289,18 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v10, (a3)
; NOREMAT-NEXT: vle32.v v14, (a3)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v16, v0
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v16, v6
; NOREMAT-NEXT: addiw a3, s1, -512
; NOREMAT-NEXT: sd a3, 24(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: add a3, a0, a3
-; NOREMAT-NEXT: vle32.v v0, (a3)
+; NOREMAT-NEXT: vle32.v v6, (a3)
; NOREMAT-NEXT: vle32.v v16, (a3)
; NOREMAT-NEXT: csrr a3, vlenb
; NOREMAT-NEXT: slli a3, a3, 1
; NOREMAT-NEXT: add a3, sp, a3
; NOREMAT-NEXT: addi a3, a3, 288
; NOREMAT-NEXT: vl2r.v v26, (a3) # Unknown-size Folded Reload
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v2
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v4
; NOREMAT-NEXT: add a3, a0, s1
; NOREMAT-NEXT: vle32.v v26, (a3)
; NOREMAT-NEXT: vle32.v v28, (a3)
@@ -309,27 +309,27 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a3, sp, a3
; NOREMAT-NEXT: addi a3, a3, 288
; NOREMAT-NEXT: vs2r.v v28, (a3) # Unknown-size Folded Spill
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v4
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v2
; NOREMAT-NEXT: addiw ra, s1, 512
; NOREMAT-NEXT: add a3, a0, ra
; NOREMAT-NEXT: vle32.v v28, (a3)
; NOREMAT-NEXT: vle32.v v30, (a3)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v6
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v0
; NOREMAT-NEXT: slli s11, s10, 10
; NOREMAT-NEXT: add a3, a0, s11
-; NOREMAT-NEXT: vle32.v v2, (a3)
+; NOREMAT-NEXT: vle32.v v4, (a3)
; NOREMAT-NEXT: vle32.v v18, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v12
; NOREMAT-NEXT: addiw s10, s1, 1536
; NOREMAT-NEXT: add a3, a0, s10
-; NOREMAT-NEXT: vle32.v v4, (a3)
+; NOREMAT-NEXT: vle32.v v2, (a3)
; NOREMAT-NEXT: vle32.v v20, (a3)
; NOREMAT-NEXT: addi a3, sp, 288
; NOREMAT-NEXT: vl2r.v v12, (a3) # Unknown-size Folded Reload
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v8
; NOREMAT-NEXT: slli s9, s8, 11
; NOREMAT-NEXT: add a3, a0, s9
-; NOREMAT-NEXT: vle32.v v6, (a3)
+; NOREMAT-NEXT: vle32.v v0, (a3)
; NOREMAT-NEXT: vle32.v v12, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v24, v10
; NOREMAT-NEXT: lui t0, 6
@@ -337,7 +337,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a3, a0, s8
; NOREMAT-NEXT: vle32.v v8, (a3)
; NOREMAT-NEXT: vle32.v v22, (a3)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v14, v0
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v14, v6
; NOREMAT-NEXT: slli s7, s6, 10
; NOREMAT-NEXT: add a3, a0, s7
; NOREMAT-NEXT: vle32.v v10, (a3)
@@ -345,7 +345,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: sf.vc.vv 3, 0, v16, v26
; NOREMAT-NEXT: addiw s6, t0, -512
; NOREMAT-NEXT: add a3, a0, s6
-; NOREMAT-NEXT: vle32.v v0, (a3)
+; NOREMAT-NEXT: vle32.v v6, (a3)
; NOREMAT-NEXT: vle32.v v16, (a3)
; NOREMAT-NEXT: csrr a3, vlenb
; NOREMAT-NEXT: slli a3, a3, 2
@@ -361,7 +361,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a3, sp, a3
; NOREMAT-NEXT: addi a3, a3, 288
; NOREMAT-NEXT: vs2r.v v26, (a3) # Unknown-size Folded Spill
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v2
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v4
; NOREMAT-NEXT: addiw s5, t0, 512
; NOREMAT-NEXT: add a3, a0, s5
; NOREMAT-NEXT: vle32.v v26, (a3)
@@ -371,12 +371,12 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a3, sp, a3
; NOREMAT-NEXT: addi a3, a3, 288
; NOREMAT-NEXT: vs2r.v v28, (a3) # Unknown-size Folded Spill
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v4
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v18, v2
; NOREMAT-NEXT: slli s4, s3, 10
; NOREMAT-NEXT: add a3, a0, s4
; NOREMAT-NEXT: vle32.v v28, (a3)
; NOREMAT-NEXT: vle32.v v18, (a3)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v6
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v0
; NOREMAT-NEXT: addiw s3, t0, 1536
; NOREMAT-NEXT: add a3, a0, s3
; NOREMAT-NEXT: vle32.v v30, (a3)
@@ -384,23 +384,23 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v8
; NOREMAT-NEXT: slli s2, s2, 11
; NOREMAT-NEXT: add a3, a0, s2
-; NOREMAT-NEXT: vle32.v v2, (a3)
+; NOREMAT-NEXT: vle32.v v4, (a3)
; NOREMAT-NEXT: vle32.v v12, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v10
; NOREMAT-NEXT: lui a3, 7
; NOREMAT-NEXT: addiw s0, a3, -1536
; NOREMAT-NEXT: add a4, a0, s0
-; NOREMAT-NEXT: vle32.v v4, (a4)
+; NOREMAT-NEXT: vle32.v v2, (a4)
; NOREMAT-NEXT: vle32.v v22, (a4)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v14, v0
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v14, v6
; NOREMAT-NEXT: slli t6, t5, 10
; NOREMAT-NEXT: add a4, a0, t6
-; NOREMAT-NEXT: vle32.v v6, (a4)
+; NOREMAT-NEXT: vle32.v v0, (a4)
; NOREMAT-NEXT: vle32.v v14, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v16, v24
; NOREMAT-NEXT: addiw t5, a3, -512
; NOREMAT-NEXT: add a4, a0, t5
-; NOREMAT-NEXT: vle32.v v0, (a4)
+; NOREMAT-NEXT: vle32.v v6, (a4)
; NOREMAT-NEXT: vle32.v v16, (a4)
; NOREMAT-NEXT: csrr a4, vlenb
; NOREMAT-NEXT: slli a4, a4, 2
@@ -426,42 +426,42 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: add a4, a0, t3
; NOREMAT-NEXT: vle32.v v18, (a4)
; NOREMAT-NEXT: vle32.v v28, (a4)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v2
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v4
; NOREMAT-NEXT: addiw t2, a3, 1536
; NOREMAT-NEXT: add a4, a0, t2
; NOREMAT-NEXT: vle32.v v20, (a4)
; NOREMAT-NEXT: vle32.v v30, (a4)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v4
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v2
; NOREMAT-NEXT: slli t1, a2, 11
; NOREMAT-NEXT: add a2, a0, t1
; NOREMAT-NEXT: vle32.v v12, (a2)
-; NOREMAT-NEXT: vle32.v v2, (a2)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v6
+; NOREMAT-NEXT: vle32.v v4, (a2)
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v0
; NOREMAT-NEXT: lui a2, 8
; NOREMAT-NEXT: addiw a7, a2, -1536
; NOREMAT-NEXT: add a4, a0, a7
; NOREMAT-NEXT: vle32.v v22, (a4)
-; NOREMAT-NEXT: vle32.v v4, (a4)
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v14, v0
+; NOREMAT-NEXT: vle32.v v2, (a4)
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v14, v6
; NOREMAT-NEXT: slli a6, a5, 10
; NOREMAT-NEXT: add a4, a0, a6
; NOREMAT-NEXT: vle32.v v14, (a4)
-; NOREMAT-NEXT: vle32.v v0, (a4)
+; NOREMAT-NEXT: vle32.v v6, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v16, v26
; NOREMAT-NEXT: addiw a5, a2, -512
; NOREMAT-NEXT: add a4, a0, a5
; NOREMAT-NEXT: vle32.v v16, (a4)
; NOREMAT-NEXT: vle32.v v26, (a4)
; NOREMAT-NEXT: add a0, a0, a2
-; NOREMAT-NEXT: vle32.v v6, (a0)
+; NOREMAT-NEXT: vle32.v v0, (a0)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v8, v10
; NOREMAT-NEXT: sf.vc.vv 3, 0, v24, v18
; NOREMAT-NEXT: sf.vc.vv 3, 0, v28, v20
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v12
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v2, v22
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v14
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v0, v16
-; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v6
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v22
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v2, v14
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v16
+; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v0
; NOREMAT-NEXT: sf.vc.v.i 2, 0, v8, 0
; NOREMAT-NEXT: addi a0, a1, 1024
; NOREMAT-NEXT: vse32.v v8, (a0)
@@ -952,17 +952,17 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: li a2, 13
; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v0, (a2)
+; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: li a2, 27
; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: li a2, 7
; REMAT-NEXT: slli a2, a2, 11
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v4, (a2)
+; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: li a2, 29
; REMAT-NEXT: slli a2, a2, 9
@@ -986,23 +986,23 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v0
+; REMAT-NEXT: sf.vc.vv 3, 0, v16, v6
; REMAT-NEXT: lui a2, 4
; REMAT-NEXT: addiw a2, a2, 512
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v0, (a2)
+; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v18, v2
+; REMAT-NEXT: sf.vc.vv 3, 0, v18, v4
; REMAT-NEXT: li a2, 17
; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v20, v4
+; REMAT-NEXT: sf.vc.vv 3, 0, v20, v2
; REMAT-NEXT: lui a2, 4
; REMAT-NEXT: addiw a2, a2, 1536
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v4, (a2)
+; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v22, v24
; REMAT-NEXT: li a2, 9
@@ -1028,22 +1028,22 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: add a2, a0, ra
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v14, v0
+; REMAT-NEXT: sf.vc.vv 3, 0, v14, v6
; REMAT-NEXT: lui s11, 5
; REMAT-NEXT: add a2, a0, s11
-; REMAT-NEXT: vle32.v v0, (a2)
+; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v2
+; REMAT-NEXT: sf.vc.vv 3, 0, v16, v4
; REMAT-NEXT: lui s10, 5
; REMAT-NEXT: addiw s10, s10, 512
; REMAT-NEXT: add a2, a0, s10
-; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v18, v4
+; REMAT-NEXT: sf.vc.vv 3, 0, v18, v2
; REMAT-NEXT: li s9, 21
; REMAT-NEXT: slli s9, s9, 10
; REMAT-NEXT: add a2, a0, s9
-; REMAT-NEXT: vle32.v v4, (a2)
+; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v24
; REMAT-NEXT: lui s8, 5
@@ -1069,28 +1069,28 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: add a2, a0, s5
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v12, v0
+; REMAT-NEXT: sf.vc.vv 3, 0, v12, v6
; REMAT-NEXT: lui s4, 6
; REMAT-NEXT: addiw s4, s4, -512
; REMAT-NEXT: add a2, a0, s4
-; REMAT-NEXT: vle32.v v0, (a2)
+; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v14, v2
+; REMAT-NEXT: sf.vc.vv 3, 0, v14, v4
; REMAT-NEXT: lui s3, 6
; REMAT-NEXT: add a2, a0, s3
-; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v4
+; REMAT-NEXT: sf.vc.vv 3, 0, v16, v2
; REMAT-NEXT: lui s2, 6
; REMAT-NEXT: addiw s2, s2, 512
; REMAT-NEXT: add a2, a0, s2
-; REMAT-NEXT: vle32.v v4, (a2)
+; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v18, v24
; REMAT-NEXT: li s1, 25
; REMAT-NEXT: slli s1, s1, 10
; REMAT-NEXT: add a2, a0, s1
-; REMAT-NEXT: vle32.v v6, (a2)
+; REMAT-NEXT: vle32.v v0, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v26
; REMAT-NEXT: lui s0, 6
@@ -1110,24 +1110,24 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: add a2, a0, t5
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v24, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v10, v0
+; REMAT-NEXT: sf.vc.vv 3, 0, v10, v6
; REMAT-NEXT: li t4, 27
; REMAT-NEXT: slli t4, t4, 10
; REMAT-NEXT: add a2, a0, t4
-; REMAT-NEXT: vle32.v v0, (a2)
+; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v12, v2
+; REMAT-NEXT: sf.vc.vv 3, 0, v12, v4
; REMAT-NEXT: lui t3, 7
; REMAT-NEXT: addiw t3, t3, -512
; REMAT-NEXT: add a2, a0, t3
-; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v14, v4
+; REMAT-NEXT: sf.vc.vv 3, 0, v14, v2
; REMAT-NEXT: lui t2, 7
; REMAT-NEXT: add a2, a0, t2
-; REMAT-NEXT: vle32.v v4, (a2)
+; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v6
+; REMAT-NEXT: sf.vc.vv 3, 0, v16, v0
; REMAT-NEXT: lui t1, 7
; REMAT-NEXT: addiw t1, t1, 512
; REMAT-NEXT: add a2, a0, t1
@@ -1151,35 +1151,35 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: add a2, a0, a6
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: vle32.v v30, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v24, v0
+; REMAT-NEXT: sf.vc.vv 3, 0, v24, v6
; REMAT-NEXT: lui a5, 8
; REMAT-NEXT: addiw a5, a5, -1536
; REMAT-NEXT: add a2, a0, a5
; REMAT-NEXT: vle32.v v24, (a2)
-; REMAT-NEXT: vle32.v v0, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v10, v2
+; REMAT-NEXT: vle32.v v6, (a2)
+; REMAT-NEXT: sf.vc.vv 3, 0, v10, v4
; REMAT-NEXT: li a4, 31
; REMAT-NEXT: slli a4, a4, 10
; REMAT-NEXT: add a2, a0, a4
; REMAT-NEXT: vle32.v v10, (a2)
-; REMAT-NEXT: vle32.v v2, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v12, v4
+; REMAT-NEXT: vle32.v v4, (a2)
+; REMAT-NEXT: sf.vc.vv 3, 0, v12, v2
; REMAT-NEXT: lui a3, 8
; REMAT-NEXT: addiw a3, a3, -512
; REMAT-NEXT: add a2, a0, a3
; REMAT-NEXT: vle32.v v12, (a2)
-; REMAT-NEXT: vle32.v v4, (a2)
+; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: lui a2, 8
; REMAT-NEXT: add a0, a0, a2
-; REMAT-NEXT: vle32.v v6, (a0)
+; REMAT-NEXT: vle32.v v0, (a0)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v14
; REMAT-NEXT: sf.vc.vv 3, 0, v16, v18
; REMAT-NEXT: sf.vc.vv 3, 0, v26, v20
; REMAT-NEXT: sf.vc.vv 3, 0, v28, v22
; REMAT-NEXT: sf.vc.vv 3, 0, v30, v24
-; REMAT-NEXT: sf.vc.vv 3, 0, v0, v10
-; REMAT-NEXT: sf.vc.vv 3, 0, v2, v12
-; REMAT-NEXT: sf.vc.vv 3, 0, v4, v6
+; REMAT-NEXT: sf.vc.vv 3, 0, v6, v10
+; REMAT-NEXT: sf.vc.vv 3, 0, v4, v12
+; REMAT-NEXT: sf.vc.vv 3, 0, v2, v0
; REMAT-NEXT: sf.vc.v.i 2, 0, v8, 0
; REMAT-NEXT: addi a0, a1, 1024
; REMAT-NEXT: vse32.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
index 1fe91c721f4d..1d025a2f776f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
@@ -18,15 +18,15 @@ define void @test(ptr %addr) {
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: vl1re64.v v9, (a0)
-; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: vl1re64.v v9, (a3)
; CHECK-NEXT: vl1re64.v v10, (a0)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs1r.v v9, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v10, (a2)
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs1r.v v8, (a0)
+; CHECK-NEXT: vs1r.v v9, (a2)
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vs1r.v v8, (a1)
+; CHECK-NEXT: vs1r.v v10, (a0)
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index a9a680d54d58..64031f8a9359 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -16,13 +16,13 @@ define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrrs a2, vlenb, zero
-; CHECK-NEXT: vl1re64.v v8, (a0)
-; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: vl1re64.v v8, (a3)
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v9, (a2)
+; CHECK-NEXT: vs1r.v v8, (a2)
+; CHECK-NEXT: vs1r.v v9, (a0)
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index a2d02b6bb641..76aa2b913c65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -474,7 +474,7 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in)
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v12, v9, a0
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v12, v10, a0
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index be0c68f443af..2874db6debd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -148,17 +148,17 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 0
+; CHECK-NEXT: vmv.v.i v7, 0
; CHECK-NEXT: lui a1, 1047552
; CHECK-NEXT: addiw a1, a1, 1
; CHECK-NEXT: slli a1, a1, 23
; CHECK-NEXT: addi a1, a1, 1
; CHECK-NEXT: slli a1, a1, 18
-; CHECK-NEXT: vslide1down.vx v0, v0, a1
+; CHECK-NEXT: vslide1down.vx v0, v7, a1
; CHECK-NEXT: lui a1, 4
-; CHECK-NEXT: vmv.s.x v1, a1
+; CHECK-NEXT: vmv.s.x v7, a1
; CHECK-NEXT: vsetivli zero, 7, e64, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v1, 6
+; CHECK-NEXT: vslideup.vi v0, v7, 6
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, s0, -1536
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index 2c2301bee468..c273dcdfbca1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -453,7 +453,7 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB34_2
; CHECK-NEXT: # %bb.1:
@@ -467,7 +467,7 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vrsub.vi v24, v16, 0, v0.t
; CHECK-NEXT: vmax.vv v16, v16, v24, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index 08f7e2058ad2..c41624138599 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1535,7 +1535,7 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -1638,7 +1638,7 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a2
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a0, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index c49b1a7ad186..b9c611bf3e54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -1,6 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-KNOWNVLEN128
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA
+
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA
+
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
define void @extract_v2i8_v4i8_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v4i8_0:
@@ -63,22 +69,22 @@ define void @extract_v2i8_v8i8_6(ptr %x, ptr %y) {
}
define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v1i32_v8i32_4:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vle32.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v1i32_v8i32_4:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 4
+; VLA-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v1i32_v8i32_4:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v9, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v1i32_v8i32_4:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v9, (a1)
+; VLS-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %a, i64 4)
store <1 x i32> %c, ptr %y
@@ -86,24 +92,24 @@ define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
}
define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v1i32_v8i32_5:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vle32.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 5
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v1i32_v8i32_5:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 5
+; VLA-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v1i32_v8i32_5:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 1
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v1i32_v8i32_5:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v9, 1
+; VLS-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v8, (a1)
+; VLS-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %a, i64 5)
store <1 x i32> %c, ptr %y
@@ -111,20 +117,20 @@ define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
}
define void @extract_v2i32_v8i32_0(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i32_v8i32_0:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vle32.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i32_v8i32_0:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_v8i32_0:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i32_v8i32_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v8, (a1)
+; VLS-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0)
store <2 x i32> %c, ptr %y
@@ -132,24 +138,24 @@ define void @extract_v2i32_v8i32_0(ptr %x, ptr %y) {
}
define void @extract_v2i32_v8i32_2(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i32_v8i32_2:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vle32.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i32_v8i32_2:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 2
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_v8i32_2:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i32_v8i32_2:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 2
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v8, (a1)
+; VLS-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2)
store <2 x i32> %c, ptr %y
@@ -157,22 +163,22 @@ define void @extract_v2i32_v8i32_2(ptr %x, ptr %y) {
}
define void @extract_v2i32_v8i32_4(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i32_v8i32_4:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vle32.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i32_v8i32_4:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, m2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 4
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_v8i32_4:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v9, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i32_v8i32_4:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v9, (a1)
+; VLS-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 4)
store <2 x i32> %c, ptr %y
@@ -180,24 +186,24 @@ define void @extract_v2i32_v8i32_4(ptr %x, ptr %y) {
}
define void @extract_v2i32_v8i32_6(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i32_v8i32_6:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vle32.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i32_v8i32_6:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, m2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 6
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_v8i32_6:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vl2re32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 2
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i32_v8i32_6:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v9, 2
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v8, (a1)
+; VLS-NEXT: ret
%a = load <8 x i32>, ptr %x
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6)
store <2 x i32> %c, ptr %y
@@ -230,59 +236,59 @@ define void @extract_v2i32_nxv16i32_2(<vscale x 16 x i32> %x, ptr %y) {
}
define void @extract_v2i32_nxv16i32_4(<vscale x 16 x i32> %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i32_nxv16i32_4:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a0)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i32_nxv16i32_4:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, m2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 4
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a0)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_nxv16i32_4:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v9, (a0)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i32_nxv16i32_4:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v9, (a0)
+; VLS-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 4)
store <2 x i32> %c, ptr %y
ret void
}
define void @extract_v2i32_nxv16i32_6(<vscale x 16 x i32> %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i32_nxv16i32_6:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a0)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i32_nxv16i32_6:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, m2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 6
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a0)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_nxv16i32_6:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 2
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i32_nxv16i32_6:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v9, 2
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v8, (a0)
+; VLS-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 6)
store <2 x i32> %c, ptr %y
ret void
}
define void @extract_v2i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i32_nxv16i32_8:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m4, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 8
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a0)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i32_nxv16i32_8:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, m4, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 8
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vse32.v v8, (a0)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i32_nxv16i32_8:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vse32.v v10, (a0)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i32_nxv16i32_8:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vse32.v v10, (a0)
+; VLS-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
store <2 x i32> %c, ptr %y
ret void
@@ -339,40 +345,40 @@ define void @extract_v2i8_nxv2i8_6(<vscale x 2 x i8> %x, ptr %y) {
}
define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
-; CHECK-V-LABEL: extract_v8i32_nxv16i32_8:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 8
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vse32.v v8, (a0)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v8i32_nxv16i32_8:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m4, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 8
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vse32.v v8, (a0)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v8i32_nxv16i32_8:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vs2r.v v10, (a0)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v8i32_nxv16i32_8:
+; VLS: # %bb.0:
+; VLS-NEXT: vs2r.v v10, (a0)
+; VLS-NEXT: ret
%c = call <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
store <8 x i32> %c, ptr %y
ret void
}
define void @extract_v8i1_v64i1_0(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v8i1_v64i1_0:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: li a2, 64
-; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-V-NEXT: vlm.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vsm.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v8i1_v64i1_0:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 64
+; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; VLA-NEXT: vlm.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vsm.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v8i1_v64i1_0:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vlm.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v8i1_v64i1_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; VLS-NEXT: vlm.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vsm.v v8, (a1)
+; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0)
store <8 x i1> %c, ptr %y
@@ -380,26 +386,26 @@ define void @extract_v8i1_v64i1_0(ptr %x, ptr %y) {
}
define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v8i1_v64i1_8:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: li a2, 64
-; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-V-NEXT: vlm.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vsm.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v8i1_v64i1_8:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 64
+; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; VLA-NEXT: vlm.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 1
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vsm.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v8i1_v64i1_8:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vlm.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v8i1_v64i1_8:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; VLS-NEXT: vlm.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 1
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vsm.v v8, (a1)
+; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8)
store <8 x i1> %c, ptr %y
@@ -407,26 +413,26 @@ define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
}
define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v8i1_v64i1_48:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: li a2, 64
-; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-V-NEXT: vlm.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vsm.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v8i1_v64i1_48:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 64
+; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; VLA-NEXT: vlm.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 6
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vsm.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v8i1_v64i1_48:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vlm.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v8i1_v64i1_48:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; VLS-NEXT: vlm.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 6
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vsm.v v8, (a1)
+; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48)
store <8 x i1> %c, ptr %y
@@ -508,38 +514,38 @@ define void @extract_v8i1_nxv64i1_192(<vscale x 64 x i1> %x, ptr %y) {
}
define void @extract_v2i1_v64i1_0(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i1_v64i1_0:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: li a2, 64
-; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-V-NEXT: vlm.v v0, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmv.v.i v9, 0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-V-NEXT: vmv.v.v v9, v8
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmsne.vi v8, v9, 0
-; CHECK-V-NEXT: vsm.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i1_v64i1_0:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 64
+; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; VLA-NEXT: vlm.v v0, (a0)
+; VLA-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmv.v.i v9, 0
+; VLA-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmsne.vi v8, v9, 0
+; VLA-NEXT: vsm.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_v64i1_0:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vlm.v v0, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i1_v64i1_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; VLS-NEXT: vlm.v v0, (a0)
+; VLS-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmv.v.i v9, 0
+; VLS-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLS-NEXT: vmv.v.v v9, v8
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmsne.vi v8, v9, 0
+; VLS-NEXT: vsm.v v8, (a1)
+; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0)
store <2 x i1> %c, ptr %y
@@ -547,48 +553,48 @@ define void @extract_v2i1_v64i1_0(ptr %x, ptr %y) {
}
define void @extract_v2i1_v64i1_2(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i1_v64i1_2:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: li a2, 64
-; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-V-NEXT: vlm.v v0, (a0)
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, m1, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-V-NEXT: vmsne.vi v0, v8, 0
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmv.v.i v9, 0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-V-NEXT: vmv.v.v v9, v8
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmsne.vi v8, v9, 0
-; CHECK-V-NEXT: vsm.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i1_v64i1_2:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 64
+; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; VLA-NEXT: vlm.v v0, (a0)
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 2
+; VLA-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLA-NEXT: vmsne.vi v0, v8, 0
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmv.v.i v9, 0
+; VLA-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmsne.vi v8, v9, 0
+; VLA-NEXT: vsm.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_v64i1_2:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vlm.v v0, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i1_v64i1_2:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; VLS-NEXT: vlm.v v0, (a0)
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 2
+; VLS-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLS-NEXT: vmsne.vi v0, v8, 0
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmv.v.i v9, 0
+; VLS-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLS-NEXT: vmv.v.v v9, v8
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmsne.vi v8, v9, 0
+; VLS-NEXT: vsm.v v8, (a1)
+; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2)
store <2 x i1> %c, ptr %y
@@ -596,49 +602,49 @@ define void @extract_v2i1_v64i1_2(ptr %x, ptr %y) {
}
define void @extract_v2i1_v64i1_42(ptr %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i1_v64i1_42:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: li a2, 64
-; CHECK-V-NEXT: vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-V-NEXT: vlm.v v0, (a0)
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: li a0, 42
-; CHECK-V-NEXT: vsetivli zero, 2, e8, m4, ta, ma
-; CHECK-V-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-V-NEXT: vmsne.vi v0, v8, 0
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmv.v.i v9, 0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-V-NEXT: vmv.v.v v9, v8
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmsne.vi v8, v9, 0
-; CHECK-V-NEXT: vsm.v v8, (a1)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i1_v64i1_42:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 64
+; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; VLA-NEXT: vlm.v v0, (a0)
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: li a0, 42
+; VLA-NEXT: vsetivli zero, 2, e8, m4, ta, ma
+; VLA-NEXT: vslidedown.vx v8, v8, a0
+; VLA-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLA-NEXT: vmsne.vi v0, v8, 0
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmv.v.i v9, 0
+; VLA-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmsne.vi v8, v9, 0
+; VLA-NEXT: vsm.v v8, (a1)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_v64i1_42:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a2, zero, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vlm.v v0, (a0)
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v10, 10
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a1)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i1_v64i1_42:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; VLS-NEXT: vlm.v v0, (a0)
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v10, 10
+; VLS-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLS-NEXT: vmsne.vi v0, v8, 0
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmv.v.i v9, 0
+; VLS-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLS-NEXT: vmv.v.v v9, v8
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmsne.vi v8, v9, 0
+; VLS-NEXT: vsm.v v8, (a1)
+; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42)
store <2 x i1> %c, ptr %y
@@ -665,45 +671,45 @@ define void @extract_v2i1_nxv2i1_0(<vscale x 2 x i1> %x, ptr %y) {
}
define void @extract_v2i1_nxv2i1_2(<vscale x 2 x i1> %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i1_nxv2i1_2:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-V-NEXT: vmsne.vi v0, v8, 0
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmv.v.i v9, 0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-V-NEXT: vmv.v.v v9, v8
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmsne.vi v8, v9, 0
-; CHECK-V-NEXT: vsm.v v8, (a0)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i1_nxv2i1_2:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 2
+; VLA-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLA-NEXT: vmsne.vi v0, v8, 0
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmv.v.i v9, 0
+; VLA-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmsne.vi v8, v9, 0
+; VLA-NEXT: vsm.v v8, (a0)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_nxv2i1_2:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i1_nxv2i1_2:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 2
+; VLS-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLS-NEXT: vmsne.vi v0, v8, 0
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmv.v.i v9, 0
+; VLS-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLS-NEXT: vmv.v.v v9, v8
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmsne.vi v8, v9, 0
+; VLS-NEXT: vsm.v v8, (a0)
+; VLS-NEXT: ret
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 2)
store <2 x i1> %c, ptr %y
ret void
@@ -754,91 +760,91 @@ define void @extract_v2i1_nxv64i1_2(<vscale x 64 x i1> %x, ptr %y) {
}
define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i1_nxv64i1_42:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: li a1, 42
-; CHECK-V-NEXT: vsetivli zero, 2, e8, m4, ta, ma
-; CHECK-V-NEXT: vslidedown.vx v8, v8, a1
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-V-NEXT: vmsne.vi v0, v8, 0
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmv.v.i v9, 0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-V-NEXT: vmv.v.v v9, v8
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmsne.vi v8, v9, 0
-; CHECK-V-NEXT: vsm.v v8, (a0)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i1_nxv64i1_42:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: li a1, 42
+; VLA-NEXT: vsetivli zero, 2, e8, m4, ta, ma
+; VLA-NEXT: vslidedown.vx v8, v8, a1
+; VLA-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLA-NEXT: vmsne.vi v0, v8, 0
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmv.v.i v9, 0
+; VLA-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmsne.vi v8, v9, 0
+; VLA-NEXT: vsm.v v8, (a0)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_nxv64i1_42:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v10, 10
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i1_nxv64i1_42:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v10, 10
+; VLS-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLS-NEXT: vmsne.vi v0, v8, 0
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmv.v.i v9, 0
+; VLS-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLS-NEXT: vmv.v.v v9, v8
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmsne.vi v8, v9, 0
+; VLS-NEXT: vsm.v v8, (a0)
+; VLS-NEXT: ret
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 42)
store <2 x i1> %c, ptr %y
ret void
}
define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, ptr %y) {
-; CHECK-V-LABEL: extract_v2i1_nxv32i1_26:
-; CHECK-V: # %bb.0:
-; CHECK-V-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, m2, ta, ma
-; CHECK-V-NEXT: vslidedown.vi v8, v8, 26
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-V-NEXT: vmsne.vi v0, v8, 0
-; CHECK-V-NEXT: vmv.v.i v8, 0
-; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmv.v.i v9, 0
-; CHECK-V-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-V-NEXT: vmv.v.v v9, v8
-; CHECK-V-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-V-NEXT: vmsne.vi v8, v9, 0
-; CHECK-V-NEXT: vsm.v v8, (a0)
-; CHECK-V-NEXT: ret
+; VLA-LABEL: extract_v2i1_nxv32i1_26:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 2, e8, m2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 26
+; VLA-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLA-NEXT: vmsne.vi v0, v8, 0
+; VLA-NEXT: vmv.v.i v8, 0
+; VLA-NEXT: vmerge.vim v8, v8, 1, v0
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmv.v.i v9, 0
+; VLA-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vmsne.vi v8, v9, 0
+; VLA-NEXT: vsm.v v8, (a0)
+; VLA-NEXT: ret
;
-; CHECK-KNOWNVLEN128-LABEL: extract_v2i1_nxv32i1_26:
-; CHECK-KNOWNVLEN128: # %bb.0:
-; CHECK-KNOWNVLEN128-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, m1, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vslidedown.vi v8, v9, 10
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v0, v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v8, 0
-; CHECK-KNOWNVLEN128-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.i v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-KNOWNVLEN128-NEXT: vmv.v.v v9, v8
-; CHECK-KNOWNVLEN128-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-KNOWNVLEN128-NEXT: vmsne.vi v8, v9, 0
-; CHECK-KNOWNVLEN128-NEXT: vsm.v v8, (a0)
-; CHECK-KNOWNVLEN128-NEXT: ret
+; VLS-LABEL: extract_v2i1_nxv32i1_26:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 2, e8, m1, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v9, 10
+; VLS-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; VLS-NEXT: vmsne.vi v0, v8, 0
+; VLS-NEXT: vmv.v.i v8, 0
+; VLS-NEXT: vmerge.vim v8, v8, 1, v0
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmv.v.i v9, 0
+; VLS-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
+; VLS-NEXT: vmv.v.v v9, v8
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vmsne.vi v8, v9, 0
+; VLS-NEXT: vsm.v v8, (a0)
+; VLS-NEXT: ret
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %x, i64 26)
store <2 x i1> %c, ptr %y
ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index 4a5ef21efdb9..3b7480117d37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -581,18 +581,18 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -609,9 +609,9 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v24
; CHECK-NEXT: ret
@@ -632,7 +632,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
@@ -643,7 +643,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
@@ -663,7 +663,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vmfeq.vv v26, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a0, vlenb
@@ -674,11 +674,11 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -691,7 +691,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -708,11 +708,11 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -758,7 +758,7 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: vmerge.vvm v24, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
@@ -766,7 +766,7 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -792,9 +792,9 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v8, v8
+; CHECK-NEXT: vmfeq.vv v7, v8, v8
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: vfmax.vv v16, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll
index d0ba28fc30f4..02c2fafc8978 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll
@@ -266,9 +266,9 @@ define <16 x double> @vfmax_v16f64_vv(<16 x double> %a, <16 x double> %b) nounwi
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index 8ea08bdedd6d..57275df57a31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -581,18 +581,18 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -609,9 +609,9 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: ret
@@ -632,7 +632,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
@@ -643,7 +643,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
@@ -663,7 +663,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vmfeq.vv v26, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a0, vlenb
@@ -674,11 +674,11 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -691,7 +691,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -708,11 +708,11 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -758,7 +758,7 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: vmerge.vvm v24, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
@@ -766,7 +766,7 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -792,9 +792,9 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v8, v8
+; CHECK-NEXT: vmfeq.vv v7, v8, v8
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: vfmin.vv v16, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll
index 10e972963d4e..b15d697f0754 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll
@@ -266,9 +266,9 @@ define <16 x double> @vfmin_v16f64_vv(<16 x double> %a, <16 x double> %b) nounwi
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index efb1f720f2d0..9f0240c53b21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -1,9 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV32VLA
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV64VLA
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV32VLA
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV64VLA
+
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS,RV32VLS %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS,RV64VLS %s
define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
; CHECK-LABEL: insert_nxv8i32_v2i32_0:
@@ -45,26 +48,40 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, ptr %
}
define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
-; CHECK-LABEL: insert_nxv8i32_v8i32_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma
-; CHECK-NEXT: vmv.v.v v8, v12
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_nxv8i32_v8i32_0:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v12, (a0)
+; VLA-NEXT: vsetivli zero, 8, e32, m4, tu, ma
+; VLA-NEXT: vmv.v.v v8, v12
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_nxv8i32_v8i32_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v12, (a0)
+; VLS-NEXT: vsetivli zero, 8, e32, m4, tu, ma
+; VLS-NEXT: vmv.v.v v8, v12
+; VLS-NEXT: ret
%sv = load <8 x i32>, ptr %svp
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, ptr %svp) {
-; CHECK-LABEL: insert_nxv8i32_v8i32_8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 8
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_nxv8i32_v8i32_8:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v12, (a0)
+; VLA-NEXT: vsetivli zero, 16, e32, m4, tu, ma
+; VLA-NEXT: vslideup.vi v8, v12, 8
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_nxv8i32_v8i32_8:
+; VLS: # %bb.0:
+; VLS-NEXT: vl2re32.v v12, (a0)
+; VLS-NEXT: vsetivli zero, 16, e32, m4, tu, ma
+; VLS-NEXT: vslideup.vi v8, v12, 8
+; VLS-NEXT: ret
%sv = load <8 x i32>, ptr %svp
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
ret <vscale x 8 x i32> %v
@@ -82,17 +99,27 @@ define <vscale x 8 x i32> @insert_nxv8i32_undef_v2i32_0(ptr %svp) {
}
define void @insert_v4i32_v2i32_0(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v4i32_v2i32_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; CHECK-NEXT: vmv.v.v v9, v8
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v4i32_v2i32_0:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; VLA-NEXT: vle32.v v9, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; VLA-NEXT: vse32.v v9, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v4i32_v2i32_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vle32.v v8, (a1)
+; VLS-NEXT: vl1re32.v v9, (a0)
+; VLS-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; VLS-NEXT: vmv.v.v v9, v8
+; VLS-NEXT: vs1r.v v9, (a0)
+; VLS-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <4 x i32>, ptr %vp
%v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 0)
@@ -101,15 +128,25 @@ define void @insert_v4i32_v2i32_0(ptr %vp, ptr %svp) {
}
define void @insert_v4i32_v2i32_2(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v4i32_v2i32_2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslideup.vi v9, v8, 2
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v4i32_v2i32_2:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; VLA-NEXT: vle32.v v9, (a0)
+; VLA-NEXT: vslideup.vi v9, v8, 2
+; VLA-NEXT: vse32.v v9, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v4i32_v2i32_2:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vle32.v v8, (a1)
+; VLS-NEXT: vl1re32.v v9, (a0)
+; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; VLS-NEXT: vslideup.vi v9, v8, 2
+; VLS-NEXT: vs1r.v v9, (a0)
+; VLS-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <4 x i32>, ptr %vp
%v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 2)
@@ -118,13 +155,20 @@ define void @insert_v4i32_v2i32_2(ptr %vp, ptr %svp) {
}
define void @insert_v4i32_undef_v2i32_0(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v4i32_undef_v2i32_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v4i32_undef_v2i32_0:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; VLA-NEXT: vse32.v v8, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v4i32_undef_v2i32_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vle32.v v8, (a1)
+; VLS-NEXT: vs1r.v v8, (a0)
+; VLS-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %sv, i64 0)
store <4 x i32> %v, ptr %vp
@@ -132,17 +176,27 @@ define void @insert_v4i32_undef_v2i32_0(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v8i32_v2i32_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m2, tu, ma
-; CHECK-NEXT: vmv.v.v v10, v8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v8i32_v2i32_0:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v10, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; VLA-NEXT: vmv.v.v v10, v8
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v8i32_v2i32_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vle32.v v8, (a1)
+; VLS-NEXT: vl2re32.v v10, (a0)
+; VLS-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; VLS-NEXT: vmv.v.v v10, v8
+; VLS-NEXT: vs2r.v v10, (a0)
+; VLS-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <8 x i32>, ptr %vp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 0)
@@ -151,17 +205,27 @@ define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v8i32_v2i32_2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v8i32_v2i32_2:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v10, (a0)
+; VLA-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; VLA-NEXT: vslideup.vi v10, v8, 2
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v8i32_v2i32_2:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vle32.v v10, (a1)
+; VLS-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; VLS-NEXT: vslideup.vi v8, v10, 2
+; VLS-NEXT: vs2r.v v8, (a0)
+; VLS-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <8 x i32>, ptr %vp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 2)
@@ -170,15 +234,25 @@ define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_v2i32_6(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v8i32_v2i32_6:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vslideup.vi v10, v8, 6
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v8i32_v2i32_6:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v10, (a0)
+; VLA-NEXT: vslideup.vi v10, v8, 6
+; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v8i32_v2i32_6:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vl2re32.v v8, (a0)
+; VLS-NEXT: vle32.v v10, (a1)
+; VLS-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLS-NEXT: vslideup.vi v8, v10, 6
+; VLS-NEXT: vs2r.v v8, (a0)
+; VLS-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%vec = load <8 x i32>, ptr %vp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 6)
@@ -187,14 +261,23 @@ define void @insert_v8i32_v2i32_6(ptr %vp, ptr %svp) {
}
define void @insert_v8i32_undef_v2i32_6(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v8i32_undef_v2i32_6:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
-; CHECK-NEXT: vse32.v v10, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v8i32_undef_v2i32_6:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vslideup.vi v10, v8, 6
+; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v8i32_undef_v2i32_6:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLS-NEXT: vle32.v v8, (a1)
+; VLS-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLS-NEXT: vslideup.vi v10, v8, 6
+; VLS-NEXT: vs2r.v v10, (a0)
+; VLS-NEXT: ret
%sv = load <2 x i32>, ptr %svp
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6)
store <8 x i32> %v, ptr %vp
@@ -239,18 +322,30 @@ define void @insert_v4i16_v2i16_2(ptr %vp, ptr %svp) {
}
define void @insert_v32i1_v8i1_0(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v32i1_v8i1_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vlm.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, tu, ma
-; CHECK-NEXT: vmv.v.v v8, v9
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT: vsm.v v8, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v32i1_v8i1_0:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 32
+; VLA-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; VLA-NEXT: vlm.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vlm.v v9, (a1)
+; VLA-NEXT: vsetivli zero, 1, e8, mf4, tu, ma
+; VLA-NEXT: vmv.v.v v8, v9
+; VLA-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; VLA-NEXT: vsm.v v8, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v32i1_v8i1_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m2, ta, ma
+; VLS-NEXT: vlm.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vlm.v v9, (a1)
+; VLS-NEXT: vsetivli zero, 1, e8, mf4, tu, ma
+; VLS-NEXT: vmv.v.v v8, v9
+; VLS-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; VLS-NEXT: vsm.v v8, (a0)
+; VLS-NEXT: ret
%v = load <32 x i1>, ptr %vp
%sv = load <8 x i1>, ptr %svp
%c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 0)
@@ -259,18 +354,30 @@ define void @insert_v32i1_v8i1_0(ptr %vp, ptr %svp) {
}
define void @insert_v32i1_v8i1_16(ptr %vp, ptr %svp) {
-; CHECK-LABEL: insert_v32i1_v8i1_16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vlm.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT: vsm.v v8, (a0)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v32i1_v8i1_16:
+; VLA: # %bb.0:
+; VLA-NEXT: li a2, 32
+; VLA-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; VLA-NEXT: vlm.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vlm.v v9, (a1)
+; VLA-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
+; VLA-NEXT: vslideup.vi v8, v9, 2
+; VLA-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; VLA-NEXT: vsm.v v8, (a0)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v32i1_v8i1_16:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetvli a2, zero, e8, m2, ta, ma
+; VLS-NEXT: vlm.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vlm.v v9, (a1)
+; VLS-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
+; VLS-NEXT: vslideup.vi v8, v9, 2
+; VLS-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; VLS-NEXT: vsm.v v8, (a0)
+; VLS-NEXT: ret
%v = load <32 x i1>, ptr %vp
%sv = load <8 x i1>, ptr %svp
%c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 16)
@@ -358,22 +465,36 @@ define <vscale x 2 x i16> @insert_nxv2i16_v2i16_2(<vscale x 2 x i16> %v, ptr %sv
}
define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
-; CHECK-LABEL: insert_nxv2i1_v4i1_0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; CHECK-NEXT: vmv.v.v v9, v8
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v9, 0
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_nxv2i1_v4i1_0:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; VLA-NEXT: vlm.v v8, (a0)
+; VLA-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; VLA-NEXT: vmv.v.i v9, 0
+; VLA-NEXT: vmerge.vim v9, v9, 1, v0
+; VLA-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; VLA-NEXT: vmv.v.i v10, 0
+; VLA-NEXT: vmv1r.v v0, v8
+; VLA-NEXT: vmerge.vim v8, v10, 1, v0
+; VLA-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
+; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; VLA-NEXT: vmsne.vi v0, v9, 0
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_nxv2i1_v4i1_0:
+; VLS: # %bb.0:
+; VLS-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; VLS-NEXT: vlm.v v8, (a0)
+; VLS-NEXT: vmv.v.i v9, 0
+; VLS-NEXT: vmerge.vim v10, v9, 1, v0
+; VLS-NEXT: vmv1r.v v0, v8
+; VLS-NEXT: vmerge.vim v8, v9, 1, v0
+; VLS-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
+; VLS-NEXT: vmv.v.v v10, v8
+; VLS-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; VLS-NEXT: vmsne.vi v0, v10, 0
+; VLS-NEXT: ret
%sv = load <4 x i1>, ptr %svp
%c = call <vscale x 2 x i1> @llvm.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1> %v, <4 x i1> %sv, i64 0)
ret <vscale x 2 x i1> %c
@@ -408,15 +529,24 @@ define <vscale x 8 x i1> @insert_nxv8i1_v8i1_16(<vscale x 8 x i1> %v, ptr %svp)
declare <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)
define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, ptr %out) {
-; CHECK-LABEL: insert_v2i64_nxv16i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vle64.v v16, (a1)
-; CHECK-NEXT: vsetivli zero, 6, e64, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v16, 4
-; CHECK-NEXT: vs8r.v v8, (a2)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v2i64_nxv16i64:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; VLA-NEXT: vle64.v v8, (a0)
+; VLA-NEXT: vle64.v v16, (a1)
+; VLA-NEXT: vsetivli zero, 6, e64, m8, tu, ma
+; VLA-NEXT: vslideup.vi v8, v16, 4
+; VLA-NEXT: vs8r.v v8, (a2)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v2i64_nxv16i64:
+; VLS: # %bb.0:
+; VLS-NEXT: vl1re64.v v8, (a0)
+; VLS-NEXT: vl1re64.v v16, (a1)
+; VLS-NEXT: vsetivli zero, 6, e64, m8, tu, ma
+; VLS-NEXT: vslideup.vi v8, v16, 4
+; VLS-NEXT: vs8r.v v8, (a2)
+; VLS-NEXT: ret
%sv0 = load <2 x i64>, ptr %psv0
%sv1 = load <2 x i64>, ptr %psv1
%v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
@@ -426,12 +556,18 @@ define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, ptr %out) {
}
define void @insert_v2i64_nxv16i64_lo0(ptr %psv, ptr %out) {
-; CHECK-LABEL: insert_v2i64_nxv16i64_lo0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v2i64_nxv16i64_lo0:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; VLA-NEXT: vle64.v v8, (a0)
+; VLA-NEXT: vs8r.v v8, (a1)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v2i64_nxv16i64_lo0:
+; VLS: # %bb.0:
+; VLS-NEXT: vl1re64.v v8, (a0)
+; VLS-NEXT: vs8r.v v8, (a1)
+; VLS-NEXT: ret
%sv = load <2 x i64>, ptr %psv
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
store <vscale x 16 x i64> %v, ptr %out
@@ -439,14 +575,22 @@ define void @insert_v2i64_nxv16i64_lo0(ptr %psv, ptr %out) {
}
define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) {
-; CHECK-LABEL: insert_v2i64_nxv16i64_lo2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, ma
-; CHECK-NEXT: vslideup.vi v16, v8, 2
-; CHECK-NEXT: vs8r.v v16, (a1)
-; CHECK-NEXT: ret
+; VLA-LABEL: insert_v2i64_nxv16i64_lo2:
+; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; VLA-NEXT: vle64.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 4, e64, m8, ta, ma
+; VLA-NEXT: vslideup.vi v16, v8, 2
+; VLA-NEXT: vs8r.v v16, (a1)
+; VLA-NEXT: ret
+;
+; VLS-LABEL: insert_v2i64_nxv16i64_lo2:
+; VLS: # %bb.0:
+; VLS-NEXT: vl1re64.v v8, (a0)
+; VLS-NEXT: vsetivli zero, 4, e64, m8, ta, ma
+; VLS-NEXT: vslideup.vi v16, v8, 2
+; VLS-NEXT: vs8r.v v16, (a1)
+; VLS-NEXT: ret
%sv = load <2 x i64>, ptr %psv
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
store <vscale x 16 x i64> %v, ptr %out
@@ -521,6 +665,127 @@ define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+; RV32VLA-LABEL: insert_v2i64_nxv16i64_hi:
+; RV32VLA: # %bb.0:
+; RV32VLA-NEXT: addi sp, sp, -80
+; RV32VLA-NEXT: .cfi_def_cfa_offset 80
+; RV32VLA-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32VLA-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32VLA-NEXT: .cfi_offset ra, -4
+; RV32VLA-NEXT: .cfi_offset s0, -8
+; RV32VLA-NEXT: addi s0, sp, 80
+; RV32VLA-NEXT: .cfi_def_cfa s0, 0
+; RV32VLA-NEXT: csrr a2, vlenb
+; RV32VLA-NEXT: slli a2, a2, 4
+; RV32VLA-NEXT: sub sp, sp, a2
+; RV32VLA-NEXT: andi sp, sp, -64
+; RV32VLA-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32VLA-NEXT: vle64.v v8, (a0)
+; RV32VLA-NEXT: addi a0, sp, 128
+; RV32VLA-NEXT: vse64.v v8, (a0)
+; RV32VLA-NEXT: csrr a0, vlenb
+; RV32VLA-NEXT: slli a0, a0, 3
+; RV32VLA-NEXT: addi a2, sp, 64
+; RV32VLA-NEXT: add a3, a2, a0
+; RV32VLA-NEXT: vl8re64.v v8, (a3)
+; RV32VLA-NEXT: vl8re64.v v16, (a2)
+; RV32VLA-NEXT: add a0, a1, a0
+; RV32VLA-NEXT: vs8r.v v8, (a0)
+; RV32VLA-NEXT: vs8r.v v16, (a1)
+; RV32VLA-NEXT: addi sp, s0, -80
+; RV32VLA-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32VLA-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32VLA-NEXT: addi sp, sp, 80
+; RV32VLA-NEXT: ret
+;
+; RV64VLA-LABEL: insert_v2i64_nxv16i64_hi:
+; RV64VLA: # %bb.0:
+; RV64VLA-NEXT: addi sp, sp, -80
+; RV64VLA-NEXT: .cfi_def_cfa_offset 80
+; RV64VLA-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64VLA-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64VLA-NEXT: .cfi_offset ra, -8
+; RV64VLA-NEXT: .cfi_offset s0, -16
+; RV64VLA-NEXT: addi s0, sp, 80
+; RV64VLA-NEXT: .cfi_def_cfa s0, 0
+; RV64VLA-NEXT: csrr a2, vlenb
+; RV64VLA-NEXT: slli a2, a2, 4
+; RV64VLA-NEXT: sub sp, sp, a2
+; RV64VLA-NEXT: andi sp, sp, -64
+; RV64VLA-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64VLA-NEXT: vle64.v v8, (a0)
+; RV64VLA-NEXT: addi a0, sp, 128
+; RV64VLA-NEXT: vse64.v v8, (a0)
+; RV64VLA-NEXT: csrr a0, vlenb
+; RV64VLA-NEXT: slli a0, a0, 3
+; RV64VLA-NEXT: addi a2, sp, 64
+; RV64VLA-NEXT: add a3, a2, a0
+; RV64VLA-NEXT: vl8re64.v v8, (a3)
+; RV64VLA-NEXT: vl8re64.v v16, (a2)
+; RV64VLA-NEXT: add a0, a1, a0
+; RV64VLA-NEXT: vs8r.v v8, (a0)
+; RV64VLA-NEXT: vs8r.v v16, (a1)
+; RV64VLA-NEXT: addi sp, s0, -80
+; RV64VLA-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64VLA-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64VLA-NEXT: addi sp, sp, 80
+; RV64VLA-NEXT: ret
+;
+; RV32VLS-LABEL: insert_v2i64_nxv16i64_hi:
+; RV32VLS: # %bb.0:
+; RV32VLS-NEXT: addi sp, sp, -80
+; RV32VLS-NEXT: .cfi_def_cfa_offset 80
+; RV32VLS-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32VLS-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32VLS-NEXT: .cfi_offset ra, -4
+; RV32VLS-NEXT: .cfi_offset s0, -8
+; RV32VLS-NEXT: addi s0, sp, 80
+; RV32VLS-NEXT: .cfi_def_cfa s0, 0
+; RV32VLS-NEXT: addi sp, sp, -256
+; RV32VLS-NEXT: andi sp, sp, -64
+; RV32VLS-NEXT: vl1re64.v v8, (a0)
+; RV32VLS-NEXT: addi a0, sp, 128
+; RV32VLS-NEXT: vs1r.v v8, (a0)
+; RV32VLS-NEXT: addi a0, sp, 64
+; RV32VLS-NEXT: addi a2, sp, 192
+; RV32VLS-NEXT: vl8re64.v v8, (a2)
+; RV32VLS-NEXT: vl8re64.v v16, (a0)
+; RV32VLS-NEXT: addi a0, a1, 128
+; RV32VLS-NEXT: vs8r.v v8, (a0)
+; RV32VLS-NEXT: vs8r.v v16, (a1)
+; RV32VLS-NEXT: addi sp, s0, -80
+; RV32VLS-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32VLS-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32VLS-NEXT: addi sp, sp, 80
+; RV32VLS-NEXT: ret
+;
+; RV64VLS-LABEL: insert_v2i64_nxv16i64_hi:
+; RV64VLS: # %bb.0:
+; RV64VLS-NEXT: addi sp, sp, -80
+; RV64VLS-NEXT: .cfi_def_cfa_offset 80
+; RV64VLS-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64VLS-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64VLS-NEXT: .cfi_offset ra, -8
+; RV64VLS-NEXT: .cfi_offset s0, -16
+; RV64VLS-NEXT: addi s0, sp, 80
+; RV64VLS-NEXT: .cfi_def_cfa s0, 0
+; RV64VLS-NEXT: addi sp, sp, -256
+; RV64VLS-NEXT: andi sp, sp, -64
+; RV64VLS-NEXT: vl1re64.v v8, (a0)
+; RV64VLS-NEXT: addi a0, sp, 128
+; RV64VLS-NEXT: vs1r.v v8, (a0)
+; RV64VLS-NEXT: addi a0, sp, 192
+; RV64VLS-NEXT: vl8re64.v v8, (a0)
+; RV64VLS-NEXT: addi a0, sp, 64
+; RV64VLS-NEXT: vl8re64.v v16, (a0)
+; RV64VLS-NEXT: addi a0, a1, 128
+; RV64VLS-NEXT: vs8r.v v8, (a0)
+; RV64VLS-NEXT: vs8r.v v16, (a1)
+; RV64VLS-NEXT: addi sp, s0, -80
+; RV64VLS-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64VLS-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64VLS-NEXT: addi sp, sp, 80
+; RV64VLS-NEXT: ret
%sv = load <2 x i64>, ptr %psv
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 8)
store <vscale x 16 x i64> %v, ptr %out
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index e27ff0a573d5..f98cb343a2ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -181,7 +181,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 12
-; RV32-NEXT: vmv.s.x v1, a4
+; RV32-NEXT: vmv.s.x v3, a4
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v16, 16
; RV32-NEXT: csrr a4, vlenb
@@ -191,12 +191,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 2
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs1r.v v1, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vs1r.v v3, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vslideup.vi v8, v16, 10, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 20
@@ -271,7 +271,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vslideup.vi v8, v16, 2
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
@@ -339,8 +339,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v1, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vl1r.v v3, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: vslideup.vi v12, v16, 6, v0.t
; RV32-NEXT: vmv.v.v v4, v12
; RV32-NEXT: lui a1, %hi(.LCPI6_5)
@@ -394,7 +394,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v28, v24, v8
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v3
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
@@ -435,23 +435,23 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: lui a1, %hi(.LCPI6_10)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_10)
-; RV32-NEXT: vle16.v v2, (a1)
+; RV32-NEXT: vle16.v v4, (a1)
; RV32-NEXT: lui a1, 15
-; RV32-NEXT: vmv.s.x v5, a1
+; RV32-NEXT: vmv.s.x v6, a1
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vslideup.vi v28, v24, 6
-; RV32-NEXT: vmv1r.v v0, v5
+; RV32-NEXT: vmv1r.v v0, v6
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v28, v8, v2, v0.t
+; RV32-NEXT: vrgatherei16.vv v28, v8, v4, v0.t
; RV32-NEXT: lui a1, %hi(.LCPI6_11)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_11)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
@@ -460,12 +460,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vle16.v v0, (a1)
; RV32-NEXT: vle16.v v24, (a3)
; RV32-NEXT: li a1, 1008
-; RV32-NEXT: vmv.s.x v4, a1
+; RV32-NEXT: vmv.s.x v7, a1
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v4, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs1r.v v7, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 40
; RV32-NEXT: mul a1, a1, a3
@@ -473,7 +473,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v8, v16, v0
-; RV32-NEXT: vmv1r.v v0, v4
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 48
; RV32-NEXT: mul a1, a1, a3
@@ -487,7 +487,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_13)
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: vmv1r.v v0, v5
+; RV32-NEXT: vmv1r.v v0, v6
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
@@ -617,25 +617,25 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vrgather.vi v8, v16, 4
; RV64-NEXT: li a1, 128
-; RV64-NEXT: vmv.s.x v4, a1
+; RV64-NEXT: vmv.s.x v0, a1
; RV64-NEXT: vsetivli zero, 8, e64, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v16, 8
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 19
-; RV64-NEXT: mul a1, a1, a2
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: vmv1r.v v0, v4
+; RV64-NEXT: vmv1r.v v28, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a2, a1, 1
; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v4, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vrgather.vi v8, v24, 2, v0.t
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 19
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.v.v v4, v8
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV64-NEXT: li a1, 6
; RV64-NEXT: vid.v v8
@@ -646,12 +646,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v24, v2
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v8, v16, v2
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: li a1, 56
; RV64-NEXT: vmv.s.x v1, a1
-; RV64-NEXT: vadd.vi v16, v2, -16
+; RV64-NEXT: vadd.vi v30, v2, -16
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV64-NEXT: vmv1r.v v0, v1
; RV64-NEXT: csrr a1, vlenb
@@ -659,16 +659,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v8, v16, v30, v0.t
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: vmv.v.v v4, v8
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a2, a1, 4
; RV64-NEXT: sub a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vs4r.v v4, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 27
@@ -676,36 +676,29 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v8, v16, 5
-; RV64-NEXT: vmv1r.v v0, v4
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 19
-; RV64-NEXT: mul a1, a1, a2
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v8, v16, 3, v0.t
-; RV64-NEXT: vmv.v.v v4, v8
+; RV64-NEXT: vrgather.vi v4, v16, 5
+; RV64-NEXT: vmv1r.v v0, v28
+; RV64-NEXT: vrgather.vi v4, v24, 3, v0.t
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs2r.v v2, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vadd.vi v24, v2, 1
+; RV64-NEXT: vadd.vi v16, v2, 1
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 43
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v24
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v8, v24, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v24, v2, -15
+; RV64-NEXT: vadd.vi v16, v2, -15
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 11
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs2r.v v24, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vs2r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV64-NEXT: vmv1r.v v0, v1
; RV64-NEXT: csrr a1, vlenb
@@ -713,14 +706,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 11
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl2r.v v2, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v24, v2, v0.t
+; RV64-NEXT: vrgatherei16.vv v8, v16, v2, v0.t
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
; RV64-NEXT: vmv.v.v v4, v8
; RV64-NEXT: csrr a1, vlenb
@@ -732,16 +725,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vl2r.v v2, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vadd.vi v4, v2, 2
+; RV64-NEXT: vadd.vi v6, v2, 2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vrgatherei16.vv v8, v16, v4
+; RV64-NEXT: vrgatherei16.vv v8, v24, v6
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: li a1, 24
-; RV64-NEXT: vmv.s.x v4, a1
-; RV64-NEXT: vadd.vi v16, v2, -14
+; RV64-NEXT: vmv.s.x v7, a1
+; RV64-NEXT: vadd.vi v26, v2, -14
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vmv1r.v v0, v4
-; RV64-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vmv.v.i v12, 6
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
@@ -750,16 +743,15 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vmv4r.v v24, v16
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vrgatherei16.vv v16, v24, v12
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a2, a1, 1
; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v1, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vmv1r.v v0, v1
+; RV64-NEXT: vl1r.v v6, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 19
; RV64-NEXT: mul a1, a1, a2
@@ -786,16 +778,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vrgatherei16.vv v8, v16, v28
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v16, v2, -13
+; RV64-NEXT: vadd.vi v28, v2, -13
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vmv1r.v v0, v4
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 35
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addi a1, a1, 7
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -806,24 +798,24 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v24, v16, v12
-; RV64-NEXT: vmv1r.v v0, v1
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v16, v24, v12
+; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 19
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v24, v16, 5, v0.t
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vi v16, v24, 5, v0.t
; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v24, v8
+; RV64-NEXT: vmv.v.v v16, v8
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a2, a1, 1
; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v24, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: lui a1, 96
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vmv.v.x v8, a1
@@ -840,19 +832,19 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v4, v24, 2
-; RV64-NEXT: vrgatherei16.vv v4, v16, v8, v0.t
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vi v4, v16, 2
+; RV64-NEXT: vrgatherei16.vv v4, v24, v8, v0.t
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v26, v2, 4
+; RV64-NEXT: vadd.vi v16, v2, 4
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 43
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v26
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v8, v24, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: li a1, 28
; RV64-NEXT: vmv.s.x v1, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 5407eadb160b..648fb785cf15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -606,7 +606,7 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
@@ -632,15 +632,15 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfabs.v v16, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v1, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vfcvt.x.f.v v16, v24, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: fsflags a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index d12bd651a10c..855e280164a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -2002,11 +2002,11 @@ define float @vreduce_fminimum_v64f32(ptr %x) {
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vle32.v v24, (a1)
; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v16, v8, v16
@@ -2177,14 +2177,14 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v16, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v24, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -2195,14 +2195,14 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
; CHECK-NEXT: vmfeq.vv v0, v24, v24
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
@@ -2627,11 +2627,11 @@ define double @vreduce_fminimum_v32f64(ptr %x) {
; CHECK-NEXT: vle64.v v16, (a0)
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v16, v8, v16
@@ -2782,14 +2782,14 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v16, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v24, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -2800,14 +2800,14 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
; CHECK-NEXT: vmfeq.vv v0, v24, v24
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
@@ -3330,11 +3330,11 @@ define float @vreduce_fmaximum_v64f32(ptr %x) {
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vle32.v v24, (a1)
; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v16, v8, v16
@@ -3505,14 +3505,14 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v16, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v24, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -3523,14 +3523,14 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
; CHECK-NEXT: vmfeq.vv v0, v24, v24
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
@@ -3955,11 +3955,11 @@ define double @vreduce_fmaximum_v32f64(ptr %x) {
; CHECK-NEXT: vle64.v v16, (a0)
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v16, v8, v16
@@ -4110,14 +4110,14 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v16, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v24, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -4128,14 +4128,14 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
; CHECK-NEXT: vmfeq.vv v0, v24, v24
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
index 2ff2529e259a..6ba90b00fdba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
@@ -1163,7 +1163,7 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH32-NEXT: addi a0, sp, 16
; ZVFH32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFH32-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; ZVFH32-NEXT: vslidedown.vi v1, v0, 8
+; ZVFH32-NEXT: vslidedown.vi v7, v0, 8
; ZVFH32-NEXT: mv a0, a2
; ZVFH32-NEXT: bltu a2, a3, .LBB43_2
; ZVFH32-NEXT: # %bb.1:
@@ -1172,13 +1172,13 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH32-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH32-NEXT: addi a0, sp, 16
; ZVFH32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFH32-NEXT: vmfeq.vv v2, v8, v24, v0.t
+; ZVFH32-NEXT: vmfeq.vv v6, v8, v24, v0.t
; ZVFH32-NEXT: addi a0, a2, -64
; ZVFH32-NEXT: sltu a1, a2, a0
; ZVFH32-NEXT: addi a1, a1, -1
; ZVFH32-NEXT: and a0, a1, a0
; ZVFH32-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH32-NEXT: vmv1r.v v0, v1
+; ZVFH32-NEXT: vmv1r.v v0, v7
; ZVFH32-NEXT: csrr a0, vlenb
; ZVFH32-NEXT: slli a0, a0, 3
; ZVFH32-NEXT: add a0, sp, a0
@@ -1186,8 +1186,8 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFH32-NEXT: vmfeq.vv v24, v16, v8, v0.t
; ZVFH32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; ZVFH32-NEXT: vslideup.vi v2, v24, 8
-; ZVFH32-NEXT: vmv.v.v v0, v2
+; ZVFH32-NEXT: vslideup.vi v6, v24, 8
+; ZVFH32-NEXT: vmv.v.v v0, v6
; ZVFH32-NEXT: csrr a0, vlenb
; ZVFH32-NEXT: slli a0, a0, 4
; ZVFH32-NEXT: add sp, sp, a0
@@ -1216,7 +1216,7 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFH64-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; ZVFH64-NEXT: mv a0, a2
-; ZVFH64-NEXT: vslidedown.vi v1, v0, 8
+; ZVFH64-NEXT: vslidedown.vi v7, v0, 8
; ZVFH64-NEXT: bltu a2, a3, .LBB43_2
; ZVFH64-NEXT: # %bb.1:
; ZVFH64-NEXT: li a0, 64
@@ -1224,13 +1224,13 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH64-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH64-NEXT: addi a0, sp, 16
; ZVFH64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFH64-NEXT: vmfeq.vv v2, v8, v24, v0.t
+; ZVFH64-NEXT: vmfeq.vv v6, v8, v24, v0.t
; ZVFH64-NEXT: addi a0, a2, -64
; ZVFH64-NEXT: sltu a1, a2, a0
; ZVFH64-NEXT: addi a1, a1, -1
; ZVFH64-NEXT: and a0, a1, a0
; ZVFH64-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH64-NEXT: vmv1r.v v0, v1
+; ZVFH64-NEXT: vmv1r.v v0, v7
; ZVFH64-NEXT: csrr a0, vlenb
; ZVFH64-NEXT: slli a0, a0, 3
; ZVFH64-NEXT: add a0, sp, a0
@@ -1238,8 +1238,8 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFH64-NEXT: vmfeq.vv v24, v16, v8, v0.t
; ZVFH64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; ZVFH64-NEXT: vslideup.vi v2, v24, 8
-; ZVFH64-NEXT: vmv.v.v v0, v2
+; ZVFH64-NEXT: vslideup.vi v6, v24, 8
+; ZVFH64-NEXT: vmv.v.v v0, v6
; ZVFH64-NEXT: csrr a0, vlenb
; ZVFH64-NEXT: slli a0, a0, 4
; ZVFH64-NEXT: add sp, sp, a0
@@ -2918,7 +2918,7 @@ define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: addi a0, sp, 16
@@ -2932,13 +2932,13 @@ define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v2, v8, v24, v0.t
+; CHECK-NEXT: vmfeq.vv v6, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -2946,8 +2946,8 @@ define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v24, v16, v8, v0.t
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vslideup.vi v2, v24, 2
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vslideup.vi v6, v24, 2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index e558d45a3b2d..8cf069e66e8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -634,7 +634,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
@@ -653,13 +653,13 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vmseq.vv v2, v16, v8, v0.t
+; CHECK-NEXT: vmseq.vv v6, v16, v8, v0.t
; CHECK-NEXT: bltu a3, a1, .LBB51_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB51_2:
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -669,7 +669,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vmv1r.v v8, v6
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
@@ -1336,7 +1336,7 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 4
+; RV32-NEXT: vslidedown.vi v7, v0, 4
; RV32-NEXT: mv a0, a2
; RV32-NEXT: bltu a2, a3, .LBB99_2
; RV32-NEXT: # %bb.1:
@@ -1345,13 +1345,13 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmseq.vv v2, v8, v24, v0.t
+; RV32-NEXT: vmseq.vv v6, v8, v24, v0.t
; RV32-NEXT: addi a0, a2, -32
; RV32-NEXT: sltu a1, a2, a0
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
@@ -1359,8 +1359,8 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmseq.vv v24, v16, v8, v0.t
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v2, v24, 4
-; RV32-NEXT: vmv1r.v v0, v2
+; RV32-NEXT: vslideup.vi v6, v24, 4
+; RV32-NEXT: vmv1r.v v0, v6
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
@@ -1389,7 +1389,7 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64-NEXT: mv a0, a2
-; RV64-NEXT: vslidedown.vi v1, v0, 4
+; RV64-NEXT: vslidedown.vi v7, v0, 4
; RV64-NEXT: bltu a2, a3, .LBB99_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a0, 32
@@ -1397,13 +1397,13 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vmseq.vv v2, v8, v24, v0.t
+; RV64-NEXT: vmseq.vv v6, v8, v24, v0.t
; RV64-NEXT: addi a0, a2, -32
; RV64-NEXT: sltu a1, a2, a0
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; RV64-NEXT: vmv1r.v v0, v1
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
@@ -1411,8 +1411,8 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vmseq.vv v24, v16, v8, v0.t
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v2, v24, 4
-; RV64-NEXT: vmv1r.v v0, v2
+; RV64-NEXT: vslideup.vi v6, v24, 4
+; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 6ef5aa846d6d..6a7ec6dc5bd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -1,6 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLA %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLA %s
+
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: concat_2xv4i32:
@@ -100,12 +103,12 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
; CHECK-NEXT: vmv1r.v v20, v14
; CHECK-NEXT: vmv1r.v v24, v13
; CHECK-NEXT: vmv1r.v v28, v11
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmv1r.v v4, v9
+; CHECK-NEXT: vmv1r.v v4, v10
+; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v4, 2
+; CHECK-NEXT: vslideup.vi v8, v0, 2
; CHECK-NEXT: vsetivli zero, 6, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v0, 4
+; CHECK-NEXT: vslideup.vi v8, v4, 4
; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v28, 6
; CHECK-NEXT: vsetivli zero, 10, e32, m4, tu, ma
@@ -128,31 +131,51 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
}
define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
-; CHECK-LABEL: concat_2xv16i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v16, 16
-; CHECK-NEXT: ret
+; VLA-LABEL: concat_2xv16i32:
+; VLA: # %bb.0:
+; VLA-NEXT: vmv4r.v v16, v12
+; VLA-NEXT: li a0, 32
+; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; VLA-NEXT: vslideup.vi v8, v16, 16
+; VLA-NEXT: ret
+;
+; VLS-LABEL: concat_2xv16i32:
+; VLS: # %bb.0:
+; VLS-NEXT: vmv4r.v v16, v12
+; VLS-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; VLS-NEXT: vslideup.vi v8, v16, 16
+; VLS-NEXT: ret
%ab = shufflevector <16 x i32> %a, <16 x i32> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
ret <32 x i32> %ab
}
define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
-; CHECK-LABEL: concat_4xv8i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vmv2r.v v16, v14
-; CHECK-NEXT: vmv2r.v v24, v12
-; CHECK-NEXT: vmv2r.v v0, v10
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v0, 8
-; CHECK-NEXT: vsetivli zero, 24, e32, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v24, 16
-; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v16, 24
-; CHECK-NEXT: ret
+; VLA-LABEL: concat_4xv8i32:
+; VLA: # %bb.0:
+; VLA-NEXT: vmv2r.v v16, v14
+; VLA-NEXT: vmv2r.v v24, v12
+; VLA-NEXT: vmv2r.v v0, v10
+; VLA-NEXT: vsetivli zero, 16, e32, m8, tu, ma
+; VLA-NEXT: vslideup.vi v8, v0, 8
+; VLA-NEXT: vsetivli zero, 24, e32, m8, tu, ma
+; VLA-NEXT: vslideup.vi v8, v24, 16
+; VLA-NEXT: li a0, 32
+; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; VLA-NEXT: vslideup.vi v8, v16, 24
+; VLA-NEXT: ret
+;
+; VLS-LABEL: concat_4xv8i32:
+; VLS: # %bb.0:
+; VLS-NEXT: vmv2r.v v16, v14
+; VLS-NEXT: vmv2r.v v24, v12
+; VLS-NEXT: vmv2r.v v0, v10
+; VLS-NEXT: vsetivli zero, 16, e32, m8, tu, ma
+; VLS-NEXT: vslideup.vi v8, v0, 8
+; VLS-NEXT: vsetivli zero, 24, e32, m8, tu, ma
+; VLS-NEXT: vslideup.vi v8, v24, 16
+; VLS-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; VLS-NEXT: vslideup.vi v8, v16, 24
+; VLS-NEXT: ret
%ab = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%cd = shufflevector <8 x i32> %c, <8 x i32> %d, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%abcd = shufflevector <16 x i32> %ab, <16 x i32> %cd, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -160,82 +183,128 @@ define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x
}
define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
-; CHECK-LABEL: concat_8xv4i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vmv1r.v v16, v15
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 0
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a1, a1, a0
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v16, v14
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v16, v13
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: vmv1r.v v24, v10
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetivli zero, 8, e32, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v16, 4
-; CHECK-NEXT: vsetivli zero, 12, e32, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v24, 8
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v0, 12
-; CHECK-NEXT: vsetivli zero, 20, e32, m8, tu, ma
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vslideup.vi v8, v16, 16
-; CHECK-NEXT: vsetivli zero, 24, e32, m8, tu, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vslideup.vi v8, v16, 20
-; CHECK-NEXT: vsetivli zero, 28, e32, m8, tu, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vslideup.vi v8, v16, 24
-; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 0
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a1, a1, a0
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vslideup.vi v8, v16, 28
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+; VLA-LABEL: concat_8xv4i32:
+; VLA: # %bb.0:
+; VLA-NEXT: addi sp, sp, -16
+; VLA-NEXT: .cfi_def_cfa_offset 16
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: slli a0, a0, 5
+; VLA-NEXT: sub sp, sp, a0
+; VLA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; VLA-NEXT: vmv1r.v v16, v15
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: li a1, 0
+; VLA-NEXT: slli a0, a0, 3
+; VLA-NEXT: add a1, a1, a0
+; VLA-NEXT: slli a0, a0, 1
+; VLA-NEXT: add a0, a0, a1
+; VLA-NEXT: add a0, sp, a0
+; VLA-NEXT: addi a0, a0, 16
+; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLA-NEXT: vmv1r.v v16, v14
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: slli a0, a0, 4
+; VLA-NEXT: add a0, sp, a0
+; VLA-NEXT: addi a0, a0, 16
+; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLA-NEXT: vmv1r.v v16, v13
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: slli a0, a0, 3
+; VLA-NEXT: add a0, sp, a0
+; VLA-NEXT: addi a0, a0, 16
+; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLA-NEXT: vmv1r.v v16, v12
+; VLA-NEXT: addi a0, sp, 16
+; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLA-NEXT: vmv1r.v v0, v11
+; VLA-NEXT: vmv1r.v v24, v10
+; VLA-NEXT: vmv1r.v v16, v9
+; VLA-NEXT: vsetivli zero, 8, e32, m8, tu, ma
+; VLA-NEXT: vslideup.vi v8, v16, 4
+; VLA-NEXT: vsetivli zero, 12, e32, m8, tu, ma
+; VLA-NEXT: vslideup.vi v8, v24, 8
+; VLA-NEXT: vsetivli zero, 16, e32, m8, tu, ma
+; VLA-NEXT: vslideup.vi v8, v0, 12
+; VLA-NEXT: vsetivli zero, 20, e32, m8, tu, ma
+; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLA-NEXT: vslideup.vi v8, v16, 16
+; VLA-NEXT: vsetivli zero, 24, e32, m8, tu, ma
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: slli a0, a0, 3
+; VLA-NEXT: add a0, sp, a0
+; VLA-NEXT: addi a0, a0, 16
+; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLA-NEXT: vslideup.vi v8, v16, 20
+; VLA-NEXT: vsetivli zero, 28, e32, m8, tu, ma
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: slli a0, a0, 4
+; VLA-NEXT: add a0, sp, a0
+; VLA-NEXT: addi a0, a0, 16
+; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLA-NEXT: vslideup.vi v8, v16, 24
+; VLA-NEXT: li a0, 32
+; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: li a1, 0
+; VLA-NEXT: slli a0, a0, 3
+; VLA-NEXT: add a1, a1, a0
+; VLA-NEXT: slli a0, a0, 1
+; VLA-NEXT: add a0, a0, a1
+; VLA-NEXT: add a0, sp, a0
+; VLA-NEXT: addi a0, a0, 16
+; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLA-NEXT: vslideup.vi v8, v16, 28
+; VLA-NEXT: csrr a0, vlenb
+; VLA-NEXT: slli a0, a0, 5
+; VLA-NEXT: add sp, sp, a0
+; VLA-NEXT: addi sp, sp, 16
+; VLA-NEXT: ret
+;
+; VLS-LABEL: concat_8xv4i32:
+; VLS: # %bb.0:
+; VLS-NEXT: addi sp, sp, -16
+; VLS-NEXT: .cfi_def_cfa_offset 16
+; VLS-NEXT: addi sp, sp, -512
+; VLS-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; VLS-NEXT: vmv1r.v v16, v15
+; VLS-NEXT: addi a0, sp, 400
+; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLS-NEXT: vmv1r.v v16, v14
+; VLS-NEXT: addi a0, sp, 272
+; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLS-NEXT: vmv1r.v v16, v13
+; VLS-NEXT: addi a0, sp, 144
+; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLS-NEXT: vmv1r.v v16, v12
+; VLS-NEXT: addi a0, sp, 16
+; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLS-NEXT: vmv1r.v v0, v11
+; VLS-NEXT: vmv1r.v v24, v10
+; VLS-NEXT: vmv1r.v v16, v9
+; VLS-NEXT: vsetivli zero, 8, e32, m8, tu, ma
+; VLS-NEXT: vslideup.vi v8, v16, 4
+; VLS-NEXT: vsetivli zero, 12, e32, m8, tu, ma
+; VLS-NEXT: vslideup.vi v8, v24, 8
+; VLS-NEXT: vsetivli zero, 16, e32, m8, tu, ma
+; VLS-NEXT: vslideup.vi v8, v0, 12
+; VLS-NEXT: vsetivli zero, 20, e32, m8, tu, ma
+; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLS-NEXT: vslideup.vi v8, v16, 16
+; VLS-NEXT: vsetivli zero, 24, e32, m8, tu, ma
+; VLS-NEXT: addi a0, sp, 144
+; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLS-NEXT: vslideup.vi v8, v16, 20
+; VLS-NEXT: vsetivli zero, 28, e32, m8, tu, ma
+; VLS-NEXT: addi a0, sp, 272
+; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLS-NEXT: vslideup.vi v8, v16, 24
+; VLS-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; VLS-NEXT: addi a0, sp, 400
+; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; VLS-NEXT: vslideup.vi v8, v16, 28
+; VLS-NEXT: addi sp, sp, 512
+; VLS-NEXT: addi sp, sp, 16
+; VLS-NEXT: ret
%ab = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%cd = shufflevector <4 x i32> %c, <4 x i32> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%abcd = shufflevector <8 x i32> %ab, <8 x i32> %cd, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 34b0789d801a..e7b747372391 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -230,7 +230,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: slli a2, a2, 6
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 64 * vlenb
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 5
; CHECK-NEXT: add a2, sp, a2
@@ -243,7 +243,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 8
+; CHECK-NEXT: vslidedown.vi v5, v0, 8
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v26, v0, 4
; CHECK-NEXT: addi a2, a1, 512
@@ -256,7 +256,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v27, v1, 4
+; CHECK-NEXT: vslidedown.vi v27, v5, 4
; CHECK-NEXT: addi a2, a1, 640
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a2)
@@ -346,7 +346,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: li a6, 16
; CHECK-NEXT: .LBB16_6:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v20, v1, 2
+; CHECK-NEXT: vslidedown.vi v20, v5, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a5)
; CHECK-NEXT: addi a1, a1, 256
@@ -382,9 +382,9 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB16_10:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v5, v4, 2
+; CHECK-NEXT: vslidedown.vi v6, v7, 2
; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v5
; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: bltu a7, a3, .LBB16_12
@@ -444,7 +444,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a1, a4, a1
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v5
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: add a1, sp, a1
@@ -456,7 +456,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: li a7, 16
; CHECK-NEXT: .LBB16_14:
; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v4
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 40
; CHECK-NEXT: mul a1, a1, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 6e8360869ddc..e15253b67275 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -1529,7 +1529,7 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-LABEL: vadd_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1546,7 +1546,7 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1650,14 +1650,14 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vadd_vx_v32i64_evl27:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
index f9b67b83f872..f83968d54b2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -333,7 +333,7 @@ define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
@@ -349,7 +349,7 @@ define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v16, v24, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
index 65776339de07..d7b89ee054af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -913,7 +913,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
@@ -940,7 +940,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
index ca033c26dba1..86218ddb04bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -425,7 +425,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
@@ -441,7 +441,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v16, v16, v24, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
index 1f1efdaf1ee5..8b8049ea6c62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -425,7 +425,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
@@ -441,7 +441,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v16, v16, v24, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
index 28ab179048ca..4af566cb5f55 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
@@ -677,7 +677,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v1, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
@@ -704,7 +704,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index e98a988c8767..6af5ba185b8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -1092,7 +1092,7 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-LABEL: vmax_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1109,7 +1109,7 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vmax.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index cc33b40c43a8..12c6410068c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -1091,7 +1091,7 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-LABEL: vmaxu_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1108,7 +1108,7 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vmaxu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index 512b7941481f..f5b9421d28c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -1092,7 +1092,7 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-LABEL: vmin_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1109,7 +1109,7 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vmin.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index 993b0364b07f..d07580efceb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -1091,7 +1091,7 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-LABEL: vminu_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1108,7 +1108,7 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vminu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index beff4157b14b..4d2f55b172e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2460,7 +2460,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_v32f64:
; RV32: # %bb.0:
-; RV32-NEXT: vmv1r.v v1, v0
+; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wi v16, v8, 0
@@ -2484,7 +2484,7 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB96_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 6c5dd0403dff..d7ed20f4e098 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -1542,7 +1542,7 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-LABEL: vsadd_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1559,7 +1559,7 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1642,14 +1642,14 @@ define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vsadd_vx_v32i64_evl12:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1673,14 +1673,14 @@ define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vsadd_vx_v32i64_evl27:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index 6227f8abe599..ea248010ef09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -1538,7 +1538,7 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-LABEL: vsaddu_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1555,7 +1555,7 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1638,14 +1638,14 @@ define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vsaddu_vx_v32i64_evl12:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1669,14 +1669,14 @@ define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vsaddu_vx_v32i64_evl27:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 6360cf49d8d4..32b8d10d8717 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -1582,7 +1582,7 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-LABEL: vssub_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1599,7 +1599,7 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1684,14 +1684,14 @@ define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vssub_vx_v32i64_evl12:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1716,14 +1716,14 @@ define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vssub_vx_v32i64_evl27:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 6ea975887123..60c16ef543a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -1577,7 +1577,7 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-LABEL: vssubu_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
@@ -1594,7 +1594,7 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1679,14 +1679,14 @@ define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vssubu_vx_v32i64_evl12:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1711,14 +1711,14 @@ define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-LABEL: vssubu_vx_v32i64_evl27:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
-; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index c954c9a6d0d1..386f23f68c35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -175,11 +175,11 @@ define <vscale x 16 x half> @vfmax_nxv16f16_vv(<vscale x 16 x half> %a, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v24, v24
-; ZVFHMIN-NEXT: vmfeq.vv v1, v16, v16
+; ZVFHMIN-NEXT: vmfeq.vv v7, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmax.vv v16, v8, v16
@@ -201,9 +201,9 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
-; ZVFH-NEXT: vmfeq.vv v1, v16, v16
+; ZVFH-NEXT: vmfeq.vv v7, v16, v16
; ZVFH-NEXT: vmerge.vvm v24, v8, v16, v0
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vmerge.vvm v8, v16, v8, v0
; ZVFH-NEXT: vfmax.vv v8, v8, v24
; ZVFH-NEXT: ret
@@ -225,9 +225,9 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
-; ZVFHMIN-NEXT: vmfeq.vv v1, v24, v24
+; ZVFHMIN-NEXT: vmfeq.vv v3, v24, v24
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v24, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
; ZVFHMIN-NEXT: vfmax.vv v24, v8, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -243,14 +243,14 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
-; ZVFHMIN-NEXT: vmfeq.vv v1, v8, v8
+; ZVFHMIN-NEXT: vmfeq.vv v7, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -346,9 +346,9 @@ define <vscale x 16 x float> @vfmax_nxv16f32_vv(<vscale x 16 x float> %a, <vscal
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v24
; CHECK-NEXT: ret
@@ -414,9 +414,9 @@ define <vscale x 8 x double> @vfmax_nxv8f64_vv(<vscale x 8 x double> %a, <vscale
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index f4350a1e1ced..7774e3e4775a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -336,7 +336,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -348,11 +348,11 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmax.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -393,11 +393,11 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v1, v24, v24
+; ZVFHMIN-NEXT: vmfeq.vv v7, v24, v24
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmax.vv v16, v8, v16
@@ -425,18 +425,18 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFH-NEXT: vmv1r.v v1, v0
+; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
; ZVFH-NEXT: vmv1r.v v0, v25
; ZVFH-NEXT: vmerge.vvm v24, v8, v16, v0
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vmfeq.vv v25, v16, v16, v0.t
; ZVFH-NEXT: vmv1r.v v0, v25
; ZVFH-NEXT: vmerge.vvm v8, v16, v8, v0
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFH-NEXT: vfmax.vv v8, v8, v16, v0.t
; ZVFH-NEXT: csrr a0, vlenb
@@ -611,9 +611,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
-; ZVFH-NEXT: vmfeq.vv v1, v16, v16
+; ZVFH-NEXT: vmfeq.vv v7, v16, v16
; ZVFH-NEXT: vmerge.vvm v24, v8, v16, v0
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vmerge.vvm v8, v16, v8, v0
; ZVFH-NEXT: vfmax.vv v8, v8, v24
; ZVFH-NEXT: ret
@@ -636,7 +636,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v1, v24, a2
+; ZVFHMIN-NEXT: vslidedown.vx v7, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
@@ -647,7 +647,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv4r.v v8, v16
@@ -668,11 +668,11 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmerge.vvm v16, v24, v16, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -702,9 +702,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v1, v16, v16
+; ZVFHMIN-NEXT: vmfeq.vv v3, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v24, v8, v16, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v8, v0
; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -992,18 +992,18 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -1020,9 +1020,9 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v24
; CHECK-NEXT: ret
@@ -1073,7 +1073,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v1, v24
+; CHECK-NEXT: vmv1r.v v7, v24
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
@@ -1090,7 +1090,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v17, v24, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a3, 24
@@ -1102,7 +1102,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -1123,8 +1123,8 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
@@ -1145,11 +1145,11 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -1202,9 +1202,9 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: and a0, a3, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: vfmax.vv v8, v16, v8
; CHECK-NEXT: addi a0, sp, 16
@@ -1225,9 +1225,9 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v8, v8
+; CHECK-NEXT: vmfeq.vv v7, v8, v8
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT: vfmax.vv v8, v8, v24
; CHECK-NEXT: addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index 567068fdfb1c..48baa12aa2e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -175,11 +175,11 @@ define <vscale x 16 x half> @vfmin_nxv16f16_vv(<vscale x 16 x half> %a, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v24, v24
-; ZVFHMIN-NEXT: vmfeq.vv v1, v16, v16
+; ZVFHMIN-NEXT: vmfeq.vv v7, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmin.vv v16, v8, v16
@@ -201,9 +201,9 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
-; ZVFH-NEXT: vmfeq.vv v1, v16, v16
+; ZVFH-NEXT: vmfeq.vv v7, v16, v16
; ZVFH-NEXT: vmerge.vvm v24, v8, v16, v0
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vmerge.vvm v8, v16, v8, v0
; ZVFH-NEXT: vfmin.vv v8, v8, v24
; ZVFH-NEXT: ret
@@ -225,9 +225,9 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
-; ZVFHMIN-NEXT: vmfeq.vv v1, v24, v24
+; ZVFHMIN-NEXT: vmfeq.vv v3, v24, v24
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v24, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
; ZVFHMIN-NEXT: vfmin.vv v24, v8, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -243,14 +243,14 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
-; ZVFHMIN-NEXT: vmfeq.vv v1, v8, v8
+; ZVFHMIN-NEXT: vmfeq.vv v7, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -346,9 +346,9 @@ define <vscale x 16 x float> @vfmin_nxv16f32_vv(<vscale x 16 x float> %a, <vscal
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: ret
@@ -414,9 +414,9 @@ define <vscale x 8 x double> @vfmin_nxv8f64_vv(<vscale x 8 x double> %a, <vscale
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 1f8af732e3f9..4e98d0581f89 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -336,7 +336,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -348,11 +348,11 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmin.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -393,11 +393,11 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v1, v24, v24
+; ZVFHMIN-NEXT: vmfeq.vv v7, v24, v24
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmin.vv v16, v8, v16
@@ -425,18 +425,18 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFH-NEXT: vmv1r.v v1, v0
+; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
; ZVFH-NEXT: vmv1r.v v0, v25
; ZVFH-NEXT: vmerge.vvm v24, v8, v16, v0
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vmfeq.vv v25, v16, v16, v0.t
; ZVFH-NEXT: vmv1r.v v0, v25
; ZVFH-NEXT: vmerge.vvm v8, v16, v8, v0
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFH-NEXT: vfmin.vv v8, v8, v16, v0.t
; ZVFH-NEXT: csrr a0, vlenb
@@ -611,9 +611,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8
-; ZVFH-NEXT: vmfeq.vv v1, v16, v16
+; ZVFH-NEXT: vmfeq.vv v7, v16, v16
; ZVFH-NEXT: vmerge.vvm v24, v8, v16, v0
-; ZVFH-NEXT: vmv1r.v v0, v1
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vmerge.vvm v8, v16, v8, v0
; ZVFH-NEXT: vfmin.vv v8, v8, v24
; ZVFH-NEXT: ret
@@ -636,7 +636,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v1, v24, a2
+; ZVFHMIN-NEXT: vslidedown.vx v7, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
@@ -647,7 +647,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv4r.v v8, v16
@@ -668,11 +668,11 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmerge.vvm v16, v24, v16, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -702,9 +702,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v1, v16, v16
+; ZVFHMIN-NEXT: vmfeq.vv v3, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v24, v8, v16, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v8, v0
; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -992,18 +992,18 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
@@ -1020,9 +1020,9 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8
-; CHECK-NEXT: vmfeq.vv v1, v16, v16
+; CHECK-NEXT: vmfeq.vv v7, v16, v16
; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: ret
@@ -1073,7 +1073,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v1, v24
+; CHECK-NEXT: vmv1r.v v7, v24
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
@@ -1090,7 +1090,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v17, v24, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a3, 24
@@ -1102,7 +1102,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -1123,8 +1123,8 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
@@ -1145,11 +1145,11 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -1202,9 +1202,9 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: and a0, a3, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v1, v24, v24
+; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: vfmin.vv v8, v16, v8
; CHECK-NEXT: addi a0, sp, 16
@@ -1225,9 +1225,9 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v1, v8, v8
+; CHECK-NEXT: vmfeq.vv v7, v8, v8
; CHECK-NEXT: vmerge.vvm v24, v16, v8, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index 0f3f57a0dec5..b15896580d42 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -76,7 +76,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
@@ -227,7 +227,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
@@ -306,7 +306,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_7(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: sub a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
@@ -319,7 +319,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_15(<vscale x 16 x i8> %vec, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: sub a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v10, a1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
@@ -344,7 +344,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_2(<vscale x 32 x half> %vec
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
@@ -357,7 +357,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %ve
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v14, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 139e1ea262b7..dc67c64f3ffd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -1755,9 +1755,9 @@ define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double
define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double> %val1, ptr %base, <vscale x 16 x i8> %idxs, <vscale x 16 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv16i8_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vl2r.v v2, (a1)
+; RV32-NEXT: vl2r.v v6, (a1)
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf4 v24, v2
+; RV32-NEXT: vsext.vf4 v24, v6
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
@@ -1771,12 +1771,12 @@ define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vsc
;
; RV64-LABEL: mscatter_baseidx_nxv16i8_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vl2r.v v2, (a1)
+; RV64-NEXT: vl2r.v v6, (a1)
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v24, v2
+; RV64-NEXT: vsext.vf8 v24, v6
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
-; RV64-NEXT: vsext.vf8 v8, v3
+; RV64-NEXT: vsext.vf8 v8, v7
; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 7c354c3714c6..126836cd9390 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -468,7 +468,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -507,13 +507,13 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v1, v16, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v7, v16, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: fsflags a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 243dc19a2558..897bfdea69f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -2219,7 +2219,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVFH-NEXT: vslidedown.vx v0, v24, a1
; ZVFH-NEXT: vsetvli zero, a4, e16, m8, ta, ma
-; ZVFH-NEXT: vmfeq.vv v1, v16, v8, v0.t
+; ZVFH-NEXT: vmfeq.vv v7, v16, v8, v0.t
; ZVFH-NEXT: bltu a2, a3, .LBB85_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a2, a3
@@ -2235,9 +2235,9 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFH-NEXT: vmfeq.vv v16, v8, v24, v0.t
; ZVFH-NEXT: add a0, a1, a1
-; ZVFH-NEXT: vsetvli zero, a0, e8, m1, tu, ma
-; ZVFH-NEXT: vslideup.vx v16, v1, a1
-; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; ZVFH-NEXT: vslideup.vx v16, v7, a1
+; ZVFH-NEXT: vmv.v.v v0, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 4
; ZVFH-NEXT: add sp, sp, a0
@@ -2280,7 +2280,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add t0, sp, t0
; ZVFHMIN-NEXT: addi t0, t0, 16
; ZVFHMIN-NEXT: vs1r.v v0, (t0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1
+; ZVFHMIN-NEXT: vslidedown.vx v7, v0, a1
; ZVFHMIN-NEXT: srli a3, a3, 2
; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -2291,8 +2291,8 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3
+; ZVFHMIN-NEXT: vs1r.v v7, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 1
@@ -2331,14 +2331,14 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, a6, e32, m8, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmfeq.vv v2, v16, v8, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v6, v16, v8, v0.t
; ZVFHMIN-NEXT: add a0, a3, a3
; ZVFHMIN-NEXT: bltu a2, a5, .LBB85_4
; ZVFHMIN-NEXT: # %bb.3:
; ZVFHMIN-NEXT: mv a2, a5
; ZVFHMIN-NEXT: .LBB85_4:
-; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; ZVFHMIN-NEXT: vslideup.vx v2, v26, a3
+; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslideup.vx v6, v26, a3
; ZVFHMIN-NEXT: sub a5, a2, a4
; ZVFHMIN-NEXT: sltu a6, a2, a5
; ZVFHMIN-NEXT: addi a6, a6, -1
@@ -2348,7 +2348,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
; ZVFHMIN-NEXT: vl1r.v v8, (a6) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmv1r.v v1, v8
+; ZVFHMIN-NEXT: vmv1r.v v7, v8
; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
; ZVFHMIN-NEXT: vsetvli a6, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a6, vlenb
@@ -2378,7 +2378,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a5, sp, a5
; ZVFHMIN-NEXT: addi a5, a5, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmfeq.vv v3, v16, v8, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v5, v16, v8, v0.t
; ZVFHMIN-NEXT: bltu a2, a4, .LBB85_6
; ZVFHMIN-NEXT: # %bb.5:
; ZVFHMIN-NEXT: mv a2, a4
@@ -2393,14 +2393,14 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; ZVFHMIN-NEXT: vslideup.vx v8, v3, a3
+; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslideup.vx v8, v5, a3
; ZVFHMIN-NEXT: add a0, a1, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e8, m1, tu, ma
-; ZVFHMIN-NEXT: vslideup.vx v8, v2, a1
-; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; ZVFHMIN-NEXT: vslideup.vx v8, v6, a1
+; ZVFHMIN-NEXT: vmv.v.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 34
; ZVFHMIN-NEXT: mul a0, a0, a1
@@ -3516,7 +3516,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: slli t1, a3, 3
; CHECK-NEXT: srli a4, a3, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v1, v0, a4
+; CHECK-NEXT: vslidedown.vx v7, v0, a4
; CHECK-NEXT: srli a1, a3, 3
; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; CHECK-NEXT: add a5, a2, t1
@@ -3548,7 +3548,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v2, v16, v8, v0.t
+; CHECK-NEXT: vmfeq.vv v6, v16, v8, v0.t
; CHECK-NEXT: bltu a7, a3, .LBB171_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a7, a3
@@ -3567,7 +3567,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v18, v1, a1
+; CHECK-NEXT: vslidedown.vx v18, v7, a1
; CHECK-NEXT: vl8re64.v v8, (t0)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
@@ -3597,14 +3597,14 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: sltu a2, a6, a0
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a0, a2, a0
-; CHECK-NEXT: vslideup.vx v17, v2, a1
+; CHECK-NEXT: vslideup.vx v17, v6, a1
; CHECK-NEXT: mv a2, a0
; CHECK-NEXT: bltu a0, a3, .LBB171_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB171_6:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
@@ -3637,7 +3637,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: slli a0, a1, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v17, v16, a0
; CHECK-NEXT: vmv1r.v v0, v17
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index e77966d8c43b..aee255196ce2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -3387,7 +3387,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: add a1, a0, a0
-; RV32-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; RV32-NEXT: vslideup.vx v0, v24, a0
; RV32-NEXT: ret
;
@@ -3400,7 +3400,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
; RV64-NEXT: add a1, a0, a0
-; RV64-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; RV64-NEXT: vslideup.vx v0, v24, a0
; RV64-NEXT: ret
;
@@ -3413,7 +3413,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
; ZVFHMIN32-NEXT: csrr a0, vlenb
; ZVFHMIN32-NEXT: srli a0, a0, 3
; ZVFHMIN32-NEXT: add a1, a0, a0
-; ZVFHMIN32-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; ZVFHMIN32-NEXT: vslideup.vx v0, v24, a0
; ZVFHMIN32-NEXT: ret
;
@@ -3426,7 +3426,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
; ZVFHMIN64-NEXT: csrr a0, vlenb
; ZVFHMIN64-NEXT: srli a0, a0, 3
; ZVFHMIN64-NEXT: add a1, a0, a0
-; ZVFHMIN64-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; ZVFHMIN64-NEXT: vslideup.vx v0, v24, a0
; ZVFHMIN64-NEXT: ret
%vc = fcmp oeq <vscale x 16 x double> %va, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 007afe12b8e4..b1f1a4dceccf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1184,7 +1184,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a0, a2, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmseq.vv v1, v16, v8, v0.t
+; CHECK-NEXT: vmseq.vv v7, v16, v8, v0.t
; CHECK-NEXT: bltu a3, a1, .LBB96_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
@@ -1200,7 +1200,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
@@ -2408,7 +2408,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vslidedown.vx v0, v24, a1
; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
-; CHECK-NEXT: vmseq.vv v1, v16, v8, v0.t
+; CHECK-NEXT: vmseq.vv v7, v16, v8, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB189_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
@@ -2424,8 +2424,8 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: add a0, a1, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vx v16, v1, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v16, v7, a1
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
@@ -2459,7 +2459,7 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: add a0, a2, a2
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v16, v25, a2
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
@@ -2492,7 +2492,7 @@ define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: add a0, a2, a2
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v16, v25, a2
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
index a2ac684604b9..5f35a4e50a95 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
@@ -3235,7 +3235,7 @@ define <vscale x 16 x i1> @icmp_eq_vi_nx16i64(<vscale x 16 x i64> %va) {
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; CHECK-NEXT: vmseq.vi v24, v16, 0
; CHECK-NEXT: vmseq.vi v0, v8, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vslideup.vx v0, v24, a0
; CHECK-NEXT: ret
%vc = icmp eq <vscale x 16 x i64> %va, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 191f047131fb..5d09c39dfd6e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -2849,6 +2849,498 @@ for.body: ; preds = %for.body.preheader,
br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_min(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_min:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB46_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmin.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB46_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_min_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_min_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB47_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmin.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB47_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_max(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_max:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB48_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmax.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB48_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_max_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_max_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB49_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmax.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB49_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_umin(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umin:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB50_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vminu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB50_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_umin_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umin_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB51_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vminu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB51_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_umax(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umax:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB52_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmaxu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB52_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_umax_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umax_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB53_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmaxu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB53_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_sadd_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_sadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB54_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB54_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_sadd_sat_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_sadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB55_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB55_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_ssub_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_ssub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB56_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB56_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_uadd_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_uadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB57_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsaddu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB57_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_uadd_sat_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_uadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB58_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsaddu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB58_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_usub_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_usub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB59_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB59_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
@@ -2857,7 +3349,7 @@ define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB46_1: # %vector.body
+; CHECK-NEXT: .LBB60_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2865,7 +3357,7 @@ define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB46_1
+; CHECK-NEXT: bne a0, a3, .LBB60_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -2895,7 +3387,7 @@ define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB47_1: # %vector.body
+; CHECK-NEXT: .LBB61_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2903,7 +3395,7 @@ define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB47_1
+; CHECK-NEXT: bne a0, a3, .LBB61_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -2931,7 +3423,7 @@ define void @sink_splat_vp_add_commute(ptr nocapture %a, i32 signext %x, <4 x i1
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB48_1: # %vector.body
+; CHECK-NEXT: .LBB62_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2939,7 +3431,7 @@ define void @sink_splat_vp_add_commute(ptr nocapture %a, i32 signext %x, <4 x i1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB48_1
+; CHECK-NEXT: bne a0, a3, .LBB62_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -2969,7 +3461,7 @@ define void @sink_splat_vp_sub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB49_1: # %vector.body
+; CHECK-NEXT: .LBB63_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2977,7 +3469,7 @@ define void @sink_splat_vp_sub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB49_1
+; CHECK-NEXT: bne a0, a3, .LBB63_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3005,7 +3497,7 @@ define void @sink_splat_vp_rsub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB50_1: # %vector.body
+; CHECK-NEXT: .LBB64_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3013,7 +3505,7 @@ define void @sink_splat_vp_rsub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB50_1
+; CHECK-NEXT: bne a0, a3, .LBB64_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3043,7 +3535,7 @@ define void @sink_splat_vp_shl(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB51_1: # %vector.body
+; CHECK-NEXT: .LBB65_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3051,7 +3543,7 @@ define void @sink_splat_vp_shl(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB51_1
+; CHECK-NEXT: bne a0, a3, .LBB65_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3081,7 +3573,7 @@ define void @sink_splat_vp_lshr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB52_1: # %vector.body
+; CHECK-NEXT: .LBB66_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3089,7 +3581,7 @@ define void @sink_splat_vp_lshr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB52_1
+; CHECK-NEXT: bne a0, a3, .LBB66_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3119,7 +3611,7 @@ define void @sink_splat_vp_ashr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB53_1: # %vector.body
+; CHECK-NEXT: .LBB67_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3127,7 +3619,7 @@ define void @sink_splat_vp_ashr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB53_1
+; CHECK-NEXT: bne a0, a3, .LBB67_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3157,7 +3649,7 @@ define void @sink_splat_vp_fmul(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB54_1: # %vector.body
+; CHECK-NEXT: .LBB68_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3165,7 +3657,7 @@ define void @sink_splat_vp_fmul(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB54_1
+; CHECK-NEXT: bne a0, a2, .LBB68_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3195,7 +3687,7 @@ define void @sink_splat_vp_fdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB55_1: # %vector.body
+; CHECK-NEXT: .LBB69_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3203,7 +3695,7 @@ define void @sink_splat_vp_fdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB55_1
+; CHECK-NEXT: bne a0, a2, .LBB69_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3231,7 +3723,7 @@ define void @sink_splat_vp_frdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB56_1: # %vector.body
+; CHECK-NEXT: .LBB70_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3239,7 +3731,7 @@ define void @sink_splat_vp_frdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB56_1
+; CHECK-NEXT: bne a0, a2, .LBB70_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3269,7 +3761,7 @@ define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB57_1: # %vector.body
+; CHECK-NEXT: .LBB71_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3277,7 +3769,7 @@ define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB57_1
+; CHECK-NEXT: bne a0, a2, .LBB71_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3307,7 +3799,7 @@ define void @sink_splat_vp_fsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB58_1: # %vector.body
+; CHECK-NEXT: .LBB72_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3315,7 +3807,7 @@ define void @sink_splat_vp_fsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB58_1
+; CHECK-NEXT: bne a0, a2, .LBB72_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3345,7 +3837,7 @@ define void @sink_splat_vp_frsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB59_1: # %vector.body
+; CHECK-NEXT: .LBB73_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3353,7 +3845,7 @@ define void @sink_splat_vp_frsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB59_1
+; CHECK-NEXT: bne a0, a2, .LBB73_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3383,7 +3875,7 @@ define void @sink_splat_vp_udiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB60_1: # %vector.body
+; CHECK-NEXT: .LBB74_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3391,7 +3883,7 @@ define void @sink_splat_vp_udiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB60_1
+; CHECK-NEXT: bne a0, a3, .LBB74_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3421,7 +3913,7 @@ define void @sink_splat_vp_sdiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB61_1: # %vector.body
+; CHECK-NEXT: .LBB75_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3429,7 +3921,7 @@ define void @sink_splat_vp_sdiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB61_1
+; CHECK-NEXT: bne a0, a3, .LBB75_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3459,7 +3951,7 @@ define void @sink_splat_vp_urem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB62_1: # %vector.body
+; CHECK-NEXT: .LBB76_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3467,7 +3959,7 @@ define void @sink_splat_vp_urem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB62_1
+; CHECK-NEXT: bne a0, a3, .LBB76_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3497,7 +3989,7 @@ define void @sink_splat_vp_srem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB63_1: # %vector.body
+; CHECK-NEXT: .LBB77_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3505,7 +3997,7 @@ define void @sink_splat_vp_srem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB63_1
+; CHECK-NEXT: bne a0, a3, .LBB77_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3536,7 +4028,7 @@ define void @sink_splat_vp_srem_commute(ptr nocapture %a, i32 signext %x, <4 x i
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: lui a1, 1
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: .LBB64_1: # %vector.body
+; CHECK-NEXT: .LBB78_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3544,7 +4036,7 @@ define void @sink_splat_vp_srem_commute(ptr nocapture %a, i32 signext %x, <4 x i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a1, .LBB64_1
+; CHECK-NEXT: bne a0, a1, .LBB78_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3574,7 +4066,7 @@ define void @sink_splat_vp_fma(ptr noalias nocapture %a, ptr nocapture readonly
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a1, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB65_1: # %vector.body
+; CHECK-NEXT: .LBB79_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
@@ -3584,7 +4076,7 @@ define void @sink_splat_vp_fma(ptr noalias nocapture %a, ptr nocapture readonly
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a1, a3, .LBB65_1
+; CHECK-NEXT: bne a1, a3, .LBB79_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3614,7 +4106,7 @@ define void @sink_splat_vp_fma_commute(ptr noalias nocapture %a, ptr nocapture r
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a1, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB66_1: # %vector.body
+; CHECK-NEXT: .LBB80_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
@@ -3624,7 +4116,7 @@ define void @sink_splat_vp_fma_commute(ptr noalias nocapture %a, ptr nocapture r
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a1, a3, .LBB66_1
+; CHECK-NEXT: bne a1, a3, .LBB80_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3655,13 +4147,13 @@ define void @sink_splat_mul_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB67_1: # %vector.body
+; CHECK-NEXT: .LBB81_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmul.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB67_1
+; CHECK-NEXT: bne a0, a2, .LBB81_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3689,13 +4181,13 @@ define void @sink_splat_add_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB68_1: # %vector.body
+; CHECK-NEXT: .LBB82_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vadd.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB68_1
+; CHECK-NEXT: bne a0, a2, .LBB82_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3723,13 +4215,13 @@ define void @sink_splat_sub_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB69_1: # %vector.body
+; CHECK-NEXT: .LBB83_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsub.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB69_1
+; CHECK-NEXT: bne a0, a2, .LBB83_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3757,13 +4249,13 @@ define void @sink_splat_rsub_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB70_1: # %vector.body
+; CHECK-NEXT: .LBB84_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vrsub.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB70_1
+; CHECK-NEXT: bne a0, a2, .LBB84_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3791,13 +4283,13 @@ define void @sink_splat_and_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB71_1: # %vector.body
+; CHECK-NEXT: .LBB85_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vand.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB71_1
+; CHECK-NEXT: bne a0, a2, .LBB85_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3825,13 +4317,13 @@ define void @sink_splat_or_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB72_1: # %vector.body
+; CHECK-NEXT: .LBB86_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vor.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB72_1
+; CHECK-NEXT: bne a0, a2, .LBB86_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3859,13 +4351,13 @@ define void @sink_splat_xor_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB73_1: # %vector.body
+; CHECK-NEXT: .LBB87_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vxor.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB73_1
+; CHECK-NEXT: bne a0, a2, .LBB87_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3894,13 +4386,13 @@ define void @sink_splat_mul_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB74_1: # %vector.body
+; CHECK-NEXT: .LBB88_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmul.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB74_1
+; CHECK-NEXT: bne a0, a2, .LBB88_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3929,13 +4421,13 @@ define void @sink_splat_add_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB75_1: # %vector.body
+; CHECK-NEXT: .LBB89_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vadd.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB75_1
+; CHECK-NEXT: bne a0, a2, .LBB89_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3964,13 +4456,13 @@ define void @sink_splat_sub_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB76_1: # %vector.body
+; CHECK-NEXT: .LBB90_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB76_1
+; CHECK-NEXT: bne a0, a2, .LBB90_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3999,13 +4491,13 @@ define void @sink_splat_rsub_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB77_1: # %vector.body
+; CHECK-NEXT: .LBB91_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vrsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB77_1
+; CHECK-NEXT: bne a0, a2, .LBB91_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4034,13 +4526,13 @@ define void @sink_splat_and_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB78_1: # %vector.body
+; CHECK-NEXT: .LBB92_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vand.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB78_1
+; CHECK-NEXT: bne a0, a2, .LBB92_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4069,13 +4561,13 @@ define void @sink_splat_or_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB79_1: # %vector.body
+; CHECK-NEXT: .LBB93_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB79_1
+; CHECK-NEXT: bne a0, a2, .LBB93_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4104,13 +4596,13 @@ define void @sink_splat_xor_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB80_1: # %vector.body
+; CHECK-NEXT: .LBB94_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vxor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB80_1
+; CHECK-NEXT: bne a0, a2, .LBB94_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4138,13 +4630,13 @@ define void @sink_splat_mul_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB81_1: # %vector.body
+; CHECK-NEXT: .LBB95_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmul.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB81_1
+; CHECK-NEXT: bne a0, a2, .LBB95_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4172,13 +4664,13 @@ define void @sink_splat_add_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB82_1: # %vector.body
+; CHECK-NEXT: .LBB96_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vadd.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB82_1
+; CHECK-NEXT: bne a0, a2, .LBB96_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4206,13 +4698,13 @@ define void @sink_splat_sub_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB83_1: # %vector.body
+; CHECK-NEXT: .LBB97_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB83_1
+; CHECK-NEXT: bne a0, a2, .LBB97_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4240,13 +4732,13 @@ define void @sink_splat_rsub_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB84_1: # %vector.body
+; CHECK-NEXT: .LBB98_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vrsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB84_1
+; CHECK-NEXT: bne a0, a2, .LBB98_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4274,13 +4766,13 @@ define void @sink_splat_and_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB85_1: # %vector.body
+; CHECK-NEXT: .LBB99_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vand.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB85_1
+; CHECK-NEXT: bne a0, a2, .LBB99_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4308,13 +4800,13 @@ define void @sink_splat_or_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB86_1: # %vector.body
+; CHECK-NEXT: .LBB100_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB86_1
+; CHECK-NEXT: bne a0, a2, .LBB100_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4342,13 +4834,13 @@ define void @sink_splat_xor_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB87_1: # %vector.body
+; CHECK-NEXT: .LBB101_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vxor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB87_1
+; CHECK-NEXT: bne a0, a2, .LBB101_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4380,7 +4872,7 @@ define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: .LBB88_1: # %vector.body
+; CHECK-NEXT: .LBB102_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -4389,7 +4881,7 @@ define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB88_1
+; CHECK-NEXT: bne a0, a3, .LBB102_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4421,7 +4913,7 @@ define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zer
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: .LBB89_1: # %vector.body
+; CHECK-NEXT: .LBB103_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -4430,7 +4922,7 @@ define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB89_1
+; CHECK-NEXT: bne a0, a2, .LBB103_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4451,3 +4943,485 @@ vector.body: ; preds = %vector.body, %entry
for.cond.cleanup: ; preds = %vector.body
ret void
}
+
+declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_min(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_min:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB104_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB104_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_min_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_min_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB105_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB105_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_max(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_max:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB106_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmax.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB106_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_max_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_max_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB107_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmax.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB107_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_umin_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_umin_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB108_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB108_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_umax(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_umax:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB109_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmaxu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB109_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_umax_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_umax_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB110_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmaxu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB110_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_sadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_sadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB111_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB111_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_sadd_sat_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_sadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB112_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB112_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_ssub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_ssub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB113_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a3, a3, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a3, .LBB113_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_uadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_uadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB114_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB114_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_uadd_sat_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_uadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB115_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB115_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_usub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_usub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB116_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a3, a3, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a3, .LBB116_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
index f3574200054f..4de71b6ce06f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -255,7 +255,7 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -283,7 +283,7 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index c23c10205e6e..4aae8b8bd1dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -22,7 +22,7 @@ define void @vector_interleave_store_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vsc
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 2
; CHECK-NEXT: add a2, a1, a1
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vsm.v v9, (a0)
@@ -112,16 +112,16 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: vid.v v24
; CHECK-NEXT: vand.vi v26, v24, 1
; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vsrl.vi v2, v24, 1
+; CHECK-NEXT: vsrl.vi v6, v24, 1
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a3, a3, 3
; CHECK-NEXT: add a3, sp, a3
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v2, v2, a2, v0.t
+; CHECK-NEXT: vadd.vx v6, v6, a2, v0.t
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v24, v8, v2
+; CHECK-NEXT: vrgatherei16.vv v24, v8, v6
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a2, vlenb
@@ -130,7 +130,7 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v2
+; CHECK-NEXT: vrgatherei16.vv v8, v16, v6
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vs8r.v v8, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index e84fd1b1a703..1acc0fec8fe5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -24,7 +24,7 @@ define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v0, v8, a0
; CHECK-NEXT: ret
;
@@ -44,7 +44,7 @@ define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1>
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: srli a0, a0, 2
; ZVBB-NEXT: add a1, a0, a0
-; ZVBB-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; ZVBB-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; ZVBB-NEXT: vslideup.vx v0, v8, a0
; ZVBB-NEXT: ret
%res = call <vscale x 32 x i1> @llvm.experimental.vector.interleave2.nxv32i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
@@ -376,9 +376,9 @@ define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
@@ -391,9 +391,9 @@ define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half
; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVBB-NEXT: vslidedown.vx v8, v10, a0
; ZVBB-NEXT: add a1, a0, a0
-; ZVBB-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v10, v8, a0
-; ZVBB-NEXT: vmv1r.v v8, v10
+; ZVBB-NEXT: vmv.v.v v8, v10
; ZVBB-NEXT: ret
%res = call <vscale x 4 x half> @llvm.experimental.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
ret <vscale x 4 x half> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 00cb54c61a7a..4168f5cd5079 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -591,7 +591,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -619,7 +619,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -699,7 +699,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfadd_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v3, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
@@ -727,7 +727,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB24_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index a49c0fd08ffe..396e99bc5e4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -535,7 +535,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -563,7 +563,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -643,7 +643,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfdiv_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v3, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
@@ -671,7 +671,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index c18602c98e6b..9ab907bfcca6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -1204,7 +1204,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
; CHECK-NEXT: mul a1, a1, a3
@@ -1267,7 +1267,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 1f716a9abcc5..6e3ee2a31218 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -410,14 +410,14 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index c3aaf743af17..72101d62567b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -255,7 +255,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -283,7 +283,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index f18fa85e68d1..15fa24a35b7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -255,7 +255,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -283,7 +283,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 46b14153447c..bb9d3cfed300 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -535,7 +535,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -563,7 +563,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -643,7 +643,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfmul_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v3, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
@@ -671,7 +671,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 3bbedc109bd0..582043ffb903 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1204,7 +1204,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
; CHECK-NEXT: mul a1, a1, a3
@@ -1267,7 +1267,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index db34980f5252..785f60ad1d39 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -326,7 +326,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv4r.v v0, v8
+; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
@@ -338,7 +338,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
; ZVFHMIN-NEXT: vfneg.v v16, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
@@ -529,8 +529,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index ccbed4b95905..1a2da051c962 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -284,7 +284,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
-; ZVFHMIN-NEXT: vmv4r.v v0, v12
+; ZVFHMIN-NEXT: vmv4r.v v4, v12
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -293,7 +293,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
; ZVFHMIN-NEXT: vfneg.v v16, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v16
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
@@ -490,8 +490,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v8, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
index 8e983f63428a..b888fde7d068 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -937,7 +937,7 @@ define <vscale x 32 x i1> @vfptosi_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v24
; ZVFHMIN-NEXT: vand.vi v8, v8, 1
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; ZVFHMIN-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; ZVFHMIN-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslideup.vx v0, v16, a0
; ZVFHMIN-NEXT: ret
%evec = fptosi <vscale x 32 x half> %va to <vscale x 32 x i1>
@@ -967,7 +967,7 @@ define <vscale x 32 x i1> @vfptoui_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v24
; ZVFHMIN-NEXT: vand.vi v8, v8, 1
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
-; ZVFHMIN-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; ZVFHMIN-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslideup.vx v0, v16, a0
; ZVFHMIN-NEXT: ret
%evec = fptoui <vscale x 32 x half> %va to <vscale x 32 x i1>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 4e84a31d71b5..dd122f1f2511 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -102,7 +102,7 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
@@ -122,7 +122,7 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -145,7 +145,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
@@ -180,7 +180,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB8_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v2, v1, a3
+; CHECK-NEXT: vslidedown.vx v6, v7, a3
; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
@@ -193,7 +193,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfncvt.f.f.w v28, v8, v0.t
@@ -202,7 +202,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB8_6:
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index 0df7b2ce1978..010b133e51b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -535,7 +535,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -563,7 +563,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -643,7 +643,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfsub_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v3, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
@@ -671,7 +671,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
index d84df3a06473..8a484c7f6b77 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
@@ -781,10 +781,10 @@ define <vscale x 16 x float> @vfmacc_vf_nxv16f32_unmasked(<vscale x 16 x half> %
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
index 3dc8340600fd..3a03f0d65273 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
@@ -764,10 +764,10 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_unmasked(<vscale x 16 x half>
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfnmadd.vv v8, v24, v16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
index 6eb1f512f76a..a8cc0ce92aa1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
@@ -735,10 +735,10 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_unmasked(<vscale x 16 x half>
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfnmsub.vv v8, v24, v16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index 2f8454983d0d..4f67aac2d2d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -379,7 +379,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
@@ -401,7 +401,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB28_2:
; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index 2546ec95a007..6d42b15273cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -894,7 +894,7 @@ define half @vreduce_ord_fadd_nxv3f16(<vscale x 3 x half> %v, half %s) {
; CHECK-NEXT: lui a2, 1048568
; CHECK-NEXT: vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a2
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
@@ -916,7 +916,7 @@ define half @vreduce_ord_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v10, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
@@ -938,11 +938,11 @@ define half @vreduce_ord_fadd_nxv10f16(<vscale x 10 x half> %v, half %s) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vmv.v.v v11, v12
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v11, v12, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfmv.s.f v12, fa0
@@ -982,7 +982,7 @@ define half @vreduce_fadd_nxv3f16(<vscale x 3 x half> %v, half %s) {
; CHECK-NEXT: lui a2, 1048568
; CHECK-NEXT: vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a2
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
@@ -1002,7 +1002,7 @@ define half @vreduce_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v10, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
@@ -1025,11 +1025,11 @@ define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
; CHECK-NEXT: vlse16.v v12, (a1), zero
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vmv.v.v v11, v12
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v11, v12, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfredmin.vs v8, v8, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
index bd510d26279c..4f7cb84c0864 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -203,7 +203,7 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: csrr a3, vlenb
; ZVFHMIN-NEXT: srli a1, a3, 1
; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v1, v0, a1
+; ZVFHMIN-NEXT: vslidedown.vx v7, v0, a1
; ZVFHMIN-NEXT: slli a5, a3, 2
; ZVFHMIN-NEXT: sub a1, a0, a5
; ZVFHMIN-NEXT: sltu a2, a0, a1
@@ -228,7 +228,7 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: and a5, a6, a5
; ZVFHMIN-NEXT: srli a3, a3, 2
; ZVFHMIN-NEXT: vsetvli a6, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v2, v0, a3
+; ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
; ZVFHMIN-NEXT: bltu a0, a4, .LBB6_6
; ZVFHMIN-NEXT: # %bb.5:
; ZVFHMIN-NEXT: mv a0, a4
@@ -248,7 +248,7 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v2
+; ZVFHMIN-NEXT: vmv1r.v v0, v6
; ZVFHMIN-NEXT: vfredusum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -258,7 +258,7 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfredusum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -266,7 +266,7 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: vsetivli zero, 1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a3
+; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
@@ -309,7 +309,7 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: csrr a3, vlenb
; ZVFHMIN-NEXT: srli a1, a3, 1
; ZVFHMIN-NEXT: vsetvli a2, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v1, v0, a1
+; ZVFHMIN-NEXT: vslidedown.vx v7, v0, a1
; ZVFHMIN-NEXT: slli a5, a3, 2
; ZVFHMIN-NEXT: sub a1, a0, a5
; ZVFHMIN-NEXT: sltu a2, a0, a1
@@ -334,7 +334,7 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: and a5, a6, a5
; ZVFHMIN-NEXT: srli a3, a3, 2
; ZVFHMIN-NEXT: vsetvli a6, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v2, v0, a3
+; ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
; ZVFHMIN-NEXT: bltu a0, a4, .LBB7_6
; ZVFHMIN-NEXT: # %bb.5:
; ZVFHMIN-NEXT: mv a0, a4
@@ -354,7 +354,7 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v2
+; ZVFHMIN-NEXT: vmv1r.v v0, v6
; ZVFHMIN-NEXT: vfredosum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -364,7 +364,7 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfredosum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -372,7 +372,7 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: vsetivli zero, 1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a3
+; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 3faceb0aa6b6..17c362fc0a1a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -500,15 +500,15 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
-; CHECK-NEXT: vmand.mm v1, v0, v24
+; CHECK-NEXT: vmand.mm v7, v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a2, a0, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v1, a2
+; CHECK-NEXT: vslidedown.vx v0, v7, a2
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.i v24, 0
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: vs8r.v v8, (a1)
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 016a43784733..706876dc3854 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -410,7 +410,7 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
; ZVFHMIN-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -429,7 +429,7 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index a7b4d6616b7b..4857810e7a17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -288,7 +288,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
@@ -323,7 +323,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB17_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v2, v1, a3
+; CHECK-NEXT: vslidedown.vx v6, v7, a3
; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
@@ -336,7 +336,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vnsrl.wi v28, v8, 0, v0.t
@@ -345,7 +345,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB17_6:
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index 668d9373b81d..e083d594db25 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -410,7 +410,7 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v1, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
; ZVFHMIN-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -429,7 +429,7 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v1
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
index 87406f22d169..c0c11fefafb5 100644
--- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
@@ -813,24 +813,24 @@ define i64 @select_sll(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: not a7, a2
; RV32SFB-NEXT: srli a0, a0, 1
; RV32SFB-NEXT: sll t0, a1, a2
-; RV32SFB-NEXT: srl a0, a0, a7
; RV32SFB-NEXT: addi a2, a2, -32
+; RV32SFB-NEXT: srl a0, a0, a7
; RV32SFB-NEXT: mv a1, a3
-; RV32SFB-NEXT: bgez a2, .LBB20_2
+; RV32SFB-NEXT: bltz a2, .LBB20_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: or a1, t0, a0
+; RV32SFB-NEXT: li a3, 0
; RV32SFB-NEXT: .LBB20_2: # %entry
-; RV32SFB-NEXT: bltz a2, .LBB20_4
+; RV32SFB-NEXT: bgez a2, .LBB20_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: li a3, 0
+; RV32SFB-NEXT: or a1, t0, a0
; RV32SFB-NEXT: .LBB20_4: # %entry
; RV32SFB-NEXT: beqz a6, .LBB20_6
; RV32SFB-NEXT: # %bb.5: # %entry
-; RV32SFB-NEXT: mv a1, a5
+; RV32SFB-NEXT: mv a3, a4
; RV32SFB-NEXT: .LBB20_6: # %entry
; RV32SFB-NEXT: beqz a6, .LBB20_8
; RV32SFB-NEXT: # %bb.7: # %entry
-; RV32SFB-NEXT: mv a3, a4
+; RV32SFB-NEXT: mv a1, a5
; RV32SFB-NEXT: .LBB20_8: # %entry
; RV32SFB-NEXT: mv a0, a3
; RV32SFB-NEXT: ret
@@ -874,24 +874,24 @@ define i64 @select_srl(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: not a7, a2
; RV32SFB-NEXT: slli a1, a1, 1
; RV32SFB-NEXT: srl t0, a0, a2
-; RV32SFB-NEXT: sll a1, a1, a7
; RV32SFB-NEXT: addi a2, a2, -32
+; RV32SFB-NEXT: sll a1, a1, a7
; RV32SFB-NEXT: mv a0, a3
-; RV32SFB-NEXT: bgez a2, .LBB21_2
+; RV32SFB-NEXT: bltz a2, .LBB21_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: or a0, t0, a1
+; RV32SFB-NEXT: li a3, 0
; RV32SFB-NEXT: .LBB21_2: # %entry
-; RV32SFB-NEXT: bltz a2, .LBB21_4
+; RV32SFB-NEXT: bgez a2, .LBB21_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: li a3, 0
+; RV32SFB-NEXT: or a0, t0, a1
; RV32SFB-NEXT: .LBB21_4: # %entry
; RV32SFB-NEXT: beqz a6, .LBB21_6
; RV32SFB-NEXT: # %bb.5: # %entry
-; RV32SFB-NEXT: mv a0, a4
+; RV32SFB-NEXT: mv a3, a5
; RV32SFB-NEXT: .LBB21_6: # %entry
; RV32SFB-NEXT: beqz a6, .LBB21_8
; RV32SFB-NEXT: # %bb.7: # %entry
-; RV32SFB-NEXT: mv a3, a5
+; RV32SFB-NEXT: mv a0, a4
; RV32SFB-NEXT: .LBB21_8: # %entry
; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: ret
@@ -935,24 +935,24 @@ define i64 @select_sra(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: not a7, a2
; RV32SFB-NEXT: slli t0, a1, 1
; RV32SFB-NEXT: srl t1, a0, a2
-; RV32SFB-NEXT: sll a7, t0, a7
; RV32SFB-NEXT: addi a2, a2, -32
+; RV32SFB-NEXT: sll a7, t0, a7
; RV32SFB-NEXT: mv a0, a3
-; RV32SFB-NEXT: bgez a2, .LBB22_2
+; RV32SFB-NEXT: bltz a2, .LBB22_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: or a0, t1, a7
+; RV32SFB-NEXT: srai a3, a1, 31
; RV32SFB-NEXT: .LBB22_2: # %entry
-; RV32SFB-NEXT: bltz a2, .LBB22_4
+; RV32SFB-NEXT: bgez a2, .LBB22_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: srai a3, a1, 31
+; RV32SFB-NEXT: or a0, t1, a7
; RV32SFB-NEXT: .LBB22_4: # %entry
; RV32SFB-NEXT: beqz a6, .LBB22_6
; RV32SFB-NEXT: # %bb.5: # %entry
-; RV32SFB-NEXT: mv a0, a4
+; RV32SFB-NEXT: mv a3, a5
; RV32SFB-NEXT: .LBB22_6: # %entry
; RV32SFB-NEXT: beqz a6, .LBB22_8
; RV32SFB-NEXT: # %bb.7: # %entry
-; RV32SFB-NEXT: mv a3, a5
+; RV32SFB-NEXT: mv a0, a4
; RV32SFB-NEXT: .LBB22_8: # %entry
; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: ret
@@ -1088,11 +1088,11 @@ define i64 @select_andi(i64 %A, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: # %bb.1: # %entry
; RV32SFB-NEXT: andi a2, a0, 567
; RV32SFB-NEXT: .LBB25_2: # %entry
+; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: bnez a4, .LBB25_4
; RV32SFB-NEXT: # %bb.3: # %entry
; RV32SFB-NEXT: li a1, 0
; RV32SFB-NEXT: .LBB25_4: # %entry
-; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: ret
entry:
%0 = and i64 %A, 567
@@ -1130,13 +1130,13 @@ define i64 @select_ori(i64 %A, i64 %C, i1 zeroext %cond) {
;
; RV32SFB-LABEL: select_ori:
; RV32SFB: # %bb.0: # %entry
-; RV32SFB-NEXT: beqz a4, .LBB26_2
+; RV32SFB-NEXT: bnez a4, .LBB26_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: mv a1, a3
+; RV32SFB-NEXT: ori a2, a0, 890
; RV32SFB-NEXT: .LBB26_2: # %entry
-; RV32SFB-NEXT: bnez a4, .LBB26_4
+; RV32SFB-NEXT: beqz a4, .LBB26_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: ori a2, a0, 890
+; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: .LBB26_4: # %entry
; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: ret
@@ -1176,13 +1176,13 @@ define i64 @select_xori(i64 %A, i64 %C, i1 zeroext %cond) {
;
; RV32SFB-LABEL: select_xori:
; RV32SFB: # %bb.0: # %entry
-; RV32SFB-NEXT: beqz a4, .LBB27_2
+; RV32SFB-NEXT: bnez a4, .LBB27_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: mv a1, a3
+; RV32SFB-NEXT: xori a2, a0, 321
; RV32SFB-NEXT: .LBB27_2: # %entry
-; RV32SFB-NEXT: bnez a4, .LBB27_4
+; RV32SFB-NEXT: beqz a4, .LBB27_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: xori a2, a0, 321
+; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: .LBB27_4: # %entry
; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: ret
@@ -1272,11 +1272,11 @@ define i64 @select_srli(i64 %A, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: bnez a4, .LBB29_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: srli a0, a1, 3
+; RV32SFB-NEXT: li a3, 0
; RV32SFB-NEXT: .LBB29_2: # %entry
; RV32SFB-NEXT: bnez a4, .LBB29_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: li a3, 0
+; RV32SFB-NEXT: srli a0, a1, 3
; RV32SFB-NEXT: .LBB29_4: # %entry
; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
index 3740dc675949..ec7e0ecce80c 100644
--- a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -283,9 +283,8 @@ define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
; CHECK-LABEL: safe_sub_var_imm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbu a0, 0(a0)
-; CHECK-NEXT: addi a0, a0, 8
-; CHECK-NEXT: andi a0, a0, 255
-; CHECK-NEXT: sltiu a0, a0, 253
+; CHECK-NEXT: addi a0, a0, -248
+; CHECK-NEXT: sltiu a0, a0, -3
; CHECK-NEXT: xori a0, a0, 1
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
index daf46c6eef02..4f33439db770 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
@@ -1,5 +1,5 @@
-; RUN: not --crash llc < %s -enable-emscripten-cxx-exceptions -mattr=+multivalue -wasm-emit-multivalue 2>&1 | FileCheck %s --check-prefix=EH
-; RUN: not --crash llc < %s -enable-emscripten-sjlj -mattr=+multivalue 2>&1 -wasm-emit-multivalue | FileCheck %s --check-prefix=SJLJ
+; RUN: not --crash llc < %s -enable-emscripten-cxx-exceptions -mattr=+multivalue 2>&1 | FileCheck %s --check-prefix=EH
+; RUN: not --crash llc < %s -enable-emscripten-sjlj -mattr=+multivalue 2>&1 | FileCheck %s --check-prefix=SJLJ
; Currently multivalue returning functions are not supported in Emscripten EH /
; SjLj. Make sure they error out.
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir b/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir
index 4fadbd5f07e6..4b4661b14466 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir
+++ b/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=wasm32-unknown-unknown -mattr=+multivalue -wasm-emit-multivalue -run-pass=wasm-reg-stackify -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=wasm32-unknown-unknown -mattr=+multivalue -run-pass=wasm-reg-stackify -verify-machineinstrs %s -o - | FileCheck %s
--- |
target datalayout = "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20"
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll
index f4f93ac2f30c..52a8c686824d 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll
+++ b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE: Test functions have been generated by multivalue-stackify.py.
-; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue -wasm-emit-multivalue | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue | FileCheck %s
; Test that the multivalue stackification works
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue.ll b/llvm/test/CodeGen/WebAssembly/multivalue.ll
index 846691e5ff0c..675009c8f3e5 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue.ll
+++ b/llvm/test/CodeGen/WebAssembly/multivalue.ll
@@ -1,8 +1,7 @@
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+multivalue,+tail-call -wasm-emit-multivalue | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+reference-types,+multivalue,+tail-call -wasm-emit-multivalue | FileCheck --check-prefix REF %s
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mcpu=mvp -mattr=+multivalue,+tail-call -wasm-emit-multivalue | FileCheck %s --check-prefix REGS
-; RUN: llc < %s --filetype=obj -mcpu=mvp -mattr=+multivalue,+tail-call -wasm-emit-multivalue | obj2yaml | FileCheck %s --check-prefix OBJ
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+multivalue,+tail-call | FileCheck %s --check-prefix NO-MULTIVALUE
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+multivalue,+tail-call | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+reference-types,+multivalue,+tail-call | FileCheck --check-prefix REF %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mcpu=mvp -mattr=+multivalue,+tail-call | FileCheck %s --check-prefix REGS
+; RUN: llc < %s --filetype=obj -mcpu=mvp -mattr=+multivalue,+tail-call | obj2yaml | FileCheck %s --check-prefix OBJ
; Test that the multivalue calls, returns, function types, and block
; types work as expected.
@@ -20,7 +19,6 @@ declare void @use_i64(i64)
; CHECK-NEXT: i32.const 42{{$}}
; CHECK-NEXT: i64.const 42{{$}}
; CHECK-NEXT: end_function{{$}}
-; NO-MULTIVALUE-NOT: .functype pair_const () -> (i32, i64)
define %pair @pair_const() {
ret %pair { i32 42, i64 42 }
}
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll b/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll
index 7bf37b59353a..47c5ae7b457d 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll
+++ b/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc < %s -verify-machineinstrs -mcpu=mvp -mattr=+multivalue -wasm-emit-multivalue | FileCheck %s --check-prefix=MULTIVALUE
+; RUN: llc < %s -verify-machineinstrs -mcpu=mvp -mattr=+multivalue | FileCheck %s --check-prefix=MULTIVALUE
; RUN: llc < %s -verify-machineinstrs -mcpu=mvp | FileCheck %s --check-prefix=NO_MULTIVALUE
; Test libcall signatures when multivalue is enabled and disabled
diff --git a/llvm/test/CodeGen/WebAssembly/ref-type-mem2local.ll b/llvm/test/CodeGen/WebAssembly/ref-type-mem2local.ll
new file mode 100644
index 000000000000..a38243ca218c
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/ref-type-mem2local.ll
@@ -0,0 +1,57 @@
+; RUN: opt < %s -wasm-ref-type-mem2local -S | FileCheck %s
+
+target triple = "wasm32-unknown-unknown"
+
+%externref = type ptr addrspace(10)
+%funcref = type ptr addrspace(20)
+
+declare %externref @get_externref()
+declare %funcref @get_funcref()
+declare i32 @get_i32()
+declare void @take_externref(%externref)
+declare void @take_funcref(%funcref)
+declare void @take_i32(i32)
+
+; Reference type allocas should be moved to addrspace(1)
+; CHECK-LABEL: @test_ref_type_mem2local
+define void @test_ref_type_mem2local() {
+entry:
+ %alloc.externref = alloca %externref, align 1
+ %eref = call %externref @get_externref()
+ store %externref %eref, ptr %alloc.externref, align 1
+ %eref.loaded = load %externref, ptr %alloc.externref, align 1
+ call void @take_externref(%externref %eref.loaded)
+ ; CHECK: %alloc.externref.var = alloca ptr addrspace(10), align 1, addrspace(1)
+ ; CHECK-NEXT: %eref = call ptr addrspace(10) @get_externref()
+ ; CHECK-NEXT: store ptr addrspace(10) %eref, ptr addrspace(1) %alloc.externref.var, align 1
+ ; CHECK-NEXT: %eref.loaded = load ptr addrspace(10), ptr addrspace(1) %alloc.externref.var, align 1
+ ; CHECK-NEXT: call void @take_externref(ptr addrspace(10) %eref.loaded)
+
+ %alloc.funcref = alloca %funcref, align 1
+ %fref = call %funcref @get_funcref()
+ store %funcref %fref, ptr %alloc.funcref, align 1
+ %fref.loaded = load %funcref, ptr %alloc.funcref, align 1
+ call void @take_funcref(%funcref %fref.loaded)
+ ; CHECK-NEXT: %alloc.funcref.var = alloca ptr addrspace(20), align 1, addrspace(1)
+ ; CHECK-NEXT: %fref = call ptr addrspace(20) @get_funcref()
+ ; CHECK-NEXT: store ptr addrspace(20) %fref, ptr addrspace(1) %alloc.funcref.var, align 1
+ ; CHECK-NEXT: %fref.loaded = load ptr addrspace(20), ptr addrspace(1) %alloc.funcref.var, align 1
+ ; CHECK-NEXT: call void @take_funcref(ptr addrspace(20) %fref.loaded)
+
+ ret void
+}
+
+; POD type allocas should stay the same
+; CHECK-LABEL: @test_pod_type
+define void @test_pod_type() {
+entry:
+ %alloc.i32 = alloca i32
+ %i32 = call i32 @get_i32()
+ store i32 %i32, ptr %alloc.i32
+ %i32.loaded = load i32, ptr %alloc.i32
+ call void @take_i32(i32 %i32.loaded)
+ ; CHECK: %alloc.i32 = alloca i32, align 4{{$}}
+ ; CHECK-NOT: addrspace(1)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll b/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
index 0044d1c35683..e6f28c2057f7 100644
--- a/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
+++ b/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
@@ -18,15 +18,16 @@ define i1 @loadAndRLEsource_no_exit_2E_1_label_2E_0(i32 %tmp.21.reload, i32 %tmp
; CHECK-NEXT: movl _block, %esi
; CHECK-NEXT: movb %al, 1(%esi,%edx)
; CHECK-NEXT: cmpl %ecx, _last
-; CHECK-NEXT: jge LBB0_3
-; CHECK-NEXT: ## %bb.1: ## %label.0
+; CHECK-NEXT: setl %cl
; CHECK-NEXT: cmpl $257, %eax ## imm = 0x101
-; CHECK-NEXT: je LBB0_3
-; CHECK-NEXT: ## %bb.2: ## %label.0.no_exit.1_crit_edge.exitStub
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: testb %al, %cl
+; CHECK-NEXT: je LBB0_2
+; CHECK-NEXT: ## %bb.1: ## %label.0.no_exit.1_crit_edge.exitStub
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
-; CHECK-NEXT: LBB0_3: ## %codeRepl5.exitStub
+; CHECK-NEXT: LBB0_2: ## %codeRepl5.exitStub
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
index 7bdc4e19a1cf..28b4541c1bfc 100644
--- a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
+++ b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
@@ -44,7 +44,7 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; CHECK-NEXT: callq __ubyte_convert_to_ctype
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: js LBB0_4
+; CHECK-NEXT: js LBB0_6
; CHECK-NEXT: ## %bb.1: ## %cond_next.i
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; CHECK-NEXT: movq %rbx, %rdi
@@ -53,81 +53,84 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: sarl $31, %ecx
; CHECK-NEXT: andl %eax, %ecx
; CHECK-NEXT: cmpl $-2, %ecx
-; CHECK-NEXT: je LBB0_8
+; CHECK-NEXT: je LBB0_10
; CHECK-NEXT: ## %bb.2: ## %cond_next.i
; CHECK-NEXT: cmpl $-1, %ecx
-; CHECK-NEXT: jne LBB0_6
-; CHECK-NEXT: LBB0_3: ## %bb4
+; CHECK-NEXT: jne LBB0_3
+; CHECK-NEXT: LBB0_8: ## %bb4
; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %rax
; CHECK-NEXT: movq (%rax), %rax
; CHECK-NEXT: movq 16(%rax), %rax
-; CHECK-NEXT: jmp LBB0_10
-; CHECK-NEXT: LBB0_4: ## %_ubyte_convert2_to_ctypes.exit
+; CHECK-NEXT: jmp LBB0_9
+; CHECK-NEXT: LBB0_6: ## %_ubyte_convert2_to_ctypes.exit
; CHECK-NEXT: cmpl $-2, %eax
-; CHECK-NEXT: je LBB0_8
-; CHECK-NEXT: ## %bb.5: ## %_ubyte_convert2_to_ctypes.exit
+; CHECK-NEXT: je LBB0_10
+; CHECK-NEXT: ## %bb.7: ## %_ubyte_convert2_to_ctypes.exit
; CHECK-NEXT: cmpl $-1, %eax
-; CHECK-NEXT: je LBB0_3
-; CHECK-NEXT: LBB0_6: ## %bb35
+; CHECK-NEXT: je LBB0_8
+; CHECK-NEXT: LBB0_3: ## %bb35
; CHECK-NEXT: movq _PyUFunc_API@GOTPCREL(%rip), %r14
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: callq *216(%rax)
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %edx
; CHECK-NEXT: testb %dl, %dl
-; CHECK-NEXT: je LBB0_11
-; CHECK-NEXT: ## %bb.7: ## %cond_false.i
+; CHECK-NEXT: je LBB0_4
+; CHECK-NEXT: ## %bb.12: ## %cond_false.i
+; CHECK-NEXT: setne %dil
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
; CHECK-NEXT: movzbl %sil, %ecx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: divb %dl
; CHECK-NEXT: movl %eax, %r15d
; CHECK-NEXT: testb %cl, %cl
-; CHECK-NEXT: jne LBB0_12
-; CHECK-NEXT: jmp LBB0_14
-; CHECK-NEXT: LBB0_8: ## %bb17
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: testb %dil, %al
+; CHECK-NEXT: jne LBB0_5
+; CHECK-NEXT: LBB0_13: ## %cond_true.i200
+; CHECK-NEXT: testb %dl, %dl
+; CHECK-NEXT: jne LBB0_15
+; CHECK-NEXT: ## %bb.14: ## %cond_true14.i
+; CHECK-NEXT: movl $4, %edi
+; CHECK-NEXT: callq _feraiseexcept
+; CHECK-NEXT: LBB0_15: ## %ubyte_ctype_remainder.exit
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: jmp LBB0_16
+; CHECK-NEXT: LBB0_10: ## %bb17
; CHECK-NEXT: callq _PyErr_Occurred
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: jne LBB0_27
-; CHECK-NEXT: ## %bb.9: ## %cond_next
+; CHECK-NEXT: jne LBB0_23
+; CHECK-NEXT: ## %bb.11: ## %cond_next
; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %rax
; CHECK-NEXT: movq (%rax), %rax
; CHECK-NEXT: movq 80(%rax), %rax
-; CHECK-NEXT: LBB0_10: ## %bb4
+; CHECK-NEXT: LBB0_9: ## %bb4
; CHECK-NEXT: movq 96(%rax), %rax
; CHECK-NEXT: movq %r14, %rdi
; CHECK-NEXT: movq %rbx, %rsi
; CHECK-NEXT: callq *40(%rax)
-; CHECK-NEXT: jmp LBB0_28
-; CHECK-NEXT: LBB0_11: ## %cond_true.i
+; CHECK-NEXT: jmp LBB0_24
+; CHECK-NEXT: LBB0_4: ## %cond_true.i
; CHECK-NEXT: movl $4, %edi
; CHECK-NEXT: callq _feraiseexcept
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %edx
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
-; CHECK-NEXT: xorl %r15d, %r15d
; CHECK-NEXT: testb %sil, %sil
-; CHECK-NEXT: je LBB0_14
-; CHECK-NEXT: LBB0_12: ## %cond_false.i
+; CHECK-NEXT: sete %al
; CHECK-NEXT: testb %dl, %dl
-; CHECK-NEXT: je LBB0_14
-; CHECK-NEXT: ## %bb.13: ## %cond_next17.i
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: xorl %r15d, %r15d
+; CHECK-NEXT: orb %al, %cl
+; CHECK-NEXT: jne LBB0_13
+; CHECK-NEXT: LBB0_5: ## %cond_next17.i
; CHECK-NEXT: movzbl %sil, %eax
; CHECK-NEXT: divb %dl
; CHECK-NEXT: movzbl %ah, %ebx
-; CHECK-NEXT: jmp LBB0_18
-; CHECK-NEXT: LBB0_14: ## %cond_true.i200
-; CHECK-NEXT: testb %dl, %dl
-; CHECK-NEXT: jne LBB0_17
-; CHECK-NEXT: ## %bb.16: ## %cond_true14.i
-; CHECK-NEXT: movl $4, %edi
-; CHECK-NEXT: callq _feraiseexcept
-; CHECK-NEXT: LBB0_17: ## %ubyte_ctype_remainder.exit
-; CHECK-NEXT: xorl %ebx, %ebx
-; CHECK-NEXT: LBB0_18: ## %ubyte_ctype_remainder.exit
+; CHECK-NEXT: LBB0_16: ## %ubyte_ctype_remainder.exit
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: callq *224(%rax)
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je LBB0_21
-; CHECK-NEXT: ## %bb.19: ## %cond_true61
+; CHECK-NEXT: je LBB0_19
+; CHECK-NEXT: ## %bb.17: ## %cond_true61
; CHECK-NEXT: movl %eax, %ebp
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: movq _.str5@GOTPCREL(%rip), %rdi
@@ -136,8 +139,8 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
; CHECK-NEXT: callq *200(%rax)
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: js LBB0_27
-; CHECK-NEXT: ## %bb.20: ## %cond_next73
+; CHECK-NEXT: js LBB0_23
+; CHECK-NEXT: ## %bb.18: ## %cond_next73
; CHECK-NEXT: movl $1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rsi
@@ -146,13 +149,13 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: movl %ebp, %edx
; CHECK-NEXT: callq *232(%rax)
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: jne LBB0_27
-; CHECK-NEXT: LBB0_21: ## %cond_next89
+; CHECK-NEXT: jne LBB0_23
+; CHECK-NEXT: LBB0_19: ## %cond_next89
; CHECK-NEXT: movl $2, %edi
; CHECK-NEXT: callq _PyTuple_New
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: je LBB0_27
-; CHECK-NEXT: ## %bb.22: ## %cond_next97
+; CHECK-NEXT: je LBB0_23
+; CHECK-NEXT: ## %bb.20: ## %cond_next97
; CHECK-NEXT: movq %rax, %r14
; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %r12
; CHECK-NEXT: movq (%r12), %rax
@@ -160,8 +163,8 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: callq *304(%rdi)
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: je LBB0_25
-; CHECK-NEXT: ## %bb.23: ## %cond_next135
+; CHECK-NEXT: je LBB0_21
+; CHECK-NEXT: ## %bb.25: ## %cond_next135
; CHECK-NEXT: movb %r15b, 16(%rax)
; CHECK-NEXT: movq %rax, 24(%r14)
; CHECK-NEXT: movq (%r12), %rax
@@ -169,22 +172,22 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: callq *304(%rdi)
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: je LBB0_25
-; CHECK-NEXT: ## %bb.24: ## %cond_next182
+; CHECK-NEXT: je LBB0_21
+; CHECK-NEXT: ## %bb.26: ## %cond_next182
; CHECK-NEXT: movb %bl, 16(%rax)
; CHECK-NEXT: movq %rax, 32(%r14)
; CHECK-NEXT: movq %r14, %rax
-; CHECK-NEXT: jmp LBB0_28
-; CHECK-NEXT: LBB0_25: ## %cond_true113
+; CHECK-NEXT: jmp LBB0_24
+; CHECK-NEXT: LBB0_21: ## %cond_true113
; CHECK-NEXT: decq (%r14)
-; CHECK-NEXT: jne LBB0_27
-; CHECK-NEXT: ## %bb.26: ## %cond_true126
+; CHECK-NEXT: jne LBB0_23
+; CHECK-NEXT: ## %bb.22: ## %cond_true126
; CHECK-NEXT: movq 8(%r14), %rax
; CHECK-NEXT: movq %r14, %rdi
; CHECK-NEXT: callq *48(%rax)
-; CHECK-NEXT: LBB0_27: ## %UnifiedReturnBlock
+; CHECK-NEXT: LBB0_23: ## %UnifiedReturnBlock
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: LBB0_28: ## %UnifiedReturnBlock
+; CHECK-NEXT: LBB0_24: ## %UnifiedReturnBlock
; CHECK-NEXT: addq $32, %rsp
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r12
diff --git a/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll b/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
index 4482c5aec8e8..d9d4424267d7 100644
--- a/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
+++ b/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
@@ -16,15 +16,12 @@ define void @_ada_c34007g() {
; CHECK-NEXT: andl $-8, %esp
; CHECK-NEXT: subl $8, %esp
; CHECK-NEXT: movl (%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: orl %eax, %ecx
+; CHECK-NEXT: sete %cl
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB0_3
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: orl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: jne .LBB0_3
-; CHECK-NEXT: # %bb.2: # %entry
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: .LBB0_3: # %bb5507
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: testb %cl, %al
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: .cfi_def_cfa %esp, 4
diff --git a/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll b/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
index dd60e641df25..e0b6e38647d8 100644
--- a/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc < %s -mtriple=i686-- -mcpu=yonah -stats 2>&1 | grep "Number of block tails merged" | grep 16
+; RUN: llc < %s -mtriple=i686-- -mcpu=yonah -stats 2>&1 | grep "Number of block tails merged" | grep 9
; PR1909
@.str = internal constant [48 x i8] c"transformed bounds: (%.2f, %.2f), (%.2f, %.2f)\0A\00" ; <ptr> [#uses=1]
@@ -217,4 +217,4 @@ bb456: ; preds = %bb448, %bb425, %bb417, %bb395, %bb385, %bb371
ret void
}
-declare i32 @printf(ptr, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
diff --git a/llvm/test/CodeGen/X86/apx/compress-evex.mir b/llvm/test/CodeGen/X86/apx/compress-evex.mir
index 997a8395aa75..d8bef886e234 100644
--- a/llvm/test/CodeGen/X86/apx/compress-evex.mir
+++ b/llvm/test/CodeGen/X86/apx/compress-evex.mir
@@ -52,6 +52,15 @@ body: |
RET64 $rax
...
---
+name: ndd_2_non_ndd_mem
+body: |
+ bb.0.entry:
+ ; CHECK: addq $123456, (%rax), %rax # encoding: [0x62,0xf4,0xfc,0x18,0x81,0x00,0x40,0xe2,0x01,0x00]
+ renamable $rax = MOV64rm $noreg, 1, $noreg, 0, $fs
+ renamable $rax = nsw ADD64mi32_ND killed renamable $rax, 1, $noreg, 0, $noreg, 123456, implicit-def dead $eflags
+ RET64 $rax
+...
+---
name: ndd_2_non_ndd_egpr
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/apx/sub.ll b/llvm/test/CodeGen/X86/apx/sub.ll
index 4bcfa2586fbf..4b0bd1487214 100644
--- a/llvm/test/CodeGen/X86/apx/sub.ll
+++ b/llvm/test/CodeGen/X86/apx/sub.ll
@@ -89,31 +89,31 @@ entry:
define i16 @sub16ri8(i16 noundef %a) {
; CHECK-LABEL: sub16ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl $-123, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xc7,0x85]
+; CHECK-NEXT: subl $-128, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xef,0x80]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %sub = sub i16 %a, 123
+ %sub = sub i16 %a, -128
ret i16 %sub
}
define i32 @sub32ri8(i32 noundef %a) {
; CHECK-LABEL: sub32ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl $-123, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xc7,0x85]
+; CHECK-NEXT: subl $-128, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0xef,0x80]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %sub = sub i32 %a, 123
+ %sub = sub i32 %a, -128
ret i32 %sub
}
define i64 @sub64ri8(i64 noundef %a) {
; CHECK-LABEL: sub64ri8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $-123, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x83,0xc7,0x85]
+; CHECK-NEXT: subq $-128, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x83,0xef,0x80]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %sub = sub i64 %a, 123
+ %sub = sub i64 %a, -128
ret i64 %sub
}
@@ -153,11 +153,11 @@ entry:
define i64 @sub64ri(i64 noundef %a) {
; CHECK-LABEL: sub64ri:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $-123456, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x81,0xc7,0xc0,0x1d,0xfe,0xff]
-; CHECK-NEXT: # imm = 0xFFFE1DC0
+; CHECK-NEXT: subq $-2147483648, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x81,0xef,0x00,0x00,0x00,0x80]
+; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %sub = sub i64 %a, 123456
+ %sub = sub i64 %a, -2147483648
ret i64 %sub
}
@@ -211,34 +211,34 @@ define i16 @sub16mi8(ptr %a) {
; CHECK-LABEL: sub16mi8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movzwl (%rdi), %eax # encoding: [0x0f,0xb7,0x07]
-; CHECK-NEXT: addl $-123, %eax # EVEX TO LEGACY Compression encoding: [0x83,0xc0,0x85]
+; CHECK-NEXT: subl $-128, %eax # EVEX TO LEGACY Compression encoding: [0x83,0xe8,0x80]
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t= load i16, ptr %a
- %sub = sub nsw i16 %t, 123
+ %sub = sub nsw i16 %t, -128
ret i16 %sub
}
define i32 @sub32mi8(ptr %a) {
; CHECK-LABEL: sub32mi8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addl $-123, (%rdi), %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0x07,0x85]
+; CHECK-NEXT: subl $-128, (%rdi), %eax # encoding: [0x62,0xf4,0x7c,0x18,0x83,0x2f,0x80]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t= load i32, ptr %a
- %sub = sub nsw i32 %t, 123
+ %sub = sub nsw i32 %t, -128
ret i32 %sub
}
define i64 @sub64mi8(ptr %a) {
; CHECK-LABEL: sub64mi8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $-123, (%rdi), %rax # encoding: [0x62,0xf4,0xfc,0x18,0x83,0x07,0x85]
+; CHECK-NEXT: subq $-128, (%rdi), %rax # encoding: [0x62,0xf4,0xfc,0x18,0x83,0x2f,0x80]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t= load i64, ptr %a
- %sub = sub nsw i64 %t, 123
+ %sub = sub nsw i64 %t, -128
ret i64 %sub
}
@@ -282,12 +282,12 @@ entry:
define i64 @sub64mi(ptr %a) {
; CHECK-LABEL: sub64mi:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addq $-123456, (%rdi), %rax # encoding: [0x62,0xf4,0xfc,0x18,0x81,0x07,0xc0,0x1d,0xfe,0xff]
-; CHECK-NEXT: # imm = 0xFFFE1DC0
+; CHECK-NEXT: subq $-2147483648, (%rdi), %rax # encoding: [0x62,0xf4,0xfc,0x18,0x81,0x2f,0x00,0x00,0x00,0x80]
+; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
%t= load i64, ptr %a
- %sub = sub nsw i64 %t, 123456
+ %sub = sub nsw i64 %t, -2147483648
ret i64 %sub
}
diff --git a/llvm/test/CodeGen/X86/avx-cmp.ll b/llvm/test/CodeGen/X86/avx-cmp.ll
index 502bbf3f5d11..4ab9c545ed90 100644
--- a/llvm/test/CodeGen/X86/avx-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx-cmp.ll
@@ -26,40 +26,33 @@ declare void @scale() nounwind
define void @render(double %a0) nounwind {
; CHECK-LABEL: render:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT: subq $16, %rsp
+; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne .LBB2_6
+; CHECK-NEXT: jne .LBB2_5
; CHECK-NEXT: # %bb.1: # %for.cond5.preheader
-; CHECK-NEXT: xorl %ebx, %ebx
-; CHECK-NEXT: movb $1, %bpl
+; CHECK-NEXT: movb $1, %bl
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB2_2: # %for.cond5
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: testb %bl, %bl
; CHECK-NEXT: jne .LBB2_2
-; CHECK-NEXT: # %bb.3: # %for.cond5
-; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
-; CHECK-NEXT: testb %bpl, %bpl
-; CHECK-NEXT: jne .LBB2_2
-; CHECK-NEXT: # %bb.4: # %for.body33.preheader
+; CHECK-NEXT: # %bb.3: # %for.body33.preheader
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
-; CHECK-NEXT: vmovsd (%rsp), %xmm0 # 8-byte Reload
+; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: jne .LBB2_5
+; CHECK-NEXT: jne .LBB2_4
; CHECK-NEXT: jnp .LBB2_2
-; CHECK-NEXT: .LBB2_5: # %if.then
+; CHECK-NEXT: .LBB2_4: # %if.then
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
; CHECK-NEXT: callq scale@PLT
; CHECK-NEXT: jmp .LBB2_2
-; CHECK-NEXT: .LBB2_6: # %for.end52
-; CHECK-NEXT: addq $8, %rsp
+; CHECK-NEXT: .LBB2_5: # %for.end52
+; CHECK-NEXT: addq $16, %rsp
; CHECK-NEXT: popq %rbx
-; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq
entry:
br i1 undef, label %for.cond5, label %for.end52
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index abfe3e6428e6..3e40bfa1e791 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -2171,19 +2171,13 @@ define void @test_concat_v2i1(ptr %arg, ptr %arg1, ptr %arg2) nounwind {
; KNL-LABEL: test_concat_v2i1:
; KNL: ## %bb.0:
; KNL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; KNL-NEXT: vpextrw $0, %xmm0, %eax
-; KNL-NEXT: movzwl %ax, %eax
-; KNL-NEXT: vmovd %eax, %xmm1
-; KNL-NEXT: vcvtph2ps %xmm1, %xmm1
+; KNL-NEXT: vcvtph2ps %xmm0, %xmm1
; KNL-NEXT: vmovss {{.*#+}} xmm2 = [6.0E+0,0.0E+0,0.0E+0,0.0E+0]
; KNL-NEXT: vucomiss %xmm2, %xmm1
; KNL-NEXT: setb %al
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: vpsrld $16, %xmm0, %xmm0
-; KNL-NEXT: vpextrw $0, %xmm0, %eax
-; KNL-NEXT: movzwl %ax, %eax
-; KNL-NEXT: vmovd %eax, %xmm0
+; KNL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; KNL-NEXT: vcvtph2ps %xmm0, %xmm0
; KNL-NEXT: vucomiss %xmm2, %xmm0
; KNL-NEXT: setb %al
@@ -2212,19 +2206,13 @@ define void @test_concat_v2i1(ptr %arg, ptr %arg1, ptr %arg2) nounwind {
; SKX-LABEL: test_concat_v2i1:
; SKX: ## %bb.0:
; SKX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; SKX-NEXT: vpsrld $16, %xmm0, %xmm1
-; SKX-NEXT: vpextrw $0, %xmm1, %eax
-; SKX-NEXT: movzwl %ax, %eax
-; SKX-NEXT: vmovd %eax, %xmm1
+; SKX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[1,1,1,1,4,5,6,7]
; SKX-NEXT: vcvtph2ps %xmm1, %xmm1
; SKX-NEXT: vmovss {{.*#+}} xmm2 = [6.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SKX-NEXT: vucomiss %xmm2, %xmm1
; SKX-NEXT: setb %al
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
-; SKX-NEXT: vpextrw $0, %xmm0, %eax
-; SKX-NEXT: movzwl %ax, %eax
-; SKX-NEXT: vmovd %eax, %xmm0
; SKX-NEXT: vcvtph2ps %xmm0, %xmm0
; SKX-NEXT: vucomiss %xmm2, %xmm0
; SKX-NEXT: setb %al
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index f5cca7838bd8..86ebb1e40870 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -1436,10 +1436,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; KNL: ## %bb.0: ## %entry
; KNL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; KNL-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; KNL-NEXT: vpsrld $16, %xmm0, %xmm1 ## encoding: [0xc5,0xf1,0x72,0xd0,0x10]
-; KNL-NEXT: vpextrw $0, %xmm1, %eax ## encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
-; KNL-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
-; KNL-NEXT: vmovd %eax, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc8]
+; KNL-NEXT: vpshuflw $85, %xmm0, %xmm1 ## encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; KNL-NEXT: ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
; KNL-NEXT: vcvtph2ps %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x13,0xc9]
; KNL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; KNL-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
@@ -1449,9 +1447,6 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; KNL-NEXT: movl $0, %edx ## encoding: [0xba,0x00,0x00,0x00,0x00]
; KNL-NEXT: cmovnel %ecx, %edx ## encoding: [0x0f,0x45,0xd1]
; KNL-NEXT: cmovpl %ecx, %edx ## encoding: [0x0f,0x4a,0xd1]
-; KNL-NEXT: vpextrw $0, %xmm0, %edi ## encoding: [0xc5,0xf9,0xc5,0xf8,0x00]
-; KNL-NEXT: movzwl %di, %edi ## encoding: [0x0f,0xb7,0xff]
-; KNL-NEXT: vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; KNL-NEXT: vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; KNL-NEXT: vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
; KNL-NEXT: cmovnel %ecx, %eax ## encoding: [0x0f,0x45,0xc1]
@@ -1468,10 +1463,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; AVX512BW: ## %bb.0: ## %entry
; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1 ## encoding: [0xc5,0xf1,0x72,0xd0,0x10]
-; AVX512BW-NEXT: vpextrw $0, %xmm1, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
-; AVX512BW-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
-; AVX512BW-NEXT: vmovd %eax, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc8]
+; AVX512BW-NEXT: vpshuflw $85, %xmm0, %xmm1 ## encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; AVX512BW-NEXT: ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
; AVX512BW-NEXT: vcvtph2ps %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x13,0xc9]
; AVX512BW-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512BW-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
@@ -1481,9 +1474,6 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; AVX512BW-NEXT: movl $0, %edx ## encoding: [0xba,0x00,0x00,0x00,0x00]
; AVX512BW-NEXT: cmovnel %ecx, %edx ## encoding: [0x0f,0x45,0xd1]
; AVX512BW-NEXT: cmovpl %ecx, %edx ## encoding: [0x0f,0x4a,0xd1]
-; AVX512BW-NEXT: vpextrw $0, %xmm0, %edi ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xf8,0x00]
-; AVX512BW-NEXT: movzwl %di, %edi ## encoding: [0x0f,0xb7,0xff]
-; AVX512BW-NEXT: vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; AVX512BW-NEXT: vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; AVX512BW-NEXT: vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
; AVX512BW-NEXT: cmovnel %ecx, %eax ## encoding: [0x0f,0x45,0xc1]
@@ -1500,10 +1490,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SKX-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; SKX-NEXT: vpsrld $16, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x72,0xd0,0x10]
-; SKX-NEXT: vpextrw $0, %xmm1, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
-; SKX-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
-; SKX-NEXT: vmovd %eax, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc8]
+; SKX-NEXT: vpshuflw $85, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; SKX-NEXT: ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
; SKX-NEXT: vcvtph2ps %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc9]
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x57,0xd2]
; SKX-NEXT: vucomiss %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xca]
@@ -1512,9 +1500,6 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; SKX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
; SKX-NEXT: testb %cl, %cl ## encoding: [0x84,0xc9]
; SKX-NEXT: setne %al ## encoding: [0x0f,0x95,0xc0]
-; SKX-NEXT: vpextrw $0, %xmm0, %ecx ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc8,0x00]
-; SKX-NEXT: movzwl %cx, %ecx ## encoding: [0x0f,0xb7,0xc9]
-; SKX-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; SKX-NEXT: vcvtph2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; SKX-NEXT: vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
; SKX-NEXT: setp %cl ## encoding: [0x0f,0x9a,0xc1]
diff --git a/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll b/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
index e2ea8974f655..f6fb2fcc957e 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
@@ -92,7 +92,7 @@ define half @f6(half %x, i16 %y) {
define half @f7(half %x) {
; CHECK-LABEL: f7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [1.7881E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -106,7 +106,7 @@ define half @f7(half %x) {
define half @f8(half %x) {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [2.3842E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -171,7 +171,7 @@ define half @xor(half %x, half %y) {
define half @f7_or(half %x) {
; CHECK-LABEL: f7_or:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [1.7881E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -183,7 +183,7 @@ define half @f7_or(half %x) {
define half @f7_xor(half %x) {
; CHECK-LABEL: f7_xor:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [1.7881E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -199,7 +199,7 @@ define half @f7_xor(half %x) {
define half @movmsk(half %x) {
; CHECK-LABEL: movmsk:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [-0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -271,7 +271,7 @@ define half @fadd_bitcast_fneg(half %x, half %y) {
define half @fsub_bitcast_fneg(half %x, half %y) {
; CHECK-LABEL: fsub_bitcast_fneg:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-NEXT: vmovsh {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vxorps %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vsubsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/cmov-fp.ll b/llvm/test/CodeGen/X86/cmov-fp.ll
index 26e720ffcebc..77665d083b7e 100644
--- a/llvm/test/CodeGen/X86/cmov-fp.ll
+++ b/llvm/test/CodeGen/X86/cmov-fp.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-- -mcpu pentium4 < %s | FileCheck %s -check-prefix=SSE
-; RUN: llc -mtriple=i686-- -mcpu pentium3 < %s | FileCheck %s -check-prefix=NOSSE2
-; RUN: llc -mtriple=i686-- -mcpu pentium2 < %s | FileCheck %s -check-prefix=NOSSE1
+; RUN: llc -mtriple=i686-- -mcpu pentium3 < %s | FileCheck %s -check-prefixes=NOSSE,NOSSE2
+; RUN: llc -mtriple=i686-- -mcpu pentium2 < %s | FileCheck %s -check-prefixes=NOSSE,NOSSE1
; RUN: llc -mtriple=i686-- -mcpu pentium < %s | FileCheck %s -check-prefix=NOCMOV
; PR14035
@@ -27,27 +27,16 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test1:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test1:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test1:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test1:
; NOCMOV: # %bb.0:
@@ -90,27 +79,16 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test2:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test2:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test2:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test2:
; NOCMOV: # %bb.0:
@@ -153,27 +131,16 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test3:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test3:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test3:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test3:
; NOCMOV: # %bb.0:
@@ -216,27 +183,16 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test4:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test4:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test4:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test4:
; NOCMOV: # %bb.0:
@@ -279,31 +235,18 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test5:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setg %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test5:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setg %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test5:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setg %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test5:
; NOCMOV: # %bb.0:
@@ -346,31 +289,18 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test6:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setge %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test6:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setge %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test6:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setge %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test6:
; NOCMOV: # %bb.0:
@@ -413,31 +343,18 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test7:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setl %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test7:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setl %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test7:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setl %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test7:
; NOCMOV: # %bb.0:
@@ -480,31 +397,18 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test8:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setle %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test8:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setle %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test8:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setle %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test8:
; NOCMOV: # %bb.0:
@@ -1065,27 +969,16 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test17:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test17:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test17:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test17:
; NOCMOV: # %bb.0:
@@ -1118,27 +1011,16 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test18:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test18:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test18:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test18:
; NOCMOV: # %bb.0:
@@ -1171,27 +1053,16 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test19:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test19:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test19:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test19:
; NOCMOV: # %bb.0:
@@ -1224,27 +1095,16 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test20:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test20:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test20:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test20:
; NOCMOV: # %bb.0:
@@ -1279,31 +1139,18 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test21:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setg %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test21:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setg %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test21:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setg %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test21:
; NOCMOV: # %bb.0:
@@ -1339,31 +1186,18 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test22:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setge %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test22:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setge %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test22:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setge %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test22:
; NOCMOV: # %bb.0:
@@ -1398,31 +1232,18 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test23:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setl %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test23:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setl %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test23:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setl %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test23:
; NOCMOV: # %bb.0:
@@ -1457,31 +1278,18 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test24:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setle %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test24:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setle %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test24:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setle %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test24:
; NOCMOV: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll b/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll
index 7996454a0158..3887d9547fd0 100644
--- a/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll
+++ b/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,CHECK-NOBMI,CHECK-NOBMI-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2 | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-BMI2-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2,+avx2 | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-AVX,CHECK-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2,+avx | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-AVX,CHECK-AVX12,CHECK-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2,+avx2 | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-AVX,CHECK-AVX12,CHECK-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2,+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-AVX,CHECK-AVX512
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
@@ -353,6 +354,15 @@ define <4 x i1> @shr_to_ror_eq_4xi32_s4(<4 x i32> %x) {
; CHECK-BMI2-SSE2-NEXT: pxor %xmm1, %xmm0
; CHECK-BMI2-SSE2-NEXT: retq
;
+; CHECK-AVX1-LABEL: shr_to_ror_eq_4xi32_s4:
+; CHECK-AVX1: # %bb.0:
+; CHECK-AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: retq
+;
; CHECK-AVX2-LABEL: shr_to_ror_eq_4xi32_s4:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpsrld $4, %xmm0, %xmm1
@@ -396,14 +406,14 @@ define <4 x i1> @shl_to_ror_eq_4xi32_s8(<4 x i32> %x) {
; CHECK-BMI2-SSE2-NEXT: pxor %xmm1, %xmm0
; CHECK-BMI2-SSE2-NEXT: retq
;
-; CHECK-AVX2-LABEL: shl_to_ror_eq_4xi32_s8:
-; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpslld $8, %xmm0, %xmm1
-; CHECK-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0
-; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: retq
+; CHECK-AVX12-LABEL: shl_to_ror_eq_4xi32_s8:
+; CHECK-AVX12: # %bb.0:
+; CHECK-AVX12-NEXT: vpslld $8, %xmm0, %xmm1
+; CHECK-AVX12-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX12-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX12-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX12-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX12-NEXT: retq
;
; CHECK-AVX512-LABEL: shl_to_ror_eq_4xi32_s8:
; CHECK-AVX512: # %bb.0:
@@ -438,6 +448,15 @@ define <4 x i1> @shl_to_ror_eq_4xi32_s7_fail_no_p2(<4 x i32> %x) {
; CHECK-BMI2-SSE2-NEXT: pxor %xmm1, %xmm0
; CHECK-BMI2-SSE2-NEXT: retq
;
+; CHECK-AVX1-LABEL: shl_to_ror_eq_4xi32_s7_fail_no_p2:
+; CHECK-AVX1: # %bb.0:
+; CHECK-AVX1-NEXT: vpslld $7, %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: retq
+;
; CHECK-AVX2-LABEL: shl_to_ror_eq_4xi32_s7_fail_no_p2:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpslld $7, %xmm0, %xmm1
@@ -490,6 +509,17 @@ define <4 x i1> @shr_to_ror_eq_4xi32_s4_fail_no_splat(<4 x i32> %x) {
; CHECK-BMI2-SSE2-NEXT: pxor %xmm1, %xmm0
; CHECK-BMI2-SSE2-NEXT: retq
;
+; CHECK-AVX1-LABEL: shr_to_ror_eq_4xi32_s4_fail_no_splat:
+; CHECK-AVX1: # %bb.0:
+; CHECK-AVX1-NEXT: vpsrld $8, %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpsrld $4, %xmm0, %xmm2
+; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5],xmm1[6,7]
+; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: retq
+;
; CHECK-AVX2-LABEL: shr_to_ror_eq_4xi32_s4_fail_no_splat:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -546,6 +576,21 @@ define <16 x i1> @shl_to_ror_eq_16xi16_s8_fail_preserve_i16(<16 x i16> %x) {
; CHECK-BMI2-SSE2-NEXT: pxor %xmm1, %xmm0
; CHECK-BMI2-SSE2-NEXT: retq
;
+; CHECK-AVX1-LABEL: shl_to_ror_eq_16xi16_s8_fail_preserve_i16:
+; CHECK-AVX1: # %bb.0:
+; CHECK-AVX1-NEXT: vpsllw $8, %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; CHECK-AVX1-NEXT: vpsllw $8, %xmm2, %xmm2
+; CHECK-AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; CHECK-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; CHECK-AVX1-NEXT: vpcmpeqw %xmm0, %xmm1, %xmm0
+; CHECK-AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vzeroupper
+; CHECK-AVX1-NEXT: retq
+;
; CHECK-AVX2-LABEL: shl_to_ror_eq_16xi16_s8_fail_preserve_i16:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpsllw $8, %ymm0, %ymm1
@@ -574,6 +619,162 @@ define <16 x i1> @shl_to_ror_eq_16xi16_s8_fail_preserve_i16(<16 x i16> %x) {
ret <16 x i1> %r
}
+define <16 x i8> @shl_s3_cmp_v16i8(<16 x i8> %x, <16 x i8> %y) {
+; CHECK-NOBMI-LABEL: shl_s3_cmp_v16i8:
+; CHECK-NOBMI: # %bb.0:
+; CHECK-NOBMI-NEXT: pcmpeqb %xmm1, %xmm0
+; CHECK-NOBMI-NEXT: psllw $3, %xmm0
+; CHECK-NOBMI-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NOBMI-NEXT: retq
+;
+; CHECK-BMI2-SSE2-LABEL: shl_s3_cmp_v16i8:
+; CHECK-BMI2-SSE2: # %bb.0:
+; CHECK-BMI2-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT: psllw $3, %xmm0
+; CHECK-BMI2-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-BMI2-SSE2-NEXT: retq
+;
+; CHECK-AVX12-LABEL: shl_s3_cmp_v16i8:
+; CHECK-AVX12: # %bb.0:
+; CHECK-AVX12-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; CHECK-AVX12-NEXT: vpsllw $3, %xmm0, %xmm0
+; CHECK-AVX12-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX12-NEXT: retq
+;
+; CHECK-AVX512-LABEL: shl_s3_cmp_v16i8:
+; CHECK-AVX512: # %bb.0:
+; CHECK-AVX512-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; CHECK-AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
+; CHECK-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512-NEXT: retq
+ %cmp = icmp eq <16 x i8> %x, %y
+ %ext = sext <16 x i1> %cmp to <16 x i8>
+ %shr = shl <16 x i8> %ext, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <16 x i8> %shr
+}
+
+define <4 x i64> @shl_s31_cmp_v4f64(<4 x double> %x, <4 x double> %y) {
+; CHECK-NOBMI-LABEL: shl_s31_cmp_v4f64:
+; CHECK-NOBMI: # %bb.0:
+; CHECK-NOBMI-NEXT: cmpeqpd %xmm3, %xmm1
+; CHECK-NOBMI-NEXT: cmpeqpd %xmm2, %xmm0
+; CHECK-NOBMI-NEXT: psllq $31, %xmm0
+; CHECK-NOBMI-NEXT: psllq $31, %xmm1
+; CHECK-NOBMI-NEXT: retq
+;
+; CHECK-BMI2-SSE2-LABEL: shl_s31_cmp_v4f64:
+; CHECK-BMI2-SSE2: # %bb.0:
+; CHECK-BMI2-SSE2-NEXT: cmpeqpd %xmm3, %xmm1
+; CHECK-BMI2-SSE2-NEXT: cmpeqpd %xmm2, %xmm0
+; CHECK-BMI2-SSE2-NEXT: psllq $31, %xmm0
+; CHECK-BMI2-SSE2-NEXT: psllq $31, %xmm1
+; CHECK-BMI2-SSE2-NEXT: retq
+;
+; CHECK-AVX1-LABEL: shl_s31_cmp_v4f64:
+; CHECK-AVX1: # %bb.0:
+; CHECK-AVX1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; CHECK-AVX1-NEXT: vpsllq $31, %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-AVX1-NEXT: vpsllq $31, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-AVX1-NEXT: retq
+;
+; CHECK-AVX2-LABEL: shl_s31_cmp_v4f64:
+; CHECK-AVX2: # %bb.0:
+; CHECK-AVX2-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; CHECK-AVX2-NEXT: vpsllq $31, %ymm0, %ymm0
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-AVX512-LABEL: shl_s31_cmp_v4f64:
+; CHECK-AVX512: # %bb.0:
+; CHECK-AVX512-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
+; CHECK-AVX512-NEXT: vpsllq $31, %ymm0, %ymm0
+; CHECK-AVX512-NEXT: retq
+ %cmp = fcmp oeq <4 x double> %x, %y
+ %ext = sext <4 x i1> %cmp to <4 x i64>
+ %shr = shl <4 x i64> %ext, <i64 31, i64 31, i64 31, i64 31>
+ ret <4 x i64> %shr
+}
+
+define <16 x i8> @shr_s1_cmp_v16i8(<16 x i8> %x, <16 x i8> %y) {
+; CHECK-NOBMI-LABEL: shr_s1_cmp_v16i8:
+; CHECK-NOBMI: # %bb.0:
+; CHECK-NOBMI-NEXT: pcmpeqb %xmm1, %xmm0
+; CHECK-NOBMI-NEXT: psrlw $1, %xmm0
+; CHECK-NOBMI-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NOBMI-NEXT: retq
+;
+; CHECK-BMI2-SSE2-LABEL: shr_s1_cmp_v16i8:
+; CHECK-BMI2-SSE2: # %bb.0:
+; CHECK-BMI2-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT: psrlw $1, %xmm0
+; CHECK-BMI2-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-BMI2-SSE2-NEXT: retq
+;
+; CHECK-AVX12-LABEL: shr_s1_cmp_v16i8:
+; CHECK-AVX12: # %bb.0:
+; CHECK-AVX12-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; CHECK-AVX12-NEXT: vpsrlw $1, %xmm0, %xmm0
+; CHECK-AVX12-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX12-NEXT: retq
+;
+; CHECK-AVX512-LABEL: shr_s1_cmp_v16i8:
+; CHECK-AVX512: # %bb.0:
+; CHECK-AVX512-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; CHECK-AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; CHECK-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512-NEXT: retq
+ %cmp = icmp eq <16 x i8> %x, %y
+ %ext = sext <16 x i1> %cmp to <16 x i8>
+ %shr = lshr <16 x i8> %ext, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %shr
+}
+
+define <8 x i32> @shr_s9_cmp_v8i32(<8 x i32> %x, <8 x i32> %y) {
+; CHECK-NOBMI-LABEL: shr_s9_cmp_v8i32:
+; CHECK-NOBMI: # %bb.0:
+; CHECK-NOBMI-NEXT: pcmpgtd %xmm3, %xmm1
+; CHECK-NOBMI-NEXT: pcmpgtd %xmm2, %xmm0
+; CHECK-NOBMI-NEXT: psrld $9, %xmm0
+; CHECK-NOBMI-NEXT: psrld $9, %xmm1
+; CHECK-NOBMI-NEXT: retq
+;
+; CHECK-BMI2-SSE2-LABEL: shr_s9_cmp_v8i32:
+; CHECK-BMI2-SSE2: # %bb.0:
+; CHECK-BMI2-SSE2-NEXT: pcmpgtd %xmm3, %xmm1
+; CHECK-BMI2-SSE2-NEXT: pcmpgtd %xmm2, %xmm0
+; CHECK-BMI2-SSE2-NEXT: psrld $9, %xmm0
+; CHECK-BMI2-SSE2-NEXT: psrld $9, %xmm1
+; CHECK-BMI2-SSE2-NEXT: retq
+;
+; CHECK-AVX1-LABEL: shr_s9_cmp_v8i32:
+; CHECK-AVX1: # %bb.0:
+; CHECK-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; CHECK-AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpsrld $9, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpsrld $9, %xmm2, %xmm1
+; CHECK-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-AVX1-NEXT: retq
+;
+; CHECK-AVX2-LABEL: shr_s9_cmp_v8i32:
+; CHECK-AVX2: # %bb.0:
+; CHECK-AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; CHECK-AVX2-NEXT: vpsrld $9, %ymm0, %ymm0
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-AVX512-LABEL: shr_s9_cmp_v8i32:
+; CHECK-AVX512: # %bb.0:
+; CHECK-AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; CHECK-AVX512-NEXT: vpsrld $9, %ymm0, %ymm0
+; CHECK-AVX512-NEXT: retq
+ %cmp = icmp sgt <8 x i32> %x, %y
+ %ext = sext <8 x i1> %cmp to <8 x i32>
+ %shr = lshr <8 x i32> %ext, <i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9>
+ ret <8 x i32> %shr
+}
+
define i1 @shr_to_shl_eq_i32_s5_fail_doesnt_add_up(i32 %x) {
; CHECK-LABEL: shr_to_shl_eq_i32_s5_fail_doesnt_add_up:
; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
index cd1953bec774..09419f870b70 100644
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -159,43 +159,51 @@ define i64 @test4(i64 %x) nounwind {
define i32 @test5(double %A) nounwind {
; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: ucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [7.5E+1,0.0E+0]
+; CHECK-NEXT: # encoding: [0xf2,0x0f,0x10,0x0d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; CHECK-NEXT: ja .LBB5_3 # encoding: [0x77,A]
-; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: ucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
+; CHECK-NEXT: cmplepd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x02]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [1.5E+2,0.0E+0]
+; CHECK-NEXT: # encoding: [0xf2,0x0f,0x10,0x15,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; CHECK-NEXT: jb .LBB5_3 # encoding: [0x72,A]
-; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; CHECK-NEXT: # %bb.2: # %bb12
-; CHECK-NEXT: movl $32, %eax # encoding: [0xb8,0x20,0x00,0x00,0x00]
-; CHECK-NEXT: retq # encoding: [0xc3]
-; CHECK-NEXT: .LBB5_3: # %bb8
+; CHECK-NEXT: cmpnltpd %xmm0, %xmm2 # encoding: [0x66,0x0f,0xc2,0xd0,0x05]
+; CHECK-NEXT: andpd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x54,0xd1]
+; CHECK-NEXT: movd %xmm2, %eax # encoding: [0x66,0x0f,0x7e,0xd0]
+; CHECK-NEXT: testb $1, %al # encoding: [0xa8,0x01]
+; CHECK-NEXT: jne .LBB5_1 # encoding: [0x75,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %bb8
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: jmp foo@PLT # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: foo@PLT-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB5_1: # %bb12
+; CHECK-NEXT: movl $32, %eax # encoding: [0xb8,0x20,0x00,0x00,0x00]
+; CHECK-NEXT: retq # encoding: [0xc3]
;
; NDD-LABEL: test5:
; NDD: # %bb.0: # %entry
-; NDD-NEXT: ucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
+; NDD-NEXT: movsd {{.*#+}} xmm1 = [7.5E+1,0.0E+0]
+; NDD-NEXT: # encoding: [0xf2,0x0f,0x10,0x0d,A,A,A,A]
; NDD-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; NDD-NEXT: ja .LBB5_3 # encoding: [0x77,A]
-; NDD-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; NDD-NEXT: # %bb.1: # %entry
-; NDD-NEXT: ucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
+; NDD-NEXT: cmplepd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x02]
+; NDD-NEXT: movsd {{.*#+}} xmm2 = [1.5E+2,0.0E+0]
+; NDD-NEXT: # encoding: [0xf2,0x0f,0x10,0x15,A,A,A,A]
; NDD-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; NDD-NEXT: jb .LBB5_3 # encoding: [0x72,A]
-; NDD-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; NDD-NEXT: # %bb.2: # %bb12
-; NDD-NEXT: movl $32, %eax # encoding: [0xb8,0x20,0x00,0x00,0x00]
-; NDD-NEXT: retq # encoding: [0xc3]
-; NDD-NEXT: .LBB5_3: # %bb8
+; NDD-NEXT: cmpnltpd %xmm0, %xmm2 # encoding: [0x66,0x0f,0xc2,0xd0,0x05]
+; NDD-NEXT: andpd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x54,0xd1]
+; NDD-NEXT: movd %xmm2, %eax # encoding: [0x66,0x0f,0x7e,0xd0]
+; NDD-NEXT: testb $1, %al # encoding: [0xa8,0x01]
+; NDD-NEXT: jne .LBB5_1 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB5_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %bb8
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; NDD-NEXT: jmp foo@PLT # TAILCALL
; NDD-NEXT: # encoding: [0xeb,A]
; NDD-NEXT: # fixup A - offset: 1, value: foo@PLT-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB5_1: # %bb12
+; NDD-NEXT: movl $32, %eax # encoding: [0xb8,0x20,0x00,0x00,0x00]
+; NDD-NEXT: retq # encoding: [0xc3]
entry:
%tmp2 = fcmp ogt double %A, 1.500000e+02
%tmp5 = fcmp ult double %A, 7.500000e+01
diff --git a/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll b/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
index 7039e33c0093..cbb5bd09c239 100644
--- a/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
@@ -160,6 +160,53 @@ define <16 x i8> @demandedelts_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>
ret <16 x i8> %5
}
+define <4 x float> @demandedbits_sitofp_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x i32> %a2) {
+; SSE-LABEL: demandedbits_sitofp_blendvps:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: cvtdq2ps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedbits_sitofp_blendvps:
+; AVX: # %bb.0:
+; AVX-NEXT: vcvtdq2ps %xmm2, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cvt = sitofp <4 x i32> %a2 to <4 x float>
+ %sel = tail call noundef <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %cvt)
+ ret <4 x float> %sel
+}
+
+define <4 x float> @demandedbits_uitofp_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x i32> %a2) {
+; SSE-LABEL: demandedbits_uitofp_blendvps:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [1258291200,1258291200,1258291200,1258291200]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
+; SSE-NEXT: psrld $16, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],mem[1],xmm2[2],mem[3],xmm2[4],mem[5],xmm2[6],mem[7]
+; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedbits_uitofp_blendvps:
+; AVX: # %bb.0:
+; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],mem[1],xmm2[2],mem[3],xmm2[4],mem[5],xmm2[6],mem[7]
+; AVX-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],mem[1],xmm2[2],mem[3],xmm2[4],mem[5],xmm2[6],mem[7]
+; AVX-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vaddps %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cvt = uitofp <4 x i32> %a2 to <4 x float>
+ %sel = tail call noundef <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %cvt)
+ ret <4 x float> %sel
+}
+
define <2 x i64> @demandedbits_blendvpd(i64 %a0, i64 %a2, <2 x double> %a3) {
; SSE-LABEL: demandedbits_blendvpd:
; SSE: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/cvt16.ll b/llvm/test/CodeGen/X86/cvt16.ll
index 59097f8fb5d2..c7ef353f7f60 100644
--- a/llvm/test/CodeGen/X86/cvt16.ll
+++ b/llvm/test/CodeGen/X86/cvt16.ll
@@ -89,7 +89,6 @@ define float @test3(float %src) nounwind uwtable readnone {
; F16C-LABEL: test3:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; F16C-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/dagcombine-and-setcc.ll b/llvm/test/CodeGen/X86/dagcombine-and-setcc.ll
index 842ee55d255a..6fded2eeaf35 100644
--- a/llvm/test/CodeGen/X86/dagcombine-and-setcc.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-and-setcc.ll
@@ -16,7 +16,8 @@ declare i32 @printf(ptr nocapture readonly, ...)
;CHECK: cmpl
;CHECK: setl
;CHECK: orb
-;CHECK: je
+;CHECK: testb
+;CHECK: jne
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
; Function Attrs: optsize ssp uwtable
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
index 1372bd804735..fa45afbb634c 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
@@ -178,13 +178,13 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $136, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: orl %edx, %ecx
-; X86-NEXT: movl %edx, %edi
+; X86-NEXT: orl %edi, %eax
+; X86-NEXT: movl %ebp, %ecx
+; X86-NEXT: orl %esi, %ecx
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sete %bl
@@ -195,30 +195,33 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: sete %al
; X86-NEXT: orb %bl, %al
; X86-NEXT: movb %al, (%esp) # 1-byte Spill
-; X86-NEXT: bsrl %esi, %edx
+; X86-NEXT: bsrl %edi, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: bsrl %edi, %ecx
+; X86-NEXT: bsrl %esi, %ecx
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: addl $32, %ecx
-; X86-NEXT: testl %esi, %esi
+; X86-NEXT: testl %edi, %edi
+; X86-NEXT: movl %edi, %ebx
; X86-NEXT: cmovnel %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsrl %eax, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: bsrl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: movl %esi, %ebx
+; X86-NEXT: bsrl %ebp, %ebp
+; X86-NEXT: movl %esi, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: xorl $31, %ebp
; X86-NEXT: addl $32, %ebp
; X86-NEXT: testl %eax, %eax
; X86-NEXT: cmovnel %edx, %ebp
; X86-NEXT: addl $64, %ebp
-; X86-NEXT: orl %ebx, %edi
+; X86-NEXT: movl %edi, %edx
+; X86-NEXT: orl %ebx, %edx
; X86-NEXT: cmovnel %ecx, %ebp
; X86-NEXT: bsrl %esi, %edx
+; X86-NEXT: movl %esi, %ebx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT: bsrl %ebx, %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bsrl %eax, %ecx
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: addl $32, %ecx
; X86-NEXT: testl %esi, %esi
@@ -230,51 +233,51 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: xorl $31, %edx
; X86-NEXT: addl $32, %edx
; X86-NEXT: testl %edi, %edi
-; X86-NEXT: movl %edi, %eax
; X86-NEXT: cmovnel %esi, %edx
; X86-NEXT: addl $64, %edx
-; X86-NEXT: movl %ebx, %esi
-; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: orl %ebx, %eax
; X86-NEXT: cmovnel %ecx, %edx
-; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: subl %edx, %ebp
+; X86-NEXT: movl $0, %edx
+; X86-NEXT: sbbl %edx, %edx
; X86-NEXT: movl $0, %esi
; X86-NEXT: sbbl %esi, %esi
; X86-NEXT: movl $0, %edi
; X86-NEXT: sbbl %edi, %edi
-; X86-NEXT: movl $0, %ebx
-; X86-NEXT: sbbl %ebx, %ebx
-; X86-NEXT: movl $127, %edx
+; X86-NEXT: movl $127, %ecx
+; X86-NEXT: cmpl %ebp, %ecx
+; X86-NEXT: movl $0, %ecx
+; X86-NEXT: sbbl %edx, %ecx
+; X86-NEXT: movl $0, %ecx
+; X86-NEXT: sbbl %esi, %ecx
+; X86-NEXT: movl $0, %ecx
+; X86-NEXT: sbbl %edi, %ecx
+; X86-NEXT: setb %cl
+; X86-NEXT: orb (%esp), %cl # 1-byte Folded Reload
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: cmpl %ebp, %edx
-; X86-NEXT: movl $0, %edx
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: xorl $127, %eax
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %esi, %edx
-; X86-NEXT: movl $0, %edx
+; X86-NEXT: orl %esi, %eax
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %edi, %edx
-; X86-NEXT: movl $0, %edx
-; X86-NEXT: sbbl %ebx, %edx
-; X86-NEXT: setb %dl
-; X86-NEXT: orb (%esp), %dl # 1-byte Folded Reload
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: cmovnel %ecx, %edx
+; X86-NEXT: orl %edi, %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: sete %al
+; X86-NEXT: testb %cl, %cl
+; X86-NEXT: movl %ebx, %edx
+; X86-NEXT: movl $0, %edi
+; X86-NEXT: cmovnel %edi, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: cmovnel %ecx, %esi
-; X86-NEXT: cmovnel %ecx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: cmovnel %edi, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: cmovnel %ecx, %ebp
-; X86-NEXT: jne .LBB4_8
-; X86-NEXT: # %bb.1: # %_udiv-special-cases
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: xorl $127, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ebx, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: je .LBB4_8
-; X86-NEXT: # %bb.2: # %udiv-bb1
+; X86-NEXT: cmovnel %edi, %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: cmovnel %edi, %ebx
+; X86-NEXT: orb %cl, %al
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: jne .LBB4_7
+; X86-NEXT: # %bb.1: # %udiv-bb1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -287,9 +290,8 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: movl %ecx, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl %ebx, %eax
; X86-NEXT: xorb $127, %al
; X86-NEXT: movb %al, %ch
; X86-NEXT: andb $7, %ch
@@ -301,7 +303,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl 132(%esp,%eax), %esi
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shll %cl, %edx
; X86-NEXT: notb %cl
; X86-NEXT: movl 124(%esp,%eax), %ebp
@@ -309,68 +311,69 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: shrl %esi
; X86-NEXT: shrl %cl, %esi
; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl %ebp, %edx
-; X86-NEXT: movl 120(%esp,%eax), %ebp
+; X86-NEXT: movl 120(%esp,%eax), %eax
; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shldl %cl, %ebp, %edx
-; X86-NEXT: shll %cl, %ebp
-; X86-NEXT: addl $1, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl %cl, %eax, %ebp
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: addl $1, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: adcl $0, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $0, %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: adcl $0, %ebx
-; X86-NEXT: jae .LBB4_3
-; X86-NEXT: # %bb.6:
+; X86-NEXT: jae .LBB4_2
+; X86-NEXT: # %bb.5:
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: movl %edx, %ebx
-; X86-NEXT: jmp .LBB4_7
-; X86-NEXT: .LBB4_3: # %udiv-preheader
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: jmp .LBB4_6
+; X86-NEXT: .LBB4_2: # %udiv-preheader
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movb %al, %ch
; X86-NEXT: andb $7, %ch
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: shrb $3, %al
; X86-NEXT: andb $15, %al
; X86-NEXT: movzbl %al, %eax
-; X86-NEXT: movl 84(%esp,%eax), %ebp
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: movl 80(%esp,%eax), %ebx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, %esi
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrdl %cl, %ebp, %esi
+; X86-NEXT: movl 84(%esp,%eax), %ebx
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 72(%esp,%eax), %esi
-; X86-NEXT: movl 76(%esp,%eax), %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: notb %cl
-; X86-NEXT: addl %ebx, %ebx
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: orl %edx, %ebx
+; X86-NEXT: movl 80(%esp,%eax), %esi
+; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %esi, %edx
; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrl %cl, %ebp
-; X86-NEXT: shrdl %cl, %eax, %esi
+; X86-NEXT: shrdl %cl, %ebx, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 72(%esp,%eax), %ebp
+; X86-NEXT: movl 76(%esp,%eax), %edx
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: notb %cl
+; X86-NEXT: addl %esi, %esi
+; X86-NEXT: shll %cl, %esi
+; X86-NEXT: orl %eax, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movb %ch, %cl
+; X86-NEXT: shrl %cl, %ebx
+; X86-NEXT: movl %ebx, %edi
+; X86-NEXT: shrdl %cl, %edx, %ebp
+; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -380,25 +383,25 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: adcl $-1, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: adcl $-1, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: adcl $-1, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl (%esp), %esi # 4-byte Reload
+; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: .p2align 4, 0x90
-; X86-NEXT: .LBB4_4: # %udiv-do-while
+; X86-NEXT: .LBB4_3: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl $1, %edx, %ebp
-; X86-NEXT: movl %ebp, (%esp) # 4-byte Spill
-; X86-NEXT: shldl $1, %ebx, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT: shldl $1, %ebp, %ebx
-; X86-NEXT: shldl $1, %esi, %ebp
+; X86-NEXT: shldl $1, %ebp, %edi
+; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: shldl $1, %ebx, %ebp
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: shldl $1, %esi, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: shldl $1, %edi, %esi
-; X86-NEXT: orl %ecx, %esi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shldl $1, %eax, %edi
; X86-NEXT: orl %ecx, %edi
@@ -407,14 +410,16 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: shldl $1, %edi, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: addl %edi, %edi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT: shldl $1, %edx, %edi
+; X86-NEXT: orl %ecx, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: cmpl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT: addl %edx, %edx
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-NEXT: cmpl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl %edx, %ecx
+; X86-NEXT: sbbl %ebp, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
; X86-NEXT: sarl $31, %ecx
@@ -429,84 +434,81 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: subl %ecx, %ebp
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: subl %ecx, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %edi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: sbbl %edi, %ebp
+; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: sbbl %eax, (%esp) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: addl $-1, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $-1, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: adcl $-1, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: adcl $-1, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: adcl $-1, %edi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %ebx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %edi, %eax
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %edx, %ecx
-; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %ebx, %ecx
+; X86-NEXT: movl (%esp), %edi # 4-byte Reload
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: jne .LBB4_4
-; X86-NEXT: # %bb.5:
-; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-NEXT: movl %edi, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: jne .LBB4_3
+; X86-NEXT: # %bb.4:
+; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: .LBB4_7: # %udiv-loop-exit
-; X86-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-NEXT: .LBB4_6: # %udiv-loop-exit
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $1, %esi, %edx
; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: shldl $1, %ebx, %esi
+; X86-NEXT: shldl $1, %ebp, %esi
; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: shldl $1, %ebp, %ebx
-; X86-NEXT: orl %ecx, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: addl %ebp, %ebp
-; X86-NEXT: orl %eax, %ebp
-; X86-NEXT: .LBB4_8: # %udiv-end
+; X86-NEXT: movl (%esp), %ebx # 4-byte Reload
+; X86-NEXT: shldl $1, %ebx, %ebp
+; X86-NEXT: orl %ecx, %ebp
+; X86-NEXT: addl %ebx, %ebx
+; X86-NEXT: orl %eax, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %ebp, (%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: .LBB4_7: # %udiv-end
+; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %ebx, (%eax)
+; X86-NEXT: movl %ebp, 4(%eax)
; X86-NEXT: movl %esi, 8(%eax)
; X86-NEXT: movl %edx, 12(%eax)
+; X86-NEXT: movl %ebx, %ecx
+; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X86-NEXT: movl %esi, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %esi
-; X86-NEXT: imull %ecx, %esi
-; X86-NEXT: movl %ebp, %ecx
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edx, %ebp
+; X86-NEXT: imull %ebp, %esi
+; X86-NEXT: movl %edx, %edi
; X86-NEXT: mull %ecx
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl %esi, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: imull %ecx, %edi
-; X86-NEXT: addl %edx, %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: imull %ecx, %ebp
+; X86-NEXT: addl %edx, %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %ebx
; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: imull %esi, %ebp
-; X86-NEXT: addl %edx, %ebp
+; X86-NEXT: imull %esi, %edi
+; X86-NEXT: addl %edx, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: imull %eax, %ebx
-; X86-NEXT: addl %ebp, %ebx
-; X86-NEXT: addl (%esp), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NEXT: adcl %edi, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NEXT: addl %edi, %ebx
+; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: adcl %ebp, %ebx
+; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: mull %esi
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -522,7 +524,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: adcl %edi, %ebp
; X86-NEXT: setb %cl
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -530,11 +532,11 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: addl %ebp, %eax
; X86-NEXT: movzbl %cl, %ecx
; X86-NEXT: adcl %ecx, %edx
-; X86-NEXT: addl (%esp), %eax # 4-byte Folded Reload
+; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: adcl %ebx, %edx
; X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT: sbbl (%esp), %edi # 4-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: sbbl %eax, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
diff --git a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
index e114c205d797..1886e2911ede 100644
--- a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
@@ -18,8 +18,7 @@ define float @test_cvtsh_ss(i16 %a0) nounwind {
;
; X64-LABEL: test_cvtsh_ss:
; X64: # %bb.0:
-; X64-NEXT: movzwl %di, %eax
-; X64-NEXT: vmovd %eax, %xmm0
+; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vcvtph2ps %xmm0, %xmm0
; X64-NEXT: retq
%ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0
@@ -41,8 +40,6 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
; X86-LABEL: test_cvtss_sh:
; X86: # %bb.0:
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X86-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X86-NEXT: vmovd %xmm0, %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
@@ -50,8 +47,6 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
;
; X64-LABEL: test_cvtss_sh:
; X64: # %bb.0:
-; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X64-NEXT: vmovd %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index 5f326b6d6998..8f875c70a25f 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -1432,7 +1432,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax
; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
@@ -1447,7 +1446,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax
; CHECK-FMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
@@ -1550,7 +1548,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: movzwl %ax, %eax
; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
@@ -1566,7 +1563,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-FMA-NEXT: movzwl %ax, %eax
; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/fp-roundeven.ll b/llvm/test/CodeGen/X86/fp-roundeven.ll
index fed2060dabd3..8037c783dd8e 100644
--- a/llvm/test/CodeGen/X86/fp-roundeven.ll
+++ b/llvm/test/CodeGen/X86/fp-roundeven.ll
@@ -51,7 +51,6 @@ define half @roundeven_f16(half %h) {
; AVX512F-LABEL: roundeven_f16:
; AVX512F: ## %bb.0: ## %entry
; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
index c8708ea9b681..6aad4c2ebba1 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
@@ -698,30 +698,18 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) nounwind {
;
; AVX2-LABEL: stest_f16i32:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm1
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: vcvttss2si %xmm1, %rcx
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvttss2si %xmm2, %rax
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm0, %rax
@@ -848,10 +836,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
;
; AVX2-LABEL: utesth_f16i32:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -862,20 +847,14 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vpextrw $0, %xmm0, %edx
-; AVX2-NEXT: movzwl %dx, %edx
-; AVX2-NEXT: vmovd %edx, %xmm3
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: vsubss %xmm1, %xmm3, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
@@ -887,10 +866,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
@@ -1023,31 +999,19 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) nounwind {
;
; AVX2-LABEL: ustest_f16i32:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
@@ -3346,30 +3310,18 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) nounwind {
;
; AVX2-LABEL: stest_f16i32_mm:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm1
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: vcvttss2si %xmm1, %rcx
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvttss2si %xmm2, %rax
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm0, %rax
@@ -3494,10 +3446,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
;
; AVX2-LABEL: utesth_f16i32_mm:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -3508,20 +3457,14 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vpextrw $0, %xmm0, %edx
-; AVX2-NEXT: movzwl %dx, %edx
-; AVX2-NEXT: vmovd %edx, %xmm3
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: vsubss %xmm1, %xmm3, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
@@ -3533,10 +3476,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
@@ -3668,31 +3608,19 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) nounwind {
;
; AVX2-LABEL: ustest_f16i32_mm:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index d0853fdc748d..9f01d07e6a67 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -851,16 +851,14 @@ define float @test_sitofp_fadd_i32(i32 %a, ptr %b) #0 {
;
; BWON-F16C-LABEL: test_sitofp_fadd_i32:
; BWON-F16C: # %bb.0:
-; BWON-F16C-NEXT: movzwl (%rsi), %eax
; BWON-F16C-NEXT: vcvtsi2ss %edi, %xmm0, %xmm0
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: movzwl (%rsi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm1
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; BWON-F16C-NEXT: vaddss %xmm0, %xmm1, %xmm0
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: retq
;
@@ -919,7 +917,6 @@ define half @PR40273(half) #0 {
; BWON-F16C-LABEL: PR40273:
; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: xorl %eax, %eax
@@ -973,7 +970,6 @@ define void @brcond(half %0) #0 {
; BWON-F16C-LABEL: brcond:
; BWON-F16C: # %bb.0: # %entry
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1029,7 +1025,6 @@ define half @test_sqrt(half %0) #0 {
; BWON-F16C-LABEL: test_sqrt:
; BWON-F16C: # %bb.0: # %entry
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
@@ -1083,7 +1078,6 @@ define void @main.158() #0 {
; BWON-F16C: # %bb.0: # %entry
; BWON-F16C-NEXT: vxorps %xmm0, %xmm0, %xmm0
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm1
-; BWON-F16C-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; BWON-F16C-NEXT: vmovss {{.*#+}} xmm2 = [8.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BWON-F16C-NEXT: vucomiss %xmm1, %xmm2
@@ -1172,8 +1166,7 @@ define void @main.45() #0 {
;
; BWON-F16C-LABEL: main.45:
; BWON-F16C: # %bb.0: # %entry
-; BWON-F16C-NEXT: movzwl (%rax), %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: xorl %eax, %eax
@@ -1345,10 +1338,8 @@ define half @pr61271(half %0, half %1) #0 {
; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
; BWON-F16C-NEXT: vpextrw $0, %xmm1, %ecx
-; BWON-F16C-NEXT: movzwl %cx, %ecx
; BWON-F16C-NEXT: vmovd %ecx, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm1
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; BWON-F16C-NEXT: vminss %xmm0, %xmm1, %xmm0
@@ -1615,14 +1606,8 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-LABEL: maxnum_v8f16:
; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm2
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm3
; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
; BWON-F16C-NEXT: vucomiss %xmm2, %xmm3
; BWON-F16C-NEXT: ja .LBB26_2
@@ -1631,14 +1616,8 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-NEXT: .LBB26_2:
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm2
; BWON-F16C-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm3
; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
; BWON-F16C-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm4, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm4
; BWON-F16C-NEXT: vcvtph2ps %xmm4, %xmm4
; BWON-F16C-NEXT: vucomiss %xmm3, %xmm4
; BWON-F16C-NEXT: ja .LBB26_4
@@ -1649,48 +1628,30 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm4, %xmm2
; BWON-F16C-NEXT: vmovd %xmm2, %ecx
; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %edx
-; BWON-F16C-NEXT: movzwl %dx, %edx
-; BWON-F16C-NEXT: vmovd %edx, %xmm2
+; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm3
+; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
-; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %edx
-; BWON-F16C-NEXT: movzwl %dx, %edx
-; BWON-F16C-NEXT: vmovd %edx, %xmm3
-; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
-; BWON-F16C-NEXT: vucomiss %xmm2, %xmm3
+; BWON-F16C-NEXT: vucomiss %xmm3, %xmm2
; BWON-F16C-NEXT: ja .LBB26_6
; BWON-F16C-NEXT: # %bb.5:
-; BWON-F16C-NEXT: vmovaps %xmm2, %xmm3
+; BWON-F16C-NEXT: vmovaps %xmm3, %xmm2
; BWON-F16C-NEXT: .LBB26_6:
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm2
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; BWON-F16C-NEXT: vmovd %xmm2, %edx
; BWON-F16C-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %esi
-; BWON-F16C-NEXT: movzwl %si, %esi
-; BWON-F16C-NEXT: vmovd %esi, %xmm2
+; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm3
+; BWON-F16C-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
-; BWON-F16C-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %esi
-; BWON-F16C-NEXT: movzwl %si, %esi
-; BWON-F16C-NEXT: vmovd %esi, %xmm3
-; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
-; BWON-F16C-NEXT: vucomiss %xmm2, %xmm3
+; BWON-F16C-NEXT: vucomiss %xmm3, %xmm2
; BWON-F16C-NEXT: ja .LBB26_8
; BWON-F16C-NEXT: # %bb.7:
-; BWON-F16C-NEXT: vmovaps %xmm2, %xmm3
+; BWON-F16C-NEXT: vmovaps %xmm3, %xmm2
; BWON-F16C-NEXT: .LBB26_8:
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm2
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; BWON-F16C-NEXT: vmovd %xmm2, %esi
-; BWON-F16C-NEXT: vpsrlq $48, %xmm1, %xmm2
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %edi
-; BWON-F16C-NEXT: movzwl %di, %edi
-; BWON-F16C-NEXT: vmovd %edi, %xmm2
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[3,3,3,3,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
-; BWON-F16C-NEXT: vpsrlq $48, %xmm0, %xmm3
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %edi
-; BWON-F16C-NEXT: movzwl %di, %edi
-; BWON-F16C-NEXT: vmovd %edi, %xmm3
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm6
; BWON-F16C-NEXT: vucomiss %xmm2, %xmm6
; BWON-F16C-NEXT: ja .LBB26_10
@@ -1704,53 +1665,35 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm6, %xmm6
; BWON-F16C-NEXT: vmovd %xmm6, %eax
; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm6, %ecx
-; BWON-F16C-NEXT: movzwl %cx, %ecx
-; BWON-F16C-NEXT: vmovd %ecx, %xmm6
+; BWON-F16C-NEXT: vcvtph2ps %xmm6, %xmm7
+; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; BWON-F16C-NEXT: vcvtph2ps %xmm6, %xmm6
-; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm7, %ecx
-; BWON-F16C-NEXT: movzwl %cx, %ecx
-; BWON-F16C-NEXT: vmovd %ecx, %xmm7
-; BWON-F16C-NEXT: vcvtph2ps %xmm7, %xmm7
-; BWON-F16C-NEXT: vucomiss %xmm6, %xmm7
+; BWON-F16C-NEXT: vucomiss %xmm7, %xmm6
; BWON-F16C-NEXT: ja .LBB26_12
; BWON-F16C-NEXT: # %bb.11:
-; BWON-F16C-NEXT: vmovaps %xmm6, %xmm7
+; BWON-F16C-NEXT: vmovaps %xmm7, %xmm6
; BWON-F16C-NEXT: .LBB26_12:
; BWON-F16C-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; BWON-F16C-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; BWON-F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm7, %xmm5
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm6, %xmm5
; BWON-F16C-NEXT: vmovd %xmm5, %eax
; BWON-F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm5
-; BWON-F16C-NEXT: vpextrw $0, %xmm1, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm6
-; BWON-F16C-NEXT: vcvtph2ps %xmm6, %xmm6
-; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm7
-; BWON-F16C-NEXT: vcvtph2ps %xmm7, %xmm7
-; BWON-F16C-NEXT: vucomiss %xmm6, %xmm7
+; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm7
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm6
+; BWON-F16C-NEXT: vucomiss %xmm7, %xmm6
; BWON-F16C-NEXT: ja .LBB26_14
; BWON-F16C-NEXT: # %bb.13:
-; BWON-F16C-NEXT: vmovaps %xmm6, %xmm7
+; BWON-F16C-NEXT: vmovaps %xmm7, %xmm6
; BWON-F16C-NEXT: .LBB26_14:
; BWON-F16C-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; BWON-F16C-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm7, %xmm4
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm6, %xmm4
; BWON-F16C-NEXT: vmovd %xmm4, %eax
; BWON-F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4
-; BWON-F16C-NEXT: vpsrld $16, %xmm1, %xmm1
-; BWON-F16C-NEXT: vpextrw $0, %xmm1, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm1
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; BWON-F16C-NEXT: vpsrld $16, %xmm0, %xmm0
-; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: vucomiss %xmm1, %xmm0
; BWON-F16C-NEXT: ja .LBB26_16
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll b/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
index 5fde9bd5566b..9946267b48e7 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
@@ -635,7 +635,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX2-LABEL: test_reduce_v4i64:
; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; X64-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm4
; X64-AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll b/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
index 699dce75e505..0bbf94f1817f 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
@@ -581,7 +581,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX2-LABEL: test_reduce_v4i64:
; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; X64-AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm4
; X64-AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
diff --git a/llvm/test/CodeGen/X86/inline-asm-memop.ll b/llvm/test/CodeGen/X86/inline-asm-memop.ll
new file mode 100644
index 000000000000..834424980761
--- /dev/null
+++ b/llvm/test/CodeGen/X86/inline-asm-memop.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -O0 < %s | FileCheck %s
+
+; A bug in X86DAGToDAGISel::matchAddressRecursively created a zext SDValue that
+; was quickly replaced by another SDValue, but it had already been pushed into a
+; vector for a later call to the SelectionDAGISel::Select_INLINEASM getNode
+; builder; see issue #82431 for more information.
+
+define void @PR82431(i8 %call, ptr %b) {
+; CHECK-LABEL: PR82431:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movb %dil, %al
+; CHECK-NEXT: addb $1, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: # kill: def $rax killed $eax
+; CHECK-NEXT: shlq $3, %rax
+; CHECK-NEXT: addq %rax, %rsi
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: retq
+entry:
+ %narrow = add nuw i8 %call, 1
+ %idxprom = zext i8 %narrow to i64
+ %arrayidx = getelementptr [1 x i64], ptr %b, i64 0, i64 %idxprom
+ tail call void asm "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %arrayidx, ptr elementtype(i64) %arrayidx)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll b/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll
index 0250b1b4a7f8..f42c2f8f1447 100644
--- a/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll
+++ b/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll
@@ -28,78 +28,70 @@ define i32 @decode_sb(ptr %t, i32 %bl, i32 %_msprop1966, i32 %sub.i, i64 %idxpro
; CHECK-NEXT: .cfi_offset %r15, -24
; CHECK-NEXT: movl %r9d, %ebx
; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
-; CHECK-NEXT: movabsq $87960930222080, %r15 # imm = 0x500000000000
-; CHECK-NEXT: movl 0, %r11d
-; CHECK-NEXT: movl %esi, %r12d
-; CHECK-NEXT: # implicit-def: $r13d
+; CHECK-NEXT: movabsq $87960930222080, %r14 # imm = 0x500000000000
+; CHECK-NEXT: movl 0, %r13d
+; CHECK-NEXT: movl %esi, %r15d
+; CHECK-NEXT: # implicit-def: $r12d
; CHECK-NEXT: testb $1, %bl
-; CHECK-NEXT: jne .LBB0_7
+; CHECK-NEXT: jne .LBB0_6
; CHECK-NEXT: # %bb.1: # %if.else
-; CHECK-NEXT: movq %r8, %r14
-; CHECK-NEXT: movl %ecx, %r13d
-; CHECK-NEXT: andl $1, %r13d
-; CHECK-NEXT: movzbl 544(%r13), %r8d
-; CHECK-NEXT: andl $1, %r8d
-; CHECK-NEXT: movl %r15d, %r9d
+; CHECK-NEXT: movl %ecx, %r12d
+; CHECK-NEXT: andl $1, %r12d
+; CHECK-NEXT: movzbl 544(%r12), %r9d
; CHECK-NEXT: andl $1, %r9d
; CHECK-NEXT: movl %r14d, %r10d
; CHECK-NEXT: andl $1, %r10d
+; CHECK-NEXT: andl $1, %r8d
; CHECK-NEXT: movabsq $17592186044416, %rax # imm = 0x100000000000
-; CHECK-NEXT: orq %r10, %rax
-; CHECK-NEXT: movl %esi, %r10d
+; CHECK-NEXT: orq %r8, %rax
+; CHECK-NEXT: movl %esi, %r8d
; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %r10d
-; CHECK-NEXT: andl $2, %r10d
+; CHECK-NEXT: shrl %cl, %r8d
+; CHECK-NEXT: andl $2, %r8d
; CHECK-NEXT: testb $1, %bl
-; CHECK-NEXT: cmoveq %r9, %rax
-; CHECK-NEXT: orl %r8d, %edx
-; CHECK-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movq %r11, %rcx
+; CHECK-NEXT: cmoveq %r10, %rax
+; CHECK-NEXT: orl %r9d, %edx
+; CHECK-NEXT: movq %r13, %rcx
; CHECK-NEXT: orq $1, %rcx
-; CHECK-NEXT: orl %esi, %r10d
+; CHECK-NEXT: orl %esi, %r8d
; CHECK-NEXT: movl $1, %r8d
; CHECK-NEXT: je .LBB0_3
; CHECK-NEXT: # %bb.2: # %if.else
; CHECK-NEXT: movl (%rax), %r8d
; CHECK-NEXT: .LBB0_3: # %if.else
; CHECK-NEXT: shlq $5, %rdx
-; CHECK-NEXT: movq %r12, %rax
+; CHECK-NEXT: movq %r15, %rax
; CHECK-NEXT: shlq $7, %rax
; CHECK-NEXT: leaq (%rax,%rdx), %rsi
; CHECK-NEXT: addq $1248, %rsi # imm = 0x4E0
; CHECK-NEXT: movq %rcx, 0
-; CHECK-NEXT: movq %rdi, %r15
+; CHECK-NEXT: movq %rdi, %r14
; CHECK-NEXT: movl %r8d, (%rdi)
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: callq *%rax
-; CHECK-NEXT: xorq $1, %r14
-; CHECK-NEXT: cmpl $0, (%r14)
-; CHECK-NEXT: je .LBB0_6
-; CHECK-NEXT: # %bb.4: # %if.else
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: je .LBB0_5
-; CHECK-NEXT: .LBB0_6: # %bb19
+; CHECK-NEXT: je .LBB0_4
+; CHECK-NEXT: # %bb.5: # %bb19
; CHECK-NEXT: testb $1, %bl
-; CHECK-NEXT: movq %r15, %rdi
-; CHECK-NEXT: movabsq $87960930222080, %r15 # imm = 0x500000000000
-; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; CHECK-NEXT: jne .LBB0_8
-; CHECK-NEXT: .LBB0_7: # %if.end69
-; CHECK-NEXT: movl %r11d, 0
+; CHECK-NEXT: movq %r14, %rdi
+; CHECK-NEXT: movabsq $87960930222080, %r14 # imm = 0x500000000000
+; CHECK-NEXT: jne .LBB0_7
+; CHECK-NEXT: .LBB0_6: # %if.end69
+; CHECK-NEXT: movl %r13d, 0
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: xorl %r8d, %r8d
; CHECK-NEXT: callq *%rax
-; CHECK-NEXT: xorq %r15, %r12
-; CHECK-NEXT: movslq %r13d, %rax
-; CHECK-NEXT: movzbl (%r12), %ecx
+; CHECK-NEXT: xorq %r14, %r15
+; CHECK-NEXT: movslq %r12d, %rax
+; CHECK-NEXT: movzbl (%r15), %ecx
; CHECK-NEXT: movb %cl, 544(%rax)
-; CHECK-NEXT: .LBB0_8: # %land.lhs.true56
+; CHECK-NEXT: .LBB0_7: # %land.lhs.true56
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: addq $8, %rsp
; CHECK-NEXT: popq %rbx
@@ -110,7 +102,7 @@ define i32 @decode_sb(ptr %t, i32 %bl, i32 %_msprop1966, i32 %sub.i, i64 %idxpro
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB0_5: # %bb
+; CHECK-NEXT: .LBB0_4: # %bb
entry:
%i = load i32, ptr null, align 8
br i1 %cmp54, label %if.end69, label %if.else
diff --git a/llvm/test/CodeGen/X86/kshift.ll b/llvm/test/CodeGen/X86/kshift.ll
index 0acf82f5a144..f4efacc1946c 100644
--- a/llvm/test/CodeGen/X86/kshift.ll
+++ b/llvm/test/CodeGen/X86/kshift.ll
@@ -270,11 +270,10 @@ define i64 @kshiftl_v64i1_63(<64 x i8> %x, <64 x i8> %y) {
; KNL-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
-; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $15, %k0, %k1
-; KNL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; KNL-NEXT: vpcmpeqb %ymm0, %ymm1, %ymm0
+; KNL-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
@@ -564,14 +563,13 @@ define i64 @kshiftr_v64i1_63(<64 x i8> %x, <64 x i8> %y) {
; KNL-LABEL: kshiftr_v64i1_63:
; KNL: # %bb.0:
; KNL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; KNL-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k1
-; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; KNL-NEXT: vpcmpeqb %xmm0, %xmm1, %xmm0
+; KNL-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
diff --git a/llvm/test/CodeGen/X86/lsr-addrecloops.ll b/llvm/test/CodeGen/X86/lsr-addrecloops.ll
index 74a8d68a850f..d41942bea69d 100644
--- a/llvm/test/CodeGen/X86/lsr-addrecloops.ll
+++ b/llvm/test/CodeGen/X86/lsr-addrecloops.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s | FileCheck %s
; Check that the SCEVs produced from the multiple loops don't attempt to get
@@ -9,7 +10,43 @@ target triple = "x86_64-unknown-linux-gnu"
define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr nocapture readonly %2, i64 %3, i1 %min.iters.check840) "target-cpu"="icelake-server" {
; CHECK-LABEL: in4dob_:
-; CHECK: .LBB0_6: # %vector.body807
+; CHECK: # %bb.0: # %.preheader263
+; CHECK-NEXT: leaq (,%rcx,4), %r9
+; CHECK-NEXT: movl $1, %r10d
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: vcmpneqps %xmm0, %xmm1, %k0
+; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: vcmpneqps %xmm0, %xmm1, %k1
+; CHECK-NEXT: korw %k0, %k1, %k0
+; CHECK-NEXT: kmovd %k0, %r11d
+; CHECK-NEXT: testb $1, %r11b
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: # %bb.19: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: incq %r10
+; CHECK-NEXT: addq %r9, %rax
+; CHECK-NEXT: cmpq %r10, %rcx
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: jmp .LBB0_17
+; CHECK-NEXT: .LBB0_2: # %vector.body807.preheader
+; CHECK-NEXT: leaq 1(%rcx), %rdx
+; CHECK-NEXT: movl %edx, %esi
+; CHECK-NEXT: andl $7, %esi
+; CHECK-NEXT: cmpq $7, %rcx
+; CHECK-NEXT: jae .LBB0_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: xorl %r9d, %r9d
+; CHECK-NEXT: jmp .LBB0_6
+; CHECK-NEXT: .LBB0_4: # %vector.body807.preheader.new
+; CHECK-NEXT: movq %rdx, %r10
+; CHECK-NEXT: andq $-8, %r10
+; CHECK-NEXT: xorl %r9d, %r9d
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_5: # %vector.body807
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: leaq (%rdi,%r9), %r11
; CHECK-NEXT: vmovups %ymm0, (%rax,%r11)
@@ -22,8 +59,43 @@ define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr
; CHECK-NEXT: vmovups %ymm0, 7(%rax,%r11)
; CHECK-NEXT: addq $8, %r9
; CHECK-NEXT: cmpq %r9, %r10
-; CHECK-NEXT: jne .LBB0_6
-; CHECK: .LBB0_14: # %vector.body847
+; CHECK-NEXT: jne .LBB0_5
+; CHECK-NEXT: .LBB0_6: # %.lr.ph373.unr-lcssa
+; CHECK-NEXT: testq %rsi, %rsi
+; CHECK-NEXT: je .LBB0_9
+; CHECK-NEXT: # %bb.7: # %vector.body807.epil.preheader
+; CHECK-NEXT: addq %rdi, %r9
+; CHECK-NEXT: xorl %r10d, %r10d
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_8: # %vector.body807.epil
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: leaq (%r9,%r10), %r11
+; CHECK-NEXT: vmovups %ymm0, (%rax,%r11)
+; CHECK-NEXT: incq %r10
+; CHECK-NEXT: cmpq %r10, %rsi
+; CHECK-NEXT: jne .LBB0_8
+; CHECK-NEXT: .LBB0_9: # %.lr.ph373
+; CHECK-NEXT: testb $1, %r8b
+; CHECK-NEXT: je .LBB0_10
+; CHECK-NEXT: # %bb.18: # %scalar.ph839.preheader
+; CHECK-NEXT: movl $0, (%rdi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_10: # %vector.body847.preheader
+; CHECK-NEXT: movl %edx, %esi
+; CHECK-NEXT: andl $7, %esi
+; CHECK-NEXT: cmpq $7, %rcx
+; CHECK-NEXT: jae .LBB0_12
+; CHECK-NEXT: # %bb.11:
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: jmp .LBB0_14
+; CHECK-NEXT: .LBB0_12: # %vector.body847.preheader.new
+; CHECK-NEXT: andq $-8, %rdx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_13: # %vector.body847
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: leaq (%rdi,%rcx), %r8
; CHECK-NEXT: vmovups %ymm0, 96(%rax,%r8)
@@ -36,7 +108,25 @@ define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr
; CHECK-NEXT: vmovups %ymm0, 103(%rax,%r8)
; CHECK-NEXT: addq $8, %rcx
; CHECK-NEXT: cmpq %rcx, %rdx
-; CHECK-NEXT: jne .LBB0_14
+; CHECK-NEXT: jne .LBB0_13
+; CHECK-NEXT: .LBB0_14: # %common.ret.loopexit.unr-lcssa
+; CHECK-NEXT: testq %rsi, %rsi
+; CHECK-NEXT: je .LBB0_17
+; CHECK-NEXT: # %bb.15: # %vector.body847.epil.preheader
+; CHECK-NEXT: leaq 96(%rcx,%rdi), %rcx
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_16: # %vector.body847.epil
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: leaq (%rcx,%rdx), %rdi
+; CHECK-NEXT: vmovups %ymm0, (%rax,%rdi)
+; CHECK-NEXT: incq %rdx
+; CHECK-NEXT: cmpq %rdx, %rsi
+; CHECK-NEXT: jne .LBB0_16
+; CHECK-NEXT: .LBB0_17: # %common.ret
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
.preheader263:
%4 = shl i64 %3, 2
br label %5
diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
index a7564c9622c5..e8b3121ecfb5 100644
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -4440,16 +4440,14 @@ define i32 @pr67287(<2 x i64> %broadcast.splatinsert25) {
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
; SSE2-NEXT: movmskpd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: jne .LBB97_2
-; SSE2-NEXT: # %bb.1: # %entry
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: testb $1, %al
-; SSE2-NEXT: jne .LBB97_2
-; SSE2-NEXT: # %bb.3: # %middle.block
-; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: retq
-; SSE2-NEXT: .LBB97_2:
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: orb %al, %cl
+; SSE2-NEXT: testb $1, %cl
+; SSE2-NEXT: je .LBB97_2
+; SSE2-NEXT: # %bb.1:
; SSE2-NEXT: movw $0, 0
+; SSE2-NEXT: .LBB97_2: # %middle.block
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: retq
;
@@ -4460,16 +4458,14 @@ define i32 @pr67287(<2 x i64> %broadcast.splatinsert25) {
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: movmskpd %xmm0, %eax
; SSE41-NEXT: testl %eax, %eax
-; SSE41-NEXT: jne .LBB97_2
-; SSE41-NEXT: # %bb.1: # %entry
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: testb $1, %al
-; SSE41-NEXT: jne .LBB97_2
-; SSE41-NEXT: # %bb.3: # %middle.block
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: retq
-; SSE41-NEXT: .LBB97_2:
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: orb %al, %cl
+; SSE41-NEXT: testb $1, %cl
+; SSE41-NEXT: je .LBB97_2
+; SSE41-NEXT: # %bb.1:
; SSE41-NEXT: movw $0, 0
+; SSE41-NEXT: .LBB97_2: # %middle.block
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: retq
;
@@ -4479,16 +4475,14 @@ define i32 @pr67287(<2 x i64> %broadcast.splatinsert25) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vtestpd %xmm0, %xmm0
-; AVX1-NEXT: jne .LBB97_2
-; AVX1-NEXT: # %bb.1: # %entry
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testb $1, %al
-; AVX1-NEXT: jne .LBB97_2
-; AVX1-NEXT: # %bb.3: # %middle.block
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: retq
-; AVX1-NEXT: .LBB97_2:
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vmovd %xmm0, %ecx
+; AVX1-NEXT: orb %al, %cl
+; AVX1-NEXT: testb $1, %cl
+; AVX1-NEXT: je .LBB97_2
+; AVX1-NEXT: # %bb.1:
; AVX1-NEXT: movw $0, 0
+; AVX1-NEXT: .LBB97_2: # %middle.block
; AVX1-NEXT: xorl %eax, %eax
; AVX1-NEXT: retq
;
@@ -4498,16 +4492,14 @@ define i32 @pr67287(<2 x i64> %broadcast.splatinsert25) {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vtestpd %xmm0, %xmm0
-; AVX2-NEXT: jne .LBB97_2
-; AVX2-NEXT: # %bb.1: # %entry
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testb $1, %al
-; AVX2-NEXT: jne .LBB97_2
-; AVX2-NEXT: # %bb.3: # %middle.block
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: retq
-; AVX2-NEXT: .LBB97_2:
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vmovd %xmm0, %ecx
+; AVX2-NEXT: orb %al, %cl
+; AVX2-NEXT: testb $1, %cl
+; AVX2-NEXT: je .LBB97_2
+; AVX2-NEXT: # %bb.1:
; AVX2-NEXT: movw $0, 0
+; AVX2-NEXT: .LBB97_2: # %middle.block
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: retq
;
@@ -4517,18 +4509,15 @@ define i32 @pr67287(<2 x i64> %broadcast.splatinsert25) {
; KNL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; KNL-NEXT: vptestnmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: testb $3, %al
-; KNL-NEXT: jne .LBB97_2
-; KNL-NEXT: # %bb.1: # %entry
-; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: kmovw %k0, %ecx
+; KNL-NEXT: testb $3, %cl
+; KNL-NEXT: setne %cl
+; KNL-NEXT: orb %cl, %al
; KNL-NEXT: testb $1, %al
-; KNL-NEXT: jne .LBB97_2
-; KNL-NEXT: # %bb.3: # %middle.block
-; KNL-NEXT: xorl %eax, %eax
-; KNL-NEXT: vzeroupper
-; KNL-NEXT: retq
-; KNL-NEXT: .LBB97_2:
+; KNL-NEXT: je .LBB97_2
+; KNL-NEXT: # %bb.1:
; KNL-NEXT: movw $0, 0
+; KNL-NEXT: .LBB97_2: # %middle.block
; KNL-NEXT: xorl %eax, %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
@@ -4539,16 +4528,14 @@ define i32 @pr67287(<2 x i64> %broadcast.splatinsert25) {
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k0
; SKX-NEXT: kortestb %k0, %k0
-; SKX-NEXT: jne .LBB97_2
-; SKX-NEXT: # %bb.1: # %entry
-; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: testb $1, %al
-; SKX-NEXT: jne .LBB97_2
-; SKX-NEXT: # %bb.3: # %middle.block
-; SKX-NEXT: xorl %eax, %eax
-; SKX-NEXT: retq
-; SKX-NEXT: .LBB97_2:
+; SKX-NEXT: setne %al
+; SKX-NEXT: kmovd %k0, %ecx
+; SKX-NEXT: orb %al, %cl
+; SKX-NEXT: testb $1, %cl
+; SKX-NEXT: je .LBB97_2
+; SKX-NEXT: # %bb.1:
; SKX-NEXT: movw $0, 0
+; SKX-NEXT: .LBB97_2: # %middle.block
; SKX-NEXT: xorl %eax, %eax
; SKX-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/or-branch.ll b/llvm/test/CodeGen/X86/or-branch.ll
index 5d5cc2cb32f1..c6df237393e4 100644
--- a/llvm/test/CodeGen/X86/or-branch.ll
+++ b/llvm/test/CodeGen/X86/or-branch.ll
@@ -5,12 +5,13 @@
define void @foo(i32 %X, i32 %Y, i32 %Z) nounwind {
; JUMP2-LABEL: foo:
; JUMP2: # %bb.0: # %entry
-; JUMP2-NEXT: cmpl $5, {{[0-9]+}}(%esp)
-; JUMP2-NEXT: jl bar@PLT # TAILCALL
-; JUMP2-NEXT: # %bb.1: # %entry
; JUMP2-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; JUMP2-NEXT: setne %al
+; JUMP2-NEXT: cmpl $5, {{[0-9]+}}(%esp)
+; JUMP2-NEXT: setge %cl
+; JUMP2-NEXT: testb %al, %cl
; JUMP2-NEXT: je bar@PLT # TAILCALL
-; JUMP2-NEXT: # %bb.2: # %UnifiedReturnBlock
+; JUMP2-NEXT: # %bb.1: # %UnifiedReturnBlock
; JUMP2-NEXT: retl
;
; JUMP1-LABEL: foo:
diff --git a/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll b/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
index 9069688c8037..3354c99a361b 100644
--- a/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
+++ b/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
@@ -14,31 +14,33 @@ declare i32 @bar(i64)
define i1 @plus_one() nounwind {
; CHECK32-LABEL: plus_one:
; CHECK32: # %bb.0: # %entry
-; CHECK32-NEXT: movzbl M, %eax
; CHECK32-NEXT: incl L
-; CHECK32-NEXT: jne .LBB0_2
-; CHECK32-NEXT: # %bb.1: # %entry
-; CHECK32-NEXT: andb $8, %al
-; CHECK32-NEXT: je .LBB0_2
-; CHECK32-NEXT: # %bb.3: # %exit2
+; CHECK32-NEXT: sete %al
+; CHECK32-NEXT: movzbl M, %ecx
+; CHECK32-NEXT: andb $8, %cl
+; CHECK32-NEXT: shrb $3, %cl
+; CHECK32-NEXT: testb %cl, %al
+; CHECK32-NEXT: je .LBB0_1
+; CHECK32-NEXT: # %bb.2: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
-; CHECK32-NEXT: .LBB0_2: # %exit
+; CHECK32-NEXT: .LBB0_1: # %exit
; CHECK32-NEXT: movb $1, %al
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: plus_one:
; CHECK64: # %bb.0: # %entry
-; CHECK64-NEXT: movzbl M(%rip), %eax
; CHECK64-NEXT: incl L(%rip)
-; CHECK64-NEXT: jne .LBB0_2
-; CHECK64-NEXT: # %bb.1: # %entry
-; CHECK64-NEXT: andb $8, %al
-; CHECK64-NEXT: je .LBB0_2
-; CHECK64-NEXT: # %bb.3: # %exit2
+; CHECK64-NEXT: sete %al
+; CHECK64-NEXT: movzbl M(%rip), %ecx
+; CHECK64-NEXT: andb $8, %cl
+; CHECK64-NEXT: shrb $3, %cl
+; CHECK64-NEXT: testb %cl, %al
+; CHECK64-NEXT: je .LBB0_1
+; CHECK64-NEXT: # %bb.2: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
-; CHECK64-NEXT: .LBB0_2: # %exit
+; CHECK64-NEXT: .LBB0_1: # %exit
; CHECK64-NEXT: movb $1, %al
; CHECK64-NEXT: retq
entry:
@@ -63,30 +65,32 @@ define i1 @plus_forty_two() nounwind {
; CHECK32-LABEL: plus_forty_two:
; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movzbl M, %eax
-; CHECK32-NEXT: addl $42, L
-; CHECK32-NEXT: jne .LBB1_2
-; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: andb $8, %al
-; CHECK32-NEXT: je .LBB1_2
-; CHECK32-NEXT: # %bb.3: # %exit2
+; CHECK32-NEXT: shrb $3, %al
+; CHECK32-NEXT: addl $42, L
+; CHECK32-NEXT: sete %cl
+; CHECK32-NEXT: testb %al, %cl
+; CHECK32-NEXT: je .LBB1_1
+; CHECK32-NEXT: # %bb.2: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
-; CHECK32-NEXT: .LBB1_2: # %exit
+; CHECK32-NEXT: .LBB1_1: # %exit
; CHECK32-NEXT: movb $1, %al
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: plus_forty_two:
; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movzbl M(%rip), %eax
-; CHECK64-NEXT: addl $42, L(%rip)
-; CHECK64-NEXT: jne .LBB1_2
-; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: andb $8, %al
-; CHECK64-NEXT: je .LBB1_2
-; CHECK64-NEXT: # %bb.3: # %exit2
+; CHECK64-NEXT: shrb $3, %al
+; CHECK64-NEXT: addl $42, L(%rip)
+; CHECK64-NEXT: sete %cl
+; CHECK64-NEXT: testb %al, %cl
+; CHECK64-NEXT: je .LBB1_1
+; CHECK64-NEXT: # %bb.2: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
-; CHECK64-NEXT: .LBB1_2: # %exit
+; CHECK64-NEXT: .LBB1_1: # %exit
; CHECK64-NEXT: movb $1, %al
; CHECK64-NEXT: retq
entry:
@@ -111,30 +115,32 @@ define i1 @minus_one() nounwind {
; CHECK32-LABEL: minus_one:
; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movzbl M, %eax
-; CHECK32-NEXT: decl L
-; CHECK32-NEXT: jne .LBB2_2
-; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: andb $8, %al
-; CHECK32-NEXT: je .LBB2_2
-; CHECK32-NEXT: # %bb.3: # %exit2
+; CHECK32-NEXT: shrb $3, %al
+; CHECK32-NEXT: decl L
+; CHECK32-NEXT: sete %cl
+; CHECK32-NEXT: testb %al, %cl
+; CHECK32-NEXT: je .LBB2_1
+; CHECK32-NEXT: # %bb.2: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
-; CHECK32-NEXT: .LBB2_2: # %exit
+; CHECK32-NEXT: .LBB2_1: # %exit
; CHECK32-NEXT: movb $1, %al
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: minus_one:
; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movzbl M(%rip), %eax
-; CHECK64-NEXT: decl L(%rip)
-; CHECK64-NEXT: jne .LBB2_2
-; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: andb $8, %al
-; CHECK64-NEXT: je .LBB2_2
-; CHECK64-NEXT: # %bb.3: # %exit2
+; CHECK64-NEXT: shrb $3, %al
+; CHECK64-NEXT: decl L(%rip)
+; CHECK64-NEXT: sete %cl
+; CHECK64-NEXT: testb %al, %cl
+; CHECK64-NEXT: je .LBB2_1
+; CHECK64-NEXT: # %bb.2: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
-; CHECK64-NEXT: .LBB2_2: # %exit
+; CHECK64-NEXT: .LBB2_1: # %exit
; CHECK64-NEXT: movb $1, %al
; CHECK64-NEXT: retq
entry:
@@ -159,30 +165,32 @@ define i1 @minus_forty_two() nounwind {
; CHECK32-LABEL: minus_forty_two:
; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movzbl M, %eax
-; CHECK32-NEXT: addl $-42, L
-; CHECK32-NEXT: jne .LBB3_2
-; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: andb $8, %al
-; CHECK32-NEXT: je .LBB3_2
-; CHECK32-NEXT: # %bb.3: # %exit2
+; CHECK32-NEXT: shrb $3, %al
+; CHECK32-NEXT: addl $-42, L
+; CHECK32-NEXT: sete %cl
+; CHECK32-NEXT: testb %al, %cl
+; CHECK32-NEXT: je .LBB3_1
+; CHECK32-NEXT: # %bb.2: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
-; CHECK32-NEXT: .LBB3_2: # %exit
+; CHECK32-NEXT: .LBB3_1: # %exit
; CHECK32-NEXT: movb $1, %al
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: minus_forty_two:
; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movzbl M(%rip), %eax
-; CHECK64-NEXT: addl $-42, L(%rip)
-; CHECK64-NEXT: jne .LBB3_2
-; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: andb $8, %al
-; CHECK64-NEXT: je .LBB3_2
-; CHECK64-NEXT: # %bb.3: # %exit2
+; CHECK64-NEXT: shrb $3, %al
+; CHECK64-NEXT: addl $-42, L(%rip)
+; CHECK64-NEXT: sete %cl
+; CHECK64-NEXT: testb %al, %cl
+; CHECK64-NEXT: je .LBB3_1
+; CHECK64-NEXT: # %bb.2: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
-; CHECK64-NEXT: .LBB3_2: # %exit
+; CHECK64-NEXT: .LBB3_1: # %exit
; CHECK64-NEXT: movb $1, %al
; CHECK64-NEXT: retq
entry:
@@ -284,7 +292,7 @@ define i64 @test_two_live_flags(ptr %foo0, i64 %bar0, i64 %baz0, ptr %foo1, i64
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %esi
; CHECK32-NEXT: lock cmpxchg8b (%esi)
-; CHECK32-NEXT: setne {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; CHECK32-NEXT: sete {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movl %ebp, %edx
; CHECK32-NEXT: movl %edi, %ecx
@@ -292,17 +300,15 @@ define i64 @test_two_live_flags(ptr %foo0, i64 %bar0, i64 %baz0, ptr %foo1, i64
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %esi
; CHECK32-NEXT: lock cmpxchg8b (%esi)
; CHECK32-NEXT: sete %al
-; CHECK32-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
-; CHECK32-NEXT: jne .LBB5_4
-; CHECK32-NEXT: # %bb.1: # %entry
-; CHECK32-NEXT: testb %al, %al
-; CHECK32-NEXT: je .LBB5_4
-; CHECK32-NEXT: # %bb.2: # %t
+; CHECK32-NEXT: andb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Folded Reload
+; CHECK32-NEXT: cmpb $1, %al
+; CHECK32-NEXT: jne .LBB5_3
+; CHECK32-NEXT: # %bb.1: # %t
; CHECK32-NEXT: movl $42, %eax
-; CHECK32-NEXT: jmp .LBB5_3
-; CHECK32-NEXT: .LBB5_4: # %f
+; CHECK32-NEXT: jmp .LBB5_2
+; CHECK32-NEXT: .LBB5_3: # %f
; CHECK32-NEXT: xorl %eax, %eax
-; CHECK32-NEXT: .LBB5_3: # %t
+; CHECK32-NEXT: .LBB5_2: # %t
; CHECK32-NEXT: xorl %edx, %edx
; CHECK32-NEXT: addl $4, %esp
; CHECK32-NEXT: popl %esi
@@ -315,19 +321,17 @@ define i64 @test_two_live_flags(ptr %foo0, i64 %bar0, i64 %baz0, ptr %foo1, i64
; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movq %rsi, %rax
; CHECK64-NEXT: lock cmpxchgq %rdx, (%rdi)
-; CHECK64-NEXT: setne %dl
+; CHECK64-NEXT: sete %dl
; CHECK64-NEXT: movq %r8, %rax
; CHECK64-NEXT: lock cmpxchgq %r9, (%rcx)
; CHECK64-NEXT: sete %al
-; CHECK64-NEXT: testb %dl, %dl
-; CHECK64-NEXT: jne .LBB5_3
-; CHECK64-NEXT: # %bb.1: # %entry
-; CHECK64-NEXT: testb %al, %al
-; CHECK64-NEXT: je .LBB5_3
-; CHECK64-NEXT: # %bb.2: # %t
+; CHECK64-NEXT: andb %dl, %al
+; CHECK64-NEXT: cmpb $1, %al
+; CHECK64-NEXT: jne .LBB5_2
+; CHECK64-NEXT: # %bb.1: # %t
; CHECK64-NEXT: movl $42, %eax
; CHECK64-NEXT: retq
-; CHECK64-NEXT: .LBB5_3: # %f
+; CHECK64-NEXT: .LBB5_2: # %f
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
entry:
@@ -353,7 +357,6 @@ define i1 @asm_clobbering_flags(ptr %mem) nounwind {
; CHECK32-NEXT: testl %edx, %edx
; CHECK32-NEXT: setg %al
; CHECK32-NEXT: #APP
-; CHECK32-NOT: rep
; CHECK32-NEXT: bsfl %edx, %edx
; CHECK32-NEXT: #NO_APP
; CHECK32-NEXT: movl %edx, (%ecx)
@@ -365,7 +368,6 @@ define i1 @asm_clobbering_flags(ptr %mem) nounwind {
; CHECK64-NEXT: testl %ecx, %ecx
; CHECK64-NEXT: setg %al
; CHECK64-NEXT: #APP
-; CHECK64-NOT: rep
; CHECK64-NEXT: bsfl %ecx, %ecx
; CHECK64-NEXT: #NO_APP
; CHECK64-NEXT: movl %ecx, (%rdi)
diff --git a/llvm/test/CodeGen/X86/pr31088.ll b/llvm/test/CodeGen/X86/pr31088.ll
index fa1014e3ae0d..ce37622c476d 100644
--- a/llvm/test/CodeGen/X86/pr31088.ll
+++ b/llvm/test/CodeGen/X86/pr31088.ll
@@ -41,15 +41,9 @@ define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
;
; F16C-LABEL: ir_fadd_v1f16:
; F16C: # %bb.0:
-; F16C-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-NEXT: vpextrw $0, %xmm1, %ecx
-; F16C-NEXT: movzwl %cx, %ecx
-; F16C-NEXT: vmovd %ecx, %xmm0
-; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; F16C-NEXT: movzwl %ax, %eax
-; F16C-NEXT: vmovd %eax, %xmm1
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; F16C-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vaddss %xmm1, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vmovd %xmm0, %eax
; F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
@@ -58,13 +52,15 @@ define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
; F16C-O0-LABEL: ir_fadd_v1f16:
; F16C-O0: # %bb.0:
; F16C-O0-NEXT: vpextrw $0, %xmm1, %eax
-; F16C-O0-NEXT: # kill: def $ax killed $ax killed $eax
-; F16C-O0-NEXT: movzwl %ax, %eax
+; F16C-O0-NEXT: movw %ax, %cx
+; F16C-O0-NEXT: # implicit-def: $eax
+; F16C-O0-NEXT: movw %cx, %ax
; F16C-O0-NEXT: vmovd %eax, %xmm1
; F16C-O0-NEXT: vcvtph2ps %xmm1, %xmm1
; F16C-O0-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-O0-NEXT: # kill: def $ax killed $ax killed $eax
-; F16C-O0-NEXT: movzwl %ax, %eax
+; F16C-O0-NEXT: movw %ax, %cx
+; F16C-O0-NEXT: # implicit-def: $eax
+; F16C-O0-NEXT: movw %cx, %ax
; F16C-O0-NEXT: vmovd %eax, %xmm0
; F16C-O0-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-O0-NEXT: vaddss %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/pr33747.ll b/llvm/test/CodeGen/X86/pr33747.ll
index e261486dd592..c8ba2b2e3a79 100644
--- a/llvm/test/CodeGen/X86/pr33747.ll
+++ b/llvm/test/CodeGen/X86/pr33747.ll
@@ -5,18 +5,19 @@ define void @PR33747(ptr nocapture) {
; CHECK-LABEL: PR33747:
; CHECK: # %bb.0:
; CHECK-NEXT: movl 24(%rdi), %eax
+; CHECK-NEXT: leal 1(%rax), %ecx
+; CHECK-NEXT: cmpl $3, %ecx
+; CHECK-NEXT: setb %cl
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB0_3
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: incl %eax
-; CHECK-NEXT: cmpl $3, %eax
-; CHECK-NEXT: jae .LBB0_3
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: testb %cl, %al
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: jmp .LBB0_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_2: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: jmp .LBB0_2
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_3: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: jmp .LBB0_3
%2 = getelementptr inbounds i32, ptr %0, i64 6
%3 = load i32, ptr %2, align 4
%4 = add i32 %3, 1
diff --git a/llvm/test/CodeGen/X86/pr34605.ll b/llvm/test/CodeGen/X86/pr34605.ll
index 863b0ffc93a9..25dd6a7436a8 100644
--- a/llvm/test/CodeGen/X86/pr34605.ll
+++ b/llvm/test/CodeGen/X86/pr34605.ll
@@ -17,7 +17,7 @@ define void @pr34605(ptr nocapture %s, i32 %p) {
; CHECK-NEXT: kmovd %ecx, %k1
; CHECK-NEXT: kmovd %k1, %k1
; CHECK-NEXT: kandq %k1, %k0, %k1
-; CHECK-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}, %zmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqu8 {{.*#+}} zmm0 {%k1} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vmovdqu64 %zmm0, (%eax)
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqu64 %zmm0, 64(%eax)
diff --git a/llvm/test/CodeGen/X86/pr37025.ll b/llvm/test/CodeGen/X86/pr37025.ll
index a758ddc91541..8ac28d6286a6 100644
--- a/llvm/test/CodeGen/X86/pr37025.ll
+++ b/llvm/test/CodeGen/X86/pr37025.ll
@@ -18,11 +18,13 @@ define void @test_dec_select(ptr nocapture %0, ptr readnone %1) {
; CHECK-LABEL: test_dec_select:
; CHECK: # %bb.0:
; CHECK-NEXT: lock decq (%rdi)
-; CHECK-NEXT: jne .LBB0_2
-; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: sete %al
; CHECK-NEXT: testq %rsi, %rsi
-; CHECK-NEXT: jne func2 # TAILCALL
-; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: je func2 # TAILCALL
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: retq
%3 = atomicrmw sub ptr %0, i64 1 seq_cst
%4 = icmp eq i64 %3, 1
@@ -44,11 +46,11 @@ define void @test_dec_select_commute(ptr nocapture %0, ptr readnone %1) {
; CHECK-NEXT: lock decq (%rdi)
; CHECK-NEXT: sete %al
; CHECK-NEXT: testq %rsi, %rsi
-; CHECK-NEXT: je .LBB1_2
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: je func2 # TAILCALL
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne func2 # TAILCALL
-; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: retq
%3 = atomicrmw sub ptr %0, i64 1 seq_cst
%4 = icmp eq i64 %3, 1
@@ -69,12 +71,13 @@ define void @test_dec_and(ptr nocapture %0, ptr readnone %1) {
; CHECK: # %bb.0:
; CHECK-NEXT: lock decq (%rdi)
; CHECK-NEXT: sete %al
+; CHECK-NEXT: notb %al
; CHECK-NEXT: testq %rsi, %rsi
-; CHECK-NEXT: je .LBB2_2
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: orb %al, %cl
+; CHECK-NEXT: testb $1, %cl
+; CHECK-NEXT: je func2 # TAILCALL
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne func2 # TAILCALL
-; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: retq
%3 = atomicrmw sub ptr %0, i64 1 seq_cst
%4 = icmp eq i64 %3, 1
@@ -94,11 +97,14 @@ define void @test_dec_and_commute(ptr nocapture %0, ptr readnone %1) {
; CHECK-LABEL: test_dec_and_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: lock decq (%rdi)
-; CHECK-NEXT: jne .LBB3_2
-; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: notb %al
; CHECK-NEXT: testq %rsi, %rsi
-; CHECK-NEXT: jne func2 # TAILCALL
-; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: orb %al, %cl
+; CHECK-NEXT: testb $1, %cl
+; CHECK-NEXT: je func2 # TAILCALL
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: retq
%3 = atomicrmw sub ptr %0, i64 1 seq_cst
%4 = icmp eq i64 %3, 1
diff --git a/llvm/test/CodeGen/X86/pr38795.ll b/llvm/test/CodeGen/X86/pr38795.ll
index 03629a353d84..f64c70e8fc79 100644
--- a/llvm/test/CodeGen/X86/pr38795.ll
+++ b/llvm/test/CodeGen/X86/pr38795.ll
@@ -25,141 +25,126 @@ define dso_local void @fn() {
; CHECK-NEXT: xorl %ebx, %ebx
; CHECK-NEXT: # implicit-def: $ecx
; CHECK-NEXT: # implicit-def: $edi
+; CHECK-NEXT: # implicit-def: $dh
; CHECK-NEXT: # implicit-def: $al
; CHECK-NEXT: # kill: killed $al
-; CHECK-NEXT: # implicit-def: $al
; CHECK-NEXT: # implicit-def: $ebp
; CHECK-NEXT: jmp .LBB0_1
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_16: # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; CHECK-NEXT: movb %dh, %al
+; CHECK-NEXT: .LBB0_15: # %for.inc
+; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; CHECK-NEXT: .LBB0_1: # %for.cond
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB0_22 Depth 2
-; CHECK-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; CHECK-NEXT: cmpb $8, %al
-; CHECK-NEXT: ja .LBB0_3
-; CHECK-NEXT: # %bb.2: # %for.cond
-; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: # Child Loop BB0_19 Depth 2
; CHECK-NEXT: testb %bl, %bl
-; CHECK-NEXT: je .LBB0_3
-; CHECK-NEXT: # %bb.4: # %if.end
+; CHECK-NEXT: jne .LBB0_3
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: movb %dh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; CHECK-NEXT: movl $.str, (%esp)
+; CHECK-NEXT: calll printf
+; CHECK-NEXT: # implicit-def: $eax
+; CHECK-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: jne .LBB0_10
+; CHECK-NEXT: jmp .LBB0_6
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_3: # %if.end
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl a
-; CHECK-NEXT: movl %eax, %esi
-; CHECK-NEXT: movb %cl, %dh
+; CHECK-NEXT: movl %ecx, %edx
; CHECK-NEXT: movl $0, h
-; CHECK-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
-; CHECK-NEXT: cmpb $8, %al
-; CHECK-NEXT: jg .LBB0_8
-; CHECK-NEXT: # %bb.5: # %if.then13
+; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %dh # 1-byte Reload
+; CHECK-NEXT: cmpb $8, %dh
+; CHECK-NEXT: jg .LBB0_7
+; CHECK-NEXT: # %bb.4: # %if.then13
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: movl $.str, (%esp)
-; CHECK-NEXT: movb %dh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; CHECK-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; CHECK-NEXT: calll printf
; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %dh # 1-byte Reload
+; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %dl # 1-byte Reload
; CHECK-NEXT: testb %bl, %bl
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: # implicit-def: $eax
-; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %dl # 1-byte Reload
-; CHECK-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; CHECK-NEXT: movb %dh, %dl
-; CHECK-NEXT: je .LBB0_6
-; CHECK-NEXT: jmp .LBB0_18
+; CHECK-NEXT: movb %dh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; CHECK-NEXT: jne .LBB0_15
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_3: # %if.then
-; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: movl $.str, (%esp)
-; CHECK-NEXT: calll printf
-; CHECK-NEXT: # implicit-def: $eax
-; CHECK-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
-; CHECK-NEXT: .LBB0_6: # %for.cond35
+; CHECK-NEXT: # %bb.5: # %for.cond35
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB0_7
-; CHECK-NEXT: .LBB0_11: # %af
+; CHECK-NEXT: je .LBB0_6
+; CHECK-NEXT: .LBB0_10: # %af
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: testb %bl, %bl
-; CHECK-NEXT: jne .LBB0_12
-; CHECK-NEXT: .LBB0_19: # %if.end39
+; CHECK-NEXT: jne .LBB0_11
+; CHECK-NEXT: .LBB0_16: # %if.end39
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB0_21
-; CHECK-NEXT: # %bb.20: # %if.then41
+; CHECK-NEXT: je .LBB0_18
+; CHECK-NEXT: # %bb.17: # %if.then41
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $fn, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $.str, (%esp)
; CHECK-NEXT: calll printf
-; CHECK-NEXT: .LBB0_21: # %for.end46
+; CHECK-NEXT: .LBB0_18: # %for.end46
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: # implicit-def: $al
; CHECK-NEXT: # implicit-def: $dh
+; CHECK-NEXT: # implicit-def: $dl
; CHECK-NEXT: # implicit-def: $ebp
-; CHECK-NEXT: jmp .LBB0_22
+; CHECK-NEXT: jmp .LBB0_19
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_8: # %if.end21
+; CHECK-NEXT: .LBB0_7: # %if.end21
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: # implicit-def: $ebp
-; CHECK-NEXT: jmp .LBB0_9
+; CHECK-NEXT: jmp .LBB0_8
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_7: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: .LBB0_6: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: xorl %edi, %edi
-; CHECK-NEXT: movb %dl, %dh
-; CHECK-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %dh # 1-byte Reload
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_22: # %for.cond47
+; CHECK-NEXT: .LBB0_19: # %for.cond47
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: testb %bl, %bl
-; CHECK-NEXT: jne .LBB0_22
-; CHECK-NEXT: # %bb.23: # %for.cond47
-; CHECK-NEXT: # in Loop: Header=BB0_22 Depth=2
-; CHECK-NEXT: jne .LBB0_22
-; CHECK-NEXT: .LBB0_9: # %ae
+; CHECK-NEXT: jne .LBB0_19
+; CHECK-NEXT: .LBB0_8: # %ae
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: testb %bl, %bl
-; CHECK-NEXT: jne .LBB0_10
-; CHECK-NEXT: # %bb.13: # %if.end26
+; CHECK-NEXT: jne .LBB0_9
+; CHECK-NEXT: # %bb.12: # %if.end26
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: je .LBB0_14
-; CHECK-NEXT: # %bb.15: # %if.end26
+; CHECK-NEXT: testb %dh, %dh
+; CHECK-NEXT: je .LBB0_15
+; CHECK-NEXT: # %bb.13: # %if.end26
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: testl %ebp, %ebp
-; CHECK-NEXT: jne .LBB0_16
-; CHECK-NEXT: # %bb.17: # %if.then31
+; CHECK-NEXT: jne .LBB0_15
+; CHECK-NEXT: # %bb.14: # %if.then31
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; CHECK-NEXT: xorl %ebp, %ebp
-; CHECK-NEXT: .LBB0_18: # %for.inc
-; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: movb %dh, %al
-; CHECK-NEXT: jmp .LBB0_1
+; CHECK-NEXT: jmp .LBB0_15
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_10: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: .LBB0_9: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: # implicit-def: $eax
; CHECK-NEXT: testb %bl, %bl
-; CHECK-NEXT: je .LBB0_19
-; CHECK-NEXT: .LBB0_12: # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: je .LBB0_16
+; CHECK-NEXT: .LBB0_11: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: # implicit-def: $edi
; CHECK-NEXT: # implicit-def: $cl
; CHECK-NEXT: # kill: killed $cl
; CHECK-NEXT: # implicit-def: $dl
; CHECK-NEXT: # implicit-def: $ebp
; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: jne .LBB0_11
-; CHECK-NEXT: jmp .LBB0_7
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB0_14: # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; CHECK-NEXT: movb %dh, %al
-; CHECK-NEXT: jmp .LBB0_1
+; CHECK-NEXT: jne .LBB0_10
+; CHECK-NEXT: jmp .LBB0_6
entry:
br label %for.cond
diff --git a/llvm/test/CodeGen/X86/pr38803.ll b/llvm/test/CodeGen/X86/pr38803.ll
index 61dc228d1d1f..ebac8121df91 100644
--- a/llvm/test/CodeGen/X86/pr38803.ll
+++ b/llvm/test/CodeGen/X86/pr38803.ll
@@ -13,7 +13,7 @@ define dso_local float @_Z3fn2v() {
; CHECK-NEXT: callq _Z1av@PLT
; CHECK-NEXT: # kill: def $al killed $al def $eax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 {%k1} {z} = [7.5E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: cmpl $0, c(%rip)
; CHECK-NEXT: je .LBB0_2
; CHECK-NEXT: # %bb.1: # %if.then
diff --git a/llvm/test/CodeGen/X86/pr43509.ll b/llvm/test/CodeGen/X86/pr43509.ll
index 87ddad03e9c4..a29fe4c6a046 100644
--- a/llvm/test/CodeGen/X86/pr43509.ll
+++ b/llvm/test/CodeGen/X86/pr43509.ll
@@ -7,7 +7,7 @@ define <8 x i8> @foo(<8 x float> %arg) {
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpltps %ymm1, %ymm0, %k1
; CHECK-NEXT: vcmpgtps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
bb:
diff --git a/llvm/test/CodeGen/X86/pr46455.ll b/llvm/test/CodeGen/X86/pr46455.ll
index 092e417c812e..3d1b04f74357 100644
--- a/llvm/test/CodeGen/X86/pr46455.ll
+++ b/llvm/test/CodeGen/X86/pr46455.ll
@@ -4,10 +4,10 @@
define void @EntryModule(ptr %buffer_table) {
; CHECK-LABEL: EntryModule:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: movq 24(%rdi), %rcx
-; CHECK-NEXT: vcmpneqps (%rax), %ymm0, %ymm0
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vcmpneqps (%rax), %xmm0, %xmm0
; CHECK-NEXT: vpsrld $31, %xmm0, %xmm1
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
@@ -16,7 +16,6 @@ define void @EntryModule(ptr %buffer_table) {
; CHECK-NEXT: vpsubd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, (%rcx)
-; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
%i1 = load ptr, ptr %buffer_table, align 8
diff --git a/llvm/test/CodeGen/X86/pr57340.ll b/llvm/test/CodeGen/X86/pr57340.ll
index 57f52c8dcdbb..00a52c639e43 100644
--- a/llvm/test/CodeGen/X86/pr57340.ll
+++ b/llvm/test/CodeGen/X86/pr57340.ll
@@ -5,54 +5,42 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-LABEL: main.41:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpbroadcastw (%rax), %xmm0
-; CHECK-NEXT: vmovdqu (%rax), %ymm2
-; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm1 = [31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
-; CHECK-NEXT: vpermi2w %ymm3, %ymm2, %ymm1
; CHECK-NEXT: vpextrw $0, %xmm0, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm0
-; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm1
+; CHECK-NEXT: vmovdqu (%rax), %ymm3
+; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm2 = [31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; CHECK-NEXT: vpermi2w %ymm1, %ymm3, %ymm2
+; CHECK-NEXT: vprold $16, %xmm2, %xmm1
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm3
; CHECK-NEXT: vmovdqu (%rax), %xmm5
-; CHECK-NEXT: vpextrw $0, %xmm5, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm0, %xmm2
-; CHECK-NEXT: setnp %al
-; CHECK-NEXT: sete %cl
-; CHECK-NEXT: testb %al, %cl
-; CHECK-NEXT: vpsrld $16, %xmm1, %xmm3
-; CHECK-NEXT: vpextrw $0, %xmm3, %eax
-; CHECK-NEXT: movzwl %ax, %eax
+; CHECK-NEXT: vprold $16, %xmm5, %xmm1
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm3, %xmm1
+; CHECK-NEXT: setnp %cl
+; CHECK-NEXT: sete %dl
+; CHECK-NEXT: testb %cl, %dl
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: kmovd %ecx, %k0
+; CHECK-NEXT: kshiftlw $15, %k0, %k0
; CHECK-NEXT: vmovd %eax, %xmm3
-; CHECK-NEXT: vpsrld $16, %xmm5, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: setne %al
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: vcvtph2ps %xmm3, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm3
-; CHECK-NEXT: kmovw %eax, %k0
-; CHECK-NEXT: vucomiss %xmm6, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm5, %xmm6
+; CHECK-NEXT: kshiftrw $14, %k0, %k0
+; CHECK-NEXT: vucomiss %xmm3, %xmm6
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
; CHECK-NEXT: setne %al
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: kshiftlw $15, %k1, %k1
-; CHECK-NEXT: kshiftrw $14, %k1, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: korw %k0, %k1, %k0
; CHECK-NEXT: movw $-5, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vprolq $32, %xmm1, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
-; CHECK-NEXT: vucomiss %xmm4, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT: vucomiss %xmm3, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -63,18 +51,12 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-9, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpsrlq $48, %xmm1, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm6
-; CHECK-NEXT: vpsrlq $48, %xmm5, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
-; CHECK-NEXT: vucomiss %xmm6, %xmm4
+; CHECK-NEXT: vprolq $16, %xmm2, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm4
+; CHECK-NEXT: vprolq $16, %xmm5, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm3
+; CHECK-NEXT: vucomiss %xmm4, %xmm3
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -85,13 +67,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-17, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
-; CHECK-NEXT: vpextrw $0, %xmm6, %eax
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm6, %xmm6
-; CHECK-NEXT: vucomiss %xmm6, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
+; CHECK-NEXT: vucomiss %xmm4, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -102,18 +81,12 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-33, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm6 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm6, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm6, %xmm7
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm6 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm6, %eax
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm4 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: vcvtph2ps %xmm4, %xmm7
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm4 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm6, %xmm6
-; CHECK-NEXT: vucomiss %xmm7, %xmm6
+; CHECK-NEXT: vucomiss %xmm7, %xmm4
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -125,10 +98,7 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-65, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; CHECK-NEXT: vpextrw $0, %xmm7, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm7
+; CHECK-NEXT: vshufps {{.*#+}} xmm7 = xmm2[3,3,3,3]
; CHECK-NEXT: vcvtph2ps %xmm7, %xmm7
; CHECK-NEXT: vucomiss %xmm7, %xmm0
; CHECK-NEXT: setnp %al
@@ -142,15 +112,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-129, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm7 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm7, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm7
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: vcvtph2ps %xmm7, %xmm7
; CHECK-NEXT: vpsrldq {{.*#+}} xmm5 = xmm5[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm5, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm5
; CHECK-NEXT: vcvtph2ps %xmm5, %xmm5
; CHECK-NEXT: vucomiss %xmm7, %xmm5
; CHECK-NEXT: setnp %al
@@ -163,13 +127,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-257, %ax # imm = 0xFEFF
; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm2
+; CHECK-NEXT: vcvtph2ps %xmm2, %xmm7
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1
-; CHECK-NEXT: vpextrw $0, %xmm1, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm7
-; CHECK-NEXT: vcvtph2ps %xmm7, %xmm7
-; CHECK-NEXT: vucomiss %xmm7, %xmm2
+; CHECK-NEXT: vucomiss %xmm7, %xmm6
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -181,12 +142,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-513, %ax # imm = 0xFDFF
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrld $16, %xmm1, %xmm2
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm3
+; CHECK-NEXT: vprold $16, %xmm2, %xmm6
+; CHECK-NEXT: vcvtph2ps %xmm6, %xmm6
+; CHECK-NEXT: vucomiss %xmm6, %xmm1
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -197,13 +155,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-1025, %ax # imm = 0xFBFF
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vprolq $32, %xmm1, %xmm2
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vucomiss %xmm2, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -215,12 +170,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-2049, %ax # imm = 0xF7FF
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrlq $48, %xmm1, %xmm2
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm4
+; CHECK-NEXT: vprolq $16, %xmm2, %xmm1
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm3
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -231,13 +183,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-4097, %ax # imm = 0xEFFF
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -249,12 +198,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-8193, %ax # imm = 0xDFFF
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm6
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm4
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -265,13 +211,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-16385, %ax # imm = 0xBFFF
; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,3,3,3]
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm0
+; CHECK-NEXT: vucomiss %xmm1, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -280,10 +223,7 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: kshiftlw $14, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kshiftlw $1, %k0, %k0
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm0, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm0
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm0 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-NEXT: kshiftrw $1, %k0, %k0
; CHECK-NEXT: vucomiss %xmm0, %xmm5
@@ -294,7 +234,7 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kshiftlw $15, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k1
-; CHECK-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vmovdqa %xmm0, (%rax)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll
index 0c1c3cafc4ea..56e4ec2bc8ec 100644
--- a/llvm/test/CodeGen/X86/pr78897.ll
+++ b/llvm/test/CodeGen/X86/pr78897.ll
@@ -225,7 +225,7 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X86-AVX512-NEXT: pushl %esi
; X86-AVX512-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %xmm0
; X86-AVX512-NEXT: vptestnmb {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %k1
-; X86-AVX512-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1} {z}
+; X86-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X86-AVX512-NEXT: vpextrd $1, %xmm0, %eax
; X86-AVX512-NEXT: vmovd %xmm0, %edx
; X86-AVX512-NEXT: movl $286331152, %ecx # imm = 0x11111110
@@ -258,7 +258,7 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X64-AVX512: # %bb.0: # %entry
; X64-AVX512-NEXT: vpbroadcastb %edi, %xmm0
; X64-AVX512-NEXT: vptestnmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
-; X64-AVX512-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; X64-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X64-AVX512-NEXT: vmovq %xmm0, %rax
; X64-AVX512-NEXT: movabsq $1229782938247303440, %rcx # imm = 0x1111111111111110
; X64-AVX512-NEXT: movabsq $76861433640456465, %rdx # imm = 0x111111111111111
diff --git a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
index 1d8b8b3f9a96..c3d7b2e15d01 100644
--- a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
+++ b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
@@ -176,8 +176,6 @@ define <2 x double> @prefer_f16_v2f64(ptr %p) nounwind {
; AVX512F-LABEL: prefer_f16_v2f64:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpbroadcastw (%rdi), %xmm0
-; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512F-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/select-of-fp-constants.ll b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
index 76b8ea8e2b8a..2cdaa11a2253 100644
--- a/llvm/test/CodeGen/X86/select-of-fp-constants.ll
+++ b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
@@ -86,7 +86,7 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vcmpneqss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
; X64-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [2.3E+1,0.0E+0,0.0E+0,0.0E+0]
-; X64-AVX512F-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1}
+; X64-AVX512F-NEXT: vmovss {{.*#+}} xmm0 {%k1} = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512F-NEXT: retq
%c = fcmp une float %x, -4.0
%r = select i1 %c, float 42.0, float 23.0
diff --git a/llvm/test/CodeGen/X86/select-of-half-constants.ll b/llvm/test/CodeGen/X86/select-of-half-constants.ll
index e22d4c8b792d..e3d92eb47496 100644
--- a/llvm/test/CodeGen/X86/select-of-half-constants.ll
+++ b/llvm/test/CodeGen/X86/select-of-half-constants.ll
@@ -6,9 +6,9 @@
define half @fcmp_select_fp_constants_olt(half %x) nounwind readnone {
; X64-AVX512FP16-LABEL: fcmp_select_fp_constants_olt:
; X64-AVX512FP16: # %bb.0:
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vcmpltsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm0 = [2.3E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1}
; X64-AVX512FP16-NEXT: retq
%c = fcmp olt half %x, -4.0
@@ -19,9 +19,9 @@ define half @fcmp_select_fp_constants_olt(half %x) nounwind readnone {
define half @fcmp_select_fp_constants_ogt(half %x) nounwind readnone {
; X64-AVX512FP16-LABEL: fcmp_select_fp_constants_ogt:
; X64-AVX512FP16: # %bb.0:
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vcmpgtsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm0 = [2.3E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1}
; X64-AVX512FP16-NEXT: retq
%c = fcmp ogt half %x, -4.0
diff --git a/llvm/test/CodeGen/X86/setcc-logic.ll b/llvm/test/CodeGen/X86/setcc-logic.ll
index 3faa493ebccd..c98aae7fbf40 100644
--- a/llvm/test/CodeGen/X86/setcc-logic.ll
+++ b/llvm/test/CodeGen/X86/setcc-logic.ll
@@ -132,15 +132,12 @@ return:
define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_sign_bits_clear_branch:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: js .LBB9_3
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: js .LBB9_3
-; CHECK-NEXT: # %bb.2: # %bb1
+; CHECK-NEXT: orl %esi, %edi
+; CHECK-NEXT: js .LBB9_2
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB9_3: # %return
+; CHECK-NEXT: .LBB9_2: # %return
; CHECK-NEXT: movl $192, %eax
; CHECK-NEXT: retq
entry:
@@ -159,15 +156,13 @@ return:
define i32 @all_bits_set_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_bits_set_branch:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: cmpl $-1, %edi
-; CHECK-NEXT: jne .LBB10_3
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: cmpl $-1, %esi
-; CHECK-NEXT: jne .LBB10_3
-; CHECK-NEXT: # %bb.2: # %bb1
+; CHECK-NEXT: jne .LBB10_2
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB10_3: # %return
+; CHECK-NEXT: .LBB10_2: # %return
; CHECK-NEXT: movl $192, %eax
; CHECK-NEXT: retq
entry:
@@ -186,15 +181,12 @@ return:
define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_sign_bits_set_branch:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: jns .LBB11_3
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: jns .LBB11_3
-; CHECK-NEXT: # %bb.2: # %bb1
+; CHECK-NEXT: testl %esi, %edi
+; CHECK-NEXT: jns .LBB11_2
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB11_3: # %return
+; CHECK-NEXT: .LBB11_2: # %return
; CHECK-NEXT: movl $192, %eax
; CHECK-NEXT: retq
entry:
@@ -238,17 +230,14 @@ return:
define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_sign_bits_set_branch:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: js .LBB13_2
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: js .LBB13_2
-; CHECK-NEXT: # %bb.3: # %return
-; CHECK-NEXT: movl $192, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB13_2: # %bb1
+; CHECK-NEXT: orl %esi, %edi
+; CHECK-NEXT: jns .LBB13_2
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB13_2: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
entry:
%a = icmp slt i32 %P, 0
%b = icmp slt i32 %Q, 0
@@ -265,17 +254,15 @@ return:
define i32 @any_bits_clear_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_bits_clear_branch:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: cmpl $-1, %edi
-; CHECK-NEXT: jne .LBB14_2
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: cmpl $-1, %esi
-; CHECK-NEXT: jne .LBB14_2
-; CHECK-NEXT: # %bb.3: # %return
-; CHECK-NEXT: movl $192, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB14_2: # %bb1
+; CHECK-NEXT: je .LBB14_2
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB14_2: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
entry:
%a = icmp ne i32 %P, -1
%b = icmp ne i32 %Q, -1
@@ -292,17 +279,14 @@ return:
define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_sign_bits_clear_branch:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: jns .LBB15_2
-; CHECK-NEXT: # %bb.1: # %entry
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: jns .LBB15_2
-; CHECK-NEXT: # %bb.3: # %return
-; CHECK-NEXT: movl $192, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB15_2: # %bb1
+; CHECK-NEXT: testl %esi, %edi
+; CHECK-NEXT: js .LBB15_2
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB15_2: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
entry:
%a = icmp sgt i32 %P, -1
%b = icmp sgt i32 %Q, -1
diff --git a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll
index 705e48ca4c9c..90e5c279d2e1 100644
--- a/llvm/test/CodeGen/X86/setcc-lowering.ll
+++ b/llvm/test/CodeGen/X86/setcc-lowering.ll
@@ -1,22 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
-; RUN: llc -mtriple=i386-unknown-linux-gnu -mcpu=knl < %s | FileCheck %s --check-prefix=KNL-32
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc -mtriple=i386-unknown-linux-gnu -mcpu=knl < %s | FileCheck %s --check-prefixes=AVX,KNL-32
; Verify that we don't crash during codegen due to a wrong lowering
; of a setcc node with illegal operand types and return type.
define <8 x i16> @pr25080(<8 x i32> %a) {
-; AVX-LABEL: pr25080:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-LABEL: pr25080:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: pr25080:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [8388607,8388607,8388607,8388607]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
;
; KNL-32-LABEL: pr25080:
; KNL-32: # %bb.0: # %entry
@@ -38,23 +51,40 @@ entry:
}
define void @pr26232(i64 %a, <16 x i1> %b) {
-; AVX-LABEL: pr26232:
-; AVX: # %bb.0: # %allocas
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: .p2align 4, 0x90
-; AVX-NEXT: .LBB1_1: # %for_loop599
-; AVX-NEXT: # =>This Inner Loop Header: Depth=1
-; AVX-NEXT: cmpq $65536, %rdi # imm = 0x10000
-; AVX-NEXT: setl %al
-; AVX-NEXT: vmovd %eax, %xmm2
-; AVX-NEXT: vpshufb %xmm1, %xmm2, %xmm2
-; AVX-NEXT: vpand %xmm0, %xmm2, %xmm2
-; AVX-NEXT: vpsllw $7, %xmm2, %xmm2
-; AVX-NEXT: vpmovmskb %xmm2, %eax
-; AVX-NEXT: testl %eax, %eax
-; AVX-NEXT: jne .LBB1_1
-; AVX-NEXT: # %bb.2: # %for_exit600
-; AVX-NEXT: retq
+; AVX1-LABEL: pr26232:
+; AVX1: # %bb.0: # %allocas
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB1_1: # %for_loop599
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: cmpq $65536, %rdi # imm = 0x10000
+; AVX1-NEXT: setl %al
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2
+; AVX1-NEXT: vpmovmskb %xmm2, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: jne .LBB1_1
+; AVX1-NEXT: # %bb.2: # %for_exit600
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: pr26232:
+; AVX2: # %bb.0: # %allocas
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB1_1: # %for_loop599
+; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX2-NEXT: cmpq $65536, %rdi # imm = 0x10000
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1
+; AVX2-NEXT: vpmovmskb %xmm1, %eax
+; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: jne .LBB1_1
+; AVX2-NEXT: # %bb.2: # %for_exit600
+; AVX2-NEXT: retq
;
; KNL-32-LABEL: pr26232:
; KNL-32: # %bb.0: # %allocas
@@ -108,14 +138,7 @@ define <4 x i32> @pcmpgt(<4 x i8> %x) {
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; KNL-32-LABEL: pcmpgt:
-; KNL-32: # %bb.0:
-; KNL-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; KNL-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; KNL-32-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; KNL-32-NEXT: retl
+; AVX-NEXT: ret{{[l|q]}}
%zext = zext <4 x i8> %x to <4 x i32>
%icmp = icmp ne <4 x i32> %zext, zeroinitializer
%sext = sext <4 x i1> %icmp to <4 x i32>
diff --git a/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll
index fdb2f41ec0e4..d644ed87c3c1 100644
--- a/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll
@@ -267,11 +267,13 @@ define <3 x i1> @test_srem_vec(<3 x i33> %X) nounwind {
; SSE41-NEXT: pcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: movl $3, %eax
+; SSE41-NEXT: movq %rax, %xmm3
+; SSE41-NEXT: pcmpeqq %xmm2, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm3
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: pextrb $8, %xmm0, %edx
-; SSE41-NEXT: pextrb $0, %xmm2, %ecx
+; SSE41-NEXT: pextrb $0, %xmm3, %ecx
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: # kill: def $dl killed $dl killed $edx
; SSE41-NEXT: # kill: def $cl killed $cl killed $ecx
@@ -318,7 +320,9 @@ define <3 x i1> @test_srem_vec(<3 x i33> %X) nounwind {
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: movl $3, %eax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/swifterror.ll b/llvm/test/CodeGen/X86/swifterror.ll
index 75252309790b..1489b0295e93 100644
--- a/llvm/test/CodeGen/X86/swifterror.ll
+++ b/llvm/test/CodeGen/X86/swifterror.ll
@@ -1259,12 +1259,7 @@ entry:
define swiftcc void @dont_crash_on_new_isel_blocks(ptr nocapture swifterror, i1, ptr) {
; CHECK-APPLE-LABEL: dont_crash_on_new_isel_blocks:
; CHECK-APPLE: ## %bb.0: ## %entry
-; CHECK-APPLE-NEXT: xorl %eax, %eax
-; CHECK-APPLE-NEXT: testb %al, %al
-; CHECK-APPLE-NEXT: jne LBB15_2
-; CHECK-APPLE-NEXT: ## %bb.1: ## %entry
; CHECK-APPLE-NEXT: testb $1, %dil
-; CHECK-APPLE-NEXT: LBB15_2: ## %cont
; CHECK-APPLE-NEXT: pushq %rax
; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 16
; CHECK-APPLE-NEXT: callq *%rax
@@ -1290,12 +1285,7 @@ define swiftcc void @dont_crash_on_new_isel_blocks(ptr nocapture swifterror, i1,
;
; CHECK-i386-LABEL: dont_crash_on_new_isel_blocks:
; CHECK-i386: ## %bb.0: ## %entry
-; CHECK-i386-NEXT: xorl %eax, %eax
-; CHECK-i386-NEXT: testb %al, %al
-; CHECK-i386-NEXT: jne LBB15_2
-; CHECK-i386-NEXT: ## %bb.1: ## %entry
; CHECK-i386-NEXT: testb $1, 8(%esp)
-; CHECK-i386-NEXT: LBB15_2: ## %cont
; CHECK-i386-NEXT: jmpl *%eax ## TAILCALL
entry:
%3 = or i1 false, %1
diff --git a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
index 9cd373151812..8d84e887d3f2 100644
--- a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
@@ -91,116 +91,97 @@ define i32 @loop_shared_header(ptr %exe, i32 %exesz, i32 %headsize, i32 %min, i3
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r15
; CHECK-NEXT: pushq %r14
-; CHECK-NEXT: pushq %r13
; CHECK-NEXT: pushq %r12
; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: pushq %rax
; CHECK-NEXT: movl $1, %ebx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne .LBB1_24
+; CHECK-NEXT: jne .LBB1_12
; CHECK-NEXT: # %bb.1: # %if.end19
-; CHECK-NEXT: movl %esi, %ebp
-; CHECK-NEXT: movq %rdi, %r15
-; CHECK-NEXT: movl (%rax), %r13d
-; CHECK-NEXT: leal (,%r13,4), %ebx
-; CHECK-NEXT: movl %ebx, %r12d
+; CHECK-NEXT: movl (%rax), %r12d
+; CHECK-NEXT: leal (,%r12,4), %ebp
+; CHECK-NEXT: movl %ebp, %r15d
; CHECK-NEXT: movl $1, %esi
-; CHECK-NEXT: movq %r12, %rdi
+; CHECK-NEXT: movq %r15, %rdi
; CHECK-NEXT: callq cli_calloc@PLT
-; CHECK-NEXT: testl %ebp, %ebp
-; CHECK-NEXT: je .LBB1_23
-; CHECK-NEXT: # %bb.2: # %if.end19
-; CHECK-NEXT: testl %r13d, %r13d
-; CHECK-NEXT: je .LBB1_23
-; CHECK-NEXT: # %bb.3: # %if.end19
; CHECK-NEXT: movq %rax, %r14
-; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne .LBB1_23
-; CHECK-NEXT: # %bb.4: # %if.end19
-; CHECK-NEXT: cmpq %r15, %r14
-; CHECK-NEXT: jb .LBB1_23
-; CHECK-NEXT: # %bb.5: # %if.end50
+; CHECK-NEXT: jne .LBB1_12
+; CHECK-NEXT: # %bb.2: # %if.end50
; CHECK-NEXT: movq %r14, %rdi
-; CHECK-NEXT: movq %r12, %rdx
+; CHECK-NEXT: movq %r15, %rdx
; CHECK-NEXT: callq memcpy@PLT
-; CHECK-NEXT: cmpl $4, %ebx
-; CHECK-NEXT: jb .LBB1_26
-; CHECK-NEXT: # %bb.6: # %shared_preheader
+; CHECK-NEXT: cmpl $4, %ebp
+; CHECK-NEXT: jb .LBB1_19
+; CHECK-NEXT: # %bb.3: # %shared_preheader
; CHECK-NEXT: movb $32, %cl
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: jmp .LBB1_8
+; CHECK-NEXT: jmp .LBB1_4
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB1_7: # %merge_predecessor_split
-; CHECK-NEXT: # in Loop: Header=BB1_8 Depth=1
+; CHECK-NEXT: .LBB1_15: # %merge_predecessor_split
+; CHECK-NEXT: # in Loop: Header=BB1_4 Depth=1
; CHECK-NEXT: movb $32, %cl
-; CHECK-NEXT: .LBB1_8: # %outer_loop_header
+; CHECK-NEXT: .LBB1_4: # %outer_loop_header
; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB1_9 Depth 2
-; CHECK-NEXT: testl %r13d, %r13d
-; CHECK-NEXT: je .LBB1_16
+; CHECK-NEXT: # Child Loop BB1_8 Depth 2
+; CHECK-NEXT: testl %r12d, %r12d
+; CHECK-NEXT: je .LBB1_5
; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB1_9: # %shared_loop_header
-; CHECK-NEXT: # Parent Loop BB1_8 Depth=1
+; CHECK-NEXT: .LBB1_8: # %shared_loop_header
+; CHECK-NEXT: # Parent Loop BB1_4 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: testq %r14, %r14
-; CHECK-NEXT: jne .LBB1_25
-; CHECK-NEXT: # %bb.10: # %inner_loop_body
-; CHECK-NEXT: # in Loop: Header=BB1_9 Depth=2
+; CHECK-NEXT: jne .LBB1_18
+; CHECK-NEXT: # %bb.9: # %inner_loop_body
+; CHECK-NEXT: # in Loop: Header=BB1_8 Depth=2
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: je .LBB1_9
-; CHECK-NEXT: # %bb.11: # %if.end96.i
-; CHECK-NEXT: # in Loop: Header=BB1_8 Depth=1
-; CHECK-NEXT: cmpl $3, %r13d
-; CHECK-NEXT: jae .LBB1_20
-; CHECK-NEXT: # %bb.12: # %if.end287.i
-; CHECK-NEXT: # in Loop: Header=BB1_8 Depth=1
+; CHECK-NEXT: je .LBB1_8
+; CHECK-NEXT: # %bb.10: # %if.end96.i
+; CHECK-NEXT: # in Loop: Header=BB1_4 Depth=1
+; CHECK-NEXT: cmpl $3, %r12d
+; CHECK-NEXT: jae .LBB1_11
+; CHECK-NEXT: # %bb.13: # %if.end287.i
+; CHECK-NEXT: # in Loop: Header=BB1_4 Depth=1
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: # implicit-def: $cl
-; CHECK-NEXT: jne .LBB1_8
-; CHECK-NEXT: # %bb.13: # %if.end308.i
-; CHECK-NEXT: # in Loop: Header=BB1_8 Depth=1
+; CHECK-NEXT: jne .LBB1_4
+; CHECK-NEXT: # %bb.14: # %if.end308.i
+; CHECK-NEXT: # in Loop: Header=BB1_4 Depth=1
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: je .LBB1_7
-; CHECK-NEXT: # %bb.14: # %if.end335.i
-; CHECK-NEXT: # in Loop: Header=BB1_8 Depth=1
+; CHECK-NEXT: je .LBB1_15
+; CHECK-NEXT: # %bb.16: # %if.end335.i
+; CHECK-NEXT: # in Loop: Header=BB1_4 Depth=1
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: testb %cl, %cl
-; CHECK-NEXT: jne .LBB1_8
-; CHECK-NEXT: # %bb.15: # %merge_other
-; CHECK-NEXT: # in Loop: Header=BB1_8 Depth=1
+; CHECK-NEXT: jne .LBB1_4
+; CHECK-NEXT: # %bb.17: # %merge_other
+; CHECK-NEXT: # in Loop: Header=BB1_4 Depth=1
; CHECK-NEXT: # implicit-def: $cl
-; CHECK-NEXT: jmp .LBB1_8
-; CHECK-NEXT: .LBB1_23:
-; CHECK-NEXT: movl $1, %ebx
-; CHECK-NEXT: jmp .LBB1_24
-; CHECK-NEXT: .LBB1_16: # %while.cond.us1412.i
+; CHECK-NEXT: jmp .LBB1_4
+; CHECK-NEXT: .LBB1_5: # %while.cond.us1412.i
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: movl $1, %ebx
-; CHECK-NEXT: jne .LBB1_18
-; CHECK-NEXT: # %bb.17: # %while.cond.us1412.i
+; CHECK-NEXT: jne .LBB1_7
+; CHECK-NEXT: # %bb.6: # %while.cond.us1412.i
; CHECK-NEXT: decb %cl
-; CHECK-NEXT: jne .LBB1_24
-; CHECK-NEXT: .LBB1_18: # %if.end41.us1436.i
-; CHECK-NEXT: .LBB1_20: # %if.then99.i
+; CHECK-NEXT: jne .LBB1_12
+; CHECK-NEXT: .LBB1_7: # %if.end41.us1436.i
+; CHECK-NEXT: .LBB1_11: # %if.then99.i
; CHECK-NEXT: movq .str.6@GOTPCREL(%rip), %rdi
; CHECK-NEXT: xorl %ebx, %ebx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: callq cli_dbgmsg@PLT
-; CHECK-NEXT: .LBB1_24: # %cleanup
+; CHECK-NEXT: .LBB1_12: # %cleanup
; CHECK-NEXT: movl %ebx, %eax
-; CHECK-NEXT: addq $8, %rsp
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r12
-; CHECK-NEXT: popq %r13
; CHECK-NEXT: popq %r14
; CHECK-NEXT: popq %r15
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB1_25: # %wunpsect.exit.thread.loopexit389
-; CHECK-NEXT: .LBB1_26: # %wunpsect.exit.thread.loopexit391
+; CHECK-NEXT: .LBB1_18: # %wunpsect.exit.thread.loopexit389
+; CHECK-NEXT: .LBB1_19: # %wunpsect.exit.thread.loopexit391
entry:
%0 = load i32, ptr undef, align 4
%mul = shl nsw i32 %0, 2
diff --git a/llvm/test/CodeGen/X86/tail-opts.ll b/llvm/test/CodeGen/X86/tail-opts.ll
index d54110d1fa81..d9ab2f7d1f5f 100644
--- a/llvm/test/CodeGen/X86/tail-opts.ll
+++ b/llvm/test/CodeGen/X86/tail-opts.ll
@@ -300,10 +300,9 @@ define fastcc void @c_expand_expr_stmt(ptr %expr) nounwind {
; CHECK-NEXT: cmpl $23, %ecx
; CHECK-NEXT: jne .LBB3_9
; CHECK-NEXT: .LBB3_16: # %lvalue_p.exit4
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne .LBB3_9
-; CHECK-NEXT: # %bb.17: # %lvalue_p.exit4
; CHECK-NEXT: testb %bl, %bl
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: orb %al, %cl
entry:
%tmp4 = load i8, ptr null, align 8 ; <i8> [#uses=3]
switch i8 %tmp4, label %bb3 [
diff --git a/llvm/test/CodeGen/X86/tailcall-extract.ll b/llvm/test/CodeGen/X86/tailcall-extract.ll
index 7a6c75c44ca7..aff6146198c8 100644
--- a/llvm/test/CodeGen/X86/tailcall-extract.ll
+++ b/llvm/test/CodeGen/X86/tailcall-extract.ll
@@ -6,7 +6,7 @@
; containing call. And later tail call can be generated.
; CHECK-LABEL: test1:
-; CHECK: je foo # TAILCALL
+; CHECK: jne foo # TAILCALL
; CHECK: jmp bar # TAILCALL
; OPT-LABEL: test1
@@ -48,8 +48,8 @@ exit:
; can't be duplicated.
; CHECK-LABEL: test2:
-; CHECK: callq bar
; CHECK: callq foo
+; CHECK: callq bar
; OPT-LABEL: test2
; OPT: if.then.i:
@@ -93,7 +93,7 @@ exit:
; offset, so the exit block can still be duplicated, and tail call generated.
; CHECK-LABEL: test3:
-; CHECK: je qux # TAILCALL
+; CHECK: jne qux # TAILCALL
; CHECK: jmp baz # TAILCALL
; OPT-LABEL: test3
@@ -136,8 +136,8 @@ exit:
; block can't be duplicated.
; CHECK-LABEL: test4:
-; CHECK: callq baz
; CHECK: callq qux
+; CHECK: callq baz
; OPT-LABEL: test4
; OPT: if.then.i:
diff --git a/llvm/test/CodeGen/X86/test-shrink-bug.ll b/llvm/test/CodeGen/X86/test-shrink-bug.ll
index 74c5179b1878..953a0d65c538 100644
--- a/llvm/test/CodeGen/X86/test-shrink-bug.ll
+++ b/llvm/test/CodeGen/X86/test-shrink-bug.ll
@@ -48,37 +48,39 @@ define dso_local void @fail(i16 %a, <2 x i8> %b) {
; CHECK-X86: ## %bb.0:
; CHECK-X86-NEXT: subl $12, %esp
; CHECK-X86-NEXT: .cfi_def_cfa_offset 16
-; CHECK-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; CHECK-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-X86-NEXT: cmpb $123, {{[0-9]+}}(%esp)
-; CHECK-X86-NEXT: sete %al
-; CHECK-X86-NEXT: testl $263, %ecx ## imm = 0x107
-; CHECK-X86-NEXT: je LBB1_3
-; CHECK-X86-NEXT: ## %bb.1:
-; CHECK-X86-NEXT: testb %al, %al
-; CHECK-X86-NEXT: jne LBB1_3
-; CHECK-X86-NEXT: ## %bb.2: ## %no
+; CHECK-X86-NEXT: setne %cl
+; CHECK-X86-NEXT: testl $263, %eax ## imm = 0x107
+; CHECK-X86-NEXT: setne %al
+; CHECK-X86-NEXT: testb %cl, %al
+; CHECK-X86-NEXT: jne LBB1_2
+; CHECK-X86-NEXT: ## %bb.1: ## %yes
+; CHECK-X86-NEXT: addl $12, %esp
+; CHECK-X86-NEXT: retl
+; CHECK-X86-NEXT: LBB1_2: ## %no
; CHECK-X86-NEXT: calll _bar
-; CHECK-X86-NEXT: LBB1_3: ## %yes
; CHECK-X86-NEXT: addl $12, %esp
; CHECK-X86-NEXT: retl
;
; CHECK-X64-LABEL: fail:
; CHECK-X64: # %bb.0:
-; CHECK-X64-NEXT: testl $263, %edi # imm = 0x107
-; CHECK-X64-NEXT: je .LBB1_3
-; CHECK-X64-NEXT: # %bb.1:
-; CHECK-X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; CHECK-X64-NEXT: pslld $8, %xmm0
; CHECK-X64-NEXT: pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-X64-NEXT: pextrw $1, %xmm0, %eax
-; CHECK-X64-NEXT: testb $1, %al
-; CHECK-X64-NEXT: jne .LBB1_3
-; CHECK-X64-NEXT: # %bb.2: # %no
+; CHECK-X64-NEXT: xorb $1, %al
+; CHECK-X64-NEXT: testl $263, %edi # imm = 0x107
+; CHECK-X64-NEXT: setne %cl
+; CHECK-X64-NEXT: testb %al, %cl
+; CHECK-X64-NEXT: jne .LBB1_2
+; CHECK-X64-NEXT: # %bb.1: # %yes
+; CHECK-X64-NEXT: retq
+; CHECK-X64-NEXT: .LBB1_2: # %no
; CHECK-X64-NEXT: pushq %rax
; CHECK-X64-NEXT: .cfi_def_cfa_offset 16
; CHECK-X64-NEXT: callq bar@PLT
; CHECK-X64-NEXT: popq %rax
; CHECK-X64-NEXT: .cfi_def_cfa_offset 8
-; CHECK-X64-NEXT: .LBB1_3: # %yes
; CHECK-X64-NEXT: retq
%1 = icmp eq <2 x i8> %b, <i8 40, i8 123>
%2 = extractelement <2 x i1> %1, i32 1
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll
index a15de8b8e0f6..6a36cd2a86d5 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll
@@ -264,7 +264,6 @@ define <4 x i1> @t32_tautological(<4 x i32> %X) nounwind {
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
index 6d99bedd40b9..cdeca96732dc 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
@@ -25,13 +25,7 @@ define <4 x i1> @t0_all_tautological(<4 x i32> %X) nounwind {
define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_eq:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -82,13 +76,7 @@ define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_ne:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -256,7 +244,9 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX1-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: movabsq $-3074457345618258603, %rax # imm = 0xD555555555555555
+; CHECK-AVX1-NEXT: vmovq %rax, %xmm1
+; CHECK-AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -273,7 +263,9 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: movabsq $-3074457345618258603, %rax # imm = 0xD555555555555555
+; CHECK-AVX2-NEXT: vmovq %rax, %xmm1
+; CHECK-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index f59960f06f4a..ba21af231985 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -21,15 +21,13 @@ define float @cvt_i16_to_f32(i16 %a0) nounwind {
;
; F16C-LABEL: cvt_i16_to_f32:
; F16C: # %bb.0:
-; F16C-NEXT: movzwl %di, %eax
-; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vmovd %edi, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_i16_to_f32:
; AVX512: # %bb.0:
-; AVX512-NEXT: movzwl %di, %eax
-; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vmovd %edi, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = bitcast i16 %a0 to half
@@ -1370,16 +1368,14 @@ define double @cvt_i16_to_f64(i16 %a0) nounwind {
;
; F16C-LABEL: cvt_i16_to_f64:
; F16C: # %bb.0:
-; F16C-NEXT: movzwl %di, %eax
-; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vmovd %edi, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_i16_to_f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: movzwl %di, %eax
-; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vmovd %edi, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -1410,14 +1406,12 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
;
; F16C-LABEL: cvt_2i16_to_2f64:
; F16C: # %bb.0:
-; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_2i16_to_2f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -1503,14 +1497,12 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
;
; F16C-LABEL: cvt_8i16_to_2f64:
; F16C: # %bb.0:
-; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_2f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -1877,16 +1869,14 @@ define <2 x double> @load_cvt_2i16_to_2f64(ptr %a0) nounwind {
;
; F16C-LABEL: load_cvt_2i16_to_2f64:
; F16C: # %bb.0:
-; F16C-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; F16C-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_2i16_to_2f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -4976,32 +4966,22 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
;
; F16C-LABEL: fptosi_2f16_to_4i32:
; F16C: # %bb.0:
-; F16C-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-NEXT: movzwl %ax, %eax
-; F16C-NEXT: vmovd %eax, %xmm1
+; F16C-NEXT: vpsrld $16, %xmm0, %xmm1
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; F16C-NEXT: vpsrld $16, %xmm0, %xmm0
-; F16C-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-NEXT: movzwl %ax, %eax
-; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; F16C-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; F16C-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
; F16C-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; F16C-NEXT: retq
;
; AVX512-LABEL: fptosi_2f16_to_4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512-NEXT: movzwl %ax, %eax
-; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX512-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512-NEXT: movzwl %ax, %eax
-; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX512-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll b/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll
index 8aafec7427b4..c3d5a4b32edb 100644
--- a/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll
+++ b/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll
@@ -16972,7 +16972,6 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -16995,7 +16994,6 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -17014,7 +17012,6 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -17033,7 +17030,6 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -17126,8 +17122,7 @@ define <2 x i64> @ult_3_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483651,2147483651,2147483651,2147483651]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [3,3,3,3]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -17150,8 +17145,7 @@ define <2 x i64> @ult_3_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483651,2147483651,2147483651,2147483651]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [3,3,3,3]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -17170,8 +17164,7 @@ define <2 x i64> @ult_3_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483651,2147483651,2147483651,2147483651]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [3,3,3,3]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -17190,8 +17183,7 @@ define <2 x i64> @ult_3_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483651,2147483651,2147483651,2147483651]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [3,3,3,3]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -17288,7 +17280,6 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -17311,7 +17302,6 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -17330,7 +17320,6 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -17349,7 +17338,6 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -17442,8 +17430,7 @@ define <2 x i64> @ult_4_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483652,2147483652,2147483652,2147483652]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [4,4,4,4]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -17466,8 +17453,7 @@ define <2 x i64> @ult_4_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483652,2147483652,2147483652,2147483652]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [4,4,4,4]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -17486,8 +17472,7 @@ define <2 x i64> @ult_4_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483652,2147483652,2147483652,2147483652]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [4,4,4,4]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -17506,8 +17491,7 @@ define <2 x i64> @ult_4_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483652,2147483652,2147483652,2147483652]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [4,4,4,4]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -17604,7 +17588,6 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -17627,7 +17610,6 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -17646,7 +17628,6 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -17665,7 +17646,6 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -17758,8 +17738,7 @@ define <2 x i64> @ult_5_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483653,2147483653,2147483653,2147483653]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [5,5,5,5]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -17782,8 +17761,7 @@ define <2 x i64> @ult_5_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483653,2147483653,2147483653,2147483653]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [5,5,5,5]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -17802,8 +17780,7 @@ define <2 x i64> @ult_5_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483653,2147483653,2147483653,2147483653]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [5,5,5,5]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -17822,8 +17799,7 @@ define <2 x i64> @ult_5_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483653,2147483653,2147483653,2147483653]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [5,5,5,5]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -17920,7 +17896,6 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -17943,7 +17918,6 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -17962,7 +17936,6 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -17981,7 +17954,6 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -18074,8 +18046,7 @@ define <2 x i64> @ult_6_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483654,2147483654,2147483654,2147483654]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [6,6,6,6]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -18098,8 +18069,7 @@ define <2 x i64> @ult_6_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483654,2147483654,2147483654,2147483654]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [6,6,6,6]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -18118,8 +18088,7 @@ define <2 x i64> @ult_6_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483654,2147483654,2147483654,2147483654]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [6,6,6,6]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -18138,8 +18107,7 @@ define <2 x i64> @ult_6_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483654,2147483654,2147483654,2147483654]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [6,6,6,6]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -18236,7 +18204,6 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -18259,7 +18226,6 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -18278,7 +18244,6 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -18297,7 +18262,6 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -18390,8 +18354,7 @@ define <2 x i64> @ult_7_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483655,2147483655,2147483655,2147483655]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [7,7,7,7]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -18414,8 +18377,7 @@ define <2 x i64> @ult_7_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483655,2147483655,2147483655,2147483655]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [7,7,7,7]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -18434,8 +18396,7 @@ define <2 x i64> @ult_7_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483655,2147483655,2147483655,2147483655]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [7,7,7,7]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -18454,8 +18415,7 @@ define <2 x i64> @ult_7_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483655,2147483655,2147483655,2147483655]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [7,7,7,7]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -18552,7 +18512,6 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -18575,7 +18534,6 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -18594,7 +18552,6 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -18613,7 +18570,6 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -18706,8 +18662,7 @@ define <2 x i64> @ult_8_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483656,2147483656,2147483656,2147483656]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -18730,8 +18685,7 @@ define <2 x i64> @ult_8_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483656,2147483656,2147483656,2147483656]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -18750,8 +18704,7 @@ define <2 x i64> @ult_8_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483656,2147483656,2147483656,2147483656]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -18770,8 +18723,7 @@ define <2 x i64> @ult_8_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483656,2147483656,2147483656,2147483656]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [8,8,8,8]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -18868,7 +18820,6 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -18891,7 +18842,6 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -18910,7 +18860,6 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -18929,7 +18878,6 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -19022,8 +18970,7 @@ define <2 x i64> @ult_9_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483657,2147483657,2147483657,2147483657]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [9,9,9,9]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -19046,8 +18993,7 @@ define <2 x i64> @ult_9_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483657,2147483657,2147483657,2147483657]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [9,9,9,9]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -19066,8 +19012,7 @@ define <2 x i64> @ult_9_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483657,2147483657,2147483657,2147483657]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [9,9,9,9]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -19086,8 +19031,7 @@ define <2 x i64> @ult_9_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483657,2147483657,2147483657,2147483657]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [9,9,9,9]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -19184,7 +19128,6 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -19207,7 +19150,6 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -19226,7 +19168,6 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -19245,7 +19186,6 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -19338,8 +19278,7 @@ define <2 x i64> @ult_10_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483658,2147483658,2147483658,2147483658]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [10,10,10,10]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -19362,8 +19301,7 @@ define <2 x i64> @ult_10_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483658,2147483658,2147483658,2147483658]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [10,10,10,10]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -19382,8 +19320,7 @@ define <2 x i64> @ult_10_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483658,2147483658,2147483658,2147483658]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [10,10,10,10]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -19402,8 +19339,7 @@ define <2 x i64> @ult_10_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483658,2147483658,2147483658,2147483658]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [10,10,10,10]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -19500,7 +19436,6 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -19523,7 +19458,6 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -19542,7 +19476,6 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -19561,7 +19494,6 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -19654,8 +19586,7 @@ define <2 x i64> @ult_11_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483659,2147483659,2147483659,2147483659]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [11,11,11,11]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -19678,8 +19609,7 @@ define <2 x i64> @ult_11_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483659,2147483659,2147483659,2147483659]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [11,11,11,11]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -19698,8 +19628,7 @@ define <2 x i64> @ult_11_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483659,2147483659,2147483659,2147483659]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [11,11,11,11]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -19718,8 +19647,7 @@ define <2 x i64> @ult_11_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483659,2147483659,2147483659,2147483659]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [11,11,11,11]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -19816,7 +19744,6 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -19839,7 +19766,6 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -19858,7 +19784,6 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -19877,7 +19802,6 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -19970,8 +19894,7 @@ define <2 x i64> @ult_12_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483660,2147483660,2147483660,2147483660]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [12,12,12,12]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -19994,8 +19917,7 @@ define <2 x i64> @ult_12_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483660,2147483660,2147483660,2147483660]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [12,12,12,12]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -20014,8 +19936,7 @@ define <2 x i64> @ult_12_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483660,2147483660,2147483660,2147483660]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [12,12,12,12]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -20034,8 +19955,7 @@ define <2 x i64> @ult_12_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483660,2147483660,2147483660,2147483660]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [12,12,12,12]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -20132,7 +20052,6 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -20155,7 +20074,6 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -20174,7 +20092,6 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -20193,7 +20110,6 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -20286,8 +20202,7 @@ define <2 x i64> @ult_13_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483661,2147483661,2147483661,2147483661]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [13,13,13,13]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -20310,8 +20225,7 @@ define <2 x i64> @ult_13_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483661,2147483661,2147483661,2147483661]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [13,13,13,13]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -20330,8 +20244,7 @@ define <2 x i64> @ult_13_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483661,2147483661,2147483661,2147483661]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [13,13,13,13]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -20350,8 +20263,7 @@ define <2 x i64> @ult_13_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483661,2147483661,2147483661,2147483661]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [13,13,13,13]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -20448,7 +20360,6 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -20471,7 +20382,6 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -20490,7 +20400,6 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -20509,7 +20418,6 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -20602,8 +20510,7 @@ define <2 x i64> @ult_14_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483662,2147483662,2147483662,2147483662]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [14,14,14,14]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -20626,8 +20533,7 @@ define <2 x i64> @ult_14_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483662,2147483662,2147483662,2147483662]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [14,14,14,14]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -20646,8 +20552,7 @@ define <2 x i64> @ult_14_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483662,2147483662,2147483662,2147483662]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [14,14,14,14]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -20666,8 +20571,7 @@ define <2 x i64> @ult_14_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483662,2147483662,2147483662,2147483662]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [14,14,14,14]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -20764,7 +20668,6 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -20787,7 +20690,6 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -20806,7 +20708,6 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -20825,7 +20726,6 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -20918,8 +20818,7 @@ define <2 x i64> @ult_15_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483663,2147483663,2147483663,2147483663]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -20942,8 +20841,7 @@ define <2 x i64> @ult_15_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483663,2147483663,2147483663,2147483663]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -20962,8 +20860,7 @@ define <2 x i64> @ult_15_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483663,2147483663,2147483663,2147483663]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -20982,8 +20879,7 @@ define <2 x i64> @ult_15_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483663,2147483663,2147483663,2147483663]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [15,15,15,15]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -21080,7 +20976,6 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -21103,7 +20998,6 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -21122,7 +21016,6 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -21141,7 +21034,6 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -21234,8 +21126,7 @@ define <2 x i64> @ult_16_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483664,2147483664,2147483664,2147483664]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -21258,8 +21149,7 @@ define <2 x i64> @ult_16_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483664,2147483664,2147483664,2147483664]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -21278,8 +21168,7 @@ define <2 x i64> @ult_16_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483664,2147483664,2147483664,2147483664]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -21298,8 +21187,7 @@ define <2 x i64> @ult_16_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483664,2147483664,2147483664,2147483664]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [16,16,16,16]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -21396,7 +21284,6 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -21419,7 +21306,6 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -21438,7 +21324,6 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -21457,7 +21342,6 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -21550,8 +21434,7 @@ define <2 x i64> @ult_17_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483665,2147483665,2147483665,2147483665]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [17,17,17,17]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -21574,8 +21457,7 @@ define <2 x i64> @ult_17_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483665,2147483665,2147483665,2147483665]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [17,17,17,17]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -21594,8 +21476,7 @@ define <2 x i64> @ult_17_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483665,2147483665,2147483665,2147483665]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [17,17,17,17]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -21614,8 +21495,7 @@ define <2 x i64> @ult_17_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483665,2147483665,2147483665,2147483665]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [17,17,17,17]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -21712,7 +21592,6 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -21735,7 +21614,6 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -21754,7 +21632,6 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -21773,7 +21650,6 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -21866,8 +21742,7 @@ define <2 x i64> @ult_18_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483666,2147483666,2147483666,2147483666]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [18,18,18,18]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -21890,8 +21765,7 @@ define <2 x i64> @ult_18_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483666,2147483666,2147483666,2147483666]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [18,18,18,18]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -21910,8 +21784,7 @@ define <2 x i64> @ult_18_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483666,2147483666,2147483666,2147483666]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [18,18,18,18]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -21930,8 +21803,7 @@ define <2 x i64> @ult_18_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483666,2147483666,2147483666,2147483666]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [18,18,18,18]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -22028,7 +21900,6 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -22051,7 +21922,6 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -22070,7 +21940,6 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -22089,7 +21958,6 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -22182,8 +22050,7 @@ define <2 x i64> @ult_19_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483667,2147483667,2147483667,2147483667]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [19,19,19,19]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -22206,8 +22073,7 @@ define <2 x i64> @ult_19_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483667,2147483667,2147483667,2147483667]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [19,19,19,19]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -22226,8 +22092,7 @@ define <2 x i64> @ult_19_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483667,2147483667,2147483667,2147483667]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [19,19,19,19]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -22246,8 +22111,7 @@ define <2 x i64> @ult_19_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483667,2147483667,2147483667,2147483667]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [19,19,19,19]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -22344,7 +22208,6 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -22367,7 +22230,6 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -22386,7 +22248,6 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -22405,7 +22266,6 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -22498,8 +22358,7 @@ define <2 x i64> @ult_20_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483668,2147483668,2147483668,2147483668]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [20,20,20,20]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -22522,8 +22381,7 @@ define <2 x i64> @ult_20_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483668,2147483668,2147483668,2147483668]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [20,20,20,20]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -22542,8 +22400,7 @@ define <2 x i64> @ult_20_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483668,2147483668,2147483668,2147483668]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [20,20,20,20]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -22562,8 +22419,7 @@ define <2 x i64> @ult_20_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483668,2147483668,2147483668,2147483668]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [20,20,20,20]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -22660,7 +22516,6 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -22683,7 +22538,6 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -22702,7 +22556,6 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -22721,7 +22574,6 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -22814,8 +22666,7 @@ define <2 x i64> @ult_21_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483669,2147483669,2147483669,2147483669]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [21,21,21,21]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -22838,8 +22689,7 @@ define <2 x i64> @ult_21_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483669,2147483669,2147483669,2147483669]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [21,21,21,21]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -22858,8 +22708,7 @@ define <2 x i64> @ult_21_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483669,2147483669,2147483669,2147483669]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [21,21,21,21]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -22878,8 +22727,7 @@ define <2 x i64> @ult_21_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483669,2147483669,2147483669,2147483669]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [21,21,21,21]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -22976,7 +22824,6 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -22999,7 +22846,6 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -23018,7 +22864,6 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -23037,7 +22882,6 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -23130,8 +22974,7 @@ define <2 x i64> @ult_22_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483670,2147483670,2147483670,2147483670]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [22,22,22,22]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -23154,8 +22997,7 @@ define <2 x i64> @ult_22_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483670,2147483670,2147483670,2147483670]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [22,22,22,22]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -23174,8 +23016,7 @@ define <2 x i64> @ult_22_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483670,2147483670,2147483670,2147483670]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [22,22,22,22]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -23194,8 +23035,7 @@ define <2 x i64> @ult_22_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483670,2147483670,2147483670,2147483670]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [22,22,22,22]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -23292,7 +23132,6 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -23315,7 +23154,6 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -23334,7 +23172,6 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -23353,7 +23190,6 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -23446,8 +23282,7 @@ define <2 x i64> @ult_23_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483671,2147483671,2147483671,2147483671]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [23,23,23,23]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -23470,8 +23305,7 @@ define <2 x i64> @ult_23_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483671,2147483671,2147483671,2147483671]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [23,23,23,23]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -23490,8 +23324,7 @@ define <2 x i64> @ult_23_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483671,2147483671,2147483671,2147483671]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [23,23,23,23]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -23510,8 +23343,7 @@ define <2 x i64> @ult_23_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483671,2147483671,2147483671,2147483671]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [23,23,23,23]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -23608,7 +23440,6 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -23631,7 +23462,6 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -23650,7 +23480,6 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -23669,7 +23498,6 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -23762,8 +23590,7 @@ define <2 x i64> @ult_24_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483672,2147483672,2147483672,2147483672]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [24,24,24,24]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -23786,8 +23613,7 @@ define <2 x i64> @ult_24_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483672,2147483672,2147483672,2147483672]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [24,24,24,24]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -23806,8 +23632,7 @@ define <2 x i64> @ult_24_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483672,2147483672,2147483672,2147483672]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [24,24,24,24]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -23826,8 +23651,7 @@ define <2 x i64> @ult_24_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483672,2147483672,2147483672,2147483672]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [24,24,24,24]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -23924,7 +23748,6 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -23947,7 +23770,6 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -23966,7 +23788,6 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -23985,7 +23806,6 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -24078,8 +23898,7 @@ define <2 x i64> @ult_25_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483673,2147483673,2147483673,2147483673]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [25,25,25,25]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -24102,8 +23921,7 @@ define <2 x i64> @ult_25_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483673,2147483673,2147483673,2147483673]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [25,25,25,25]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -24122,8 +23940,7 @@ define <2 x i64> @ult_25_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483673,2147483673,2147483673,2147483673]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [25,25,25,25]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -24142,8 +23959,7 @@ define <2 x i64> @ult_25_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483673,2147483673,2147483673,2147483673]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [25,25,25,25]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -24240,7 +24056,6 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -24263,7 +24078,6 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -24282,7 +24096,6 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -24301,7 +24114,6 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -24394,8 +24206,7 @@ define <2 x i64> @ult_26_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483674,2147483674,2147483674,2147483674]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [26,26,26,26]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -24418,8 +24229,7 @@ define <2 x i64> @ult_26_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483674,2147483674,2147483674,2147483674]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [26,26,26,26]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -24438,8 +24248,7 @@ define <2 x i64> @ult_26_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483674,2147483674,2147483674,2147483674]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [26,26,26,26]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -24458,8 +24267,7 @@ define <2 x i64> @ult_26_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483674,2147483674,2147483674,2147483674]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [26,26,26,26]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -24556,7 +24364,6 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -24579,7 +24386,6 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -24598,7 +24404,6 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -24617,7 +24422,6 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -24710,8 +24514,7 @@ define <2 x i64> @ult_27_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483675,2147483675,2147483675,2147483675]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [27,27,27,27]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -24734,8 +24537,7 @@ define <2 x i64> @ult_27_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483675,2147483675,2147483675,2147483675]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [27,27,27,27]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -24754,8 +24556,7 @@ define <2 x i64> @ult_27_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483675,2147483675,2147483675,2147483675]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [27,27,27,27]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -24774,8 +24575,7 @@ define <2 x i64> @ult_27_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483675,2147483675,2147483675,2147483675]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [27,27,27,27]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -24872,7 +24672,6 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -24895,7 +24694,6 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -24914,7 +24712,6 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -24933,7 +24730,6 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -25026,8 +24822,7 @@ define <2 x i64> @ult_28_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483676,2147483676,2147483676,2147483676]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [28,28,28,28]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -25050,8 +24845,7 @@ define <2 x i64> @ult_28_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483676,2147483676,2147483676,2147483676]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [28,28,28,28]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -25070,8 +24864,7 @@ define <2 x i64> @ult_28_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483676,2147483676,2147483676,2147483676]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [28,28,28,28]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -25090,8 +24883,7 @@ define <2 x i64> @ult_28_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483676,2147483676,2147483676,2147483676]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [28,28,28,28]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -25188,7 +24980,6 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -25211,7 +25002,6 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -25230,7 +25020,6 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -25249,7 +25038,6 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -25342,8 +25130,7 @@ define <2 x i64> @ult_29_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483677,2147483677,2147483677,2147483677]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [29,29,29,29]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -25366,8 +25153,7 @@ define <2 x i64> @ult_29_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483677,2147483677,2147483677,2147483677]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [29,29,29,29]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -25386,8 +25172,7 @@ define <2 x i64> @ult_29_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483677,2147483677,2147483677,2147483677]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [29,29,29,29]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -25406,8 +25191,7 @@ define <2 x i64> @ult_29_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483677,2147483677,2147483677,2147483677]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [29,29,29,29]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -25504,7 +25288,6 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -25527,7 +25310,6 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -25546,7 +25328,6 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -25565,7 +25346,6 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -25658,8 +25438,7 @@ define <2 x i64> @ult_30_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483678,2147483678,2147483678,2147483678]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [30,30,30,30]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -25682,8 +25461,7 @@ define <2 x i64> @ult_30_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483678,2147483678,2147483678,2147483678]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [30,30,30,30]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -25702,8 +25480,7 @@ define <2 x i64> @ult_30_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483678,2147483678,2147483678,2147483678]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [30,30,30,30]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -25722,8 +25499,7 @@ define <2 x i64> @ult_30_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483678,2147483678,2147483678,2147483678]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [30,30,30,30]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -25820,7 +25596,6 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -25843,7 +25618,6 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -25862,7 +25636,6 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -25881,7 +25654,6 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -25974,8 +25746,7 @@ define <2 x i64> @ult_31_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483679,2147483679,2147483679,2147483679]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [31,31,31,31]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -25998,8 +25769,7 @@ define <2 x i64> @ult_31_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483679,2147483679,2147483679,2147483679]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [31,31,31,31]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -26018,8 +25788,7 @@ define <2 x i64> @ult_31_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483679,2147483679,2147483679,2147483679]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [31,31,31,31]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -26038,8 +25807,7 @@ define <2 x i64> @ult_31_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483679,2147483679,2147483679,2147483679]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [31,31,31,31]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -26136,7 +25904,6 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -26159,7 +25926,6 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -26178,7 +25944,6 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -26197,7 +25962,6 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -26290,8 +26054,7 @@ define <2 x i64> @ult_32_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483680,2147483680,2147483680,2147483680]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [32,32,32,32]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -26314,8 +26077,7 @@ define <2 x i64> @ult_32_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483680,2147483680,2147483680,2147483680]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [32,32,32,32]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -26334,8 +26096,7 @@ define <2 x i64> @ult_32_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483680,2147483680,2147483680,2147483680]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [32,32,32,32]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -26354,8 +26115,7 @@ define <2 x i64> @ult_32_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483680,2147483680,2147483680,2147483680]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [32,32,32,32]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -26452,7 +26212,6 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -26475,7 +26234,6 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -26494,7 +26252,6 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -26513,7 +26270,6 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -26606,8 +26362,7 @@ define <2 x i64> @ult_33_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483681,2147483681,2147483681,2147483681]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [33,33,33,33]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -26630,8 +26385,7 @@ define <2 x i64> @ult_33_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483681,2147483681,2147483681,2147483681]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [33,33,33,33]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -26650,8 +26404,7 @@ define <2 x i64> @ult_33_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483681,2147483681,2147483681,2147483681]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [33,33,33,33]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -26670,8 +26423,7 @@ define <2 x i64> @ult_33_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483681,2147483681,2147483681,2147483681]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [33,33,33,33]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -26768,7 +26520,6 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -26791,7 +26542,6 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -26810,7 +26560,6 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -26829,7 +26578,6 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -26922,8 +26670,7 @@ define <2 x i64> @ult_34_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483682,2147483682,2147483682,2147483682]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [34,34,34,34]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -26946,8 +26693,7 @@ define <2 x i64> @ult_34_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483682,2147483682,2147483682,2147483682]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [34,34,34,34]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -26966,8 +26712,7 @@ define <2 x i64> @ult_34_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483682,2147483682,2147483682,2147483682]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [34,34,34,34]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -26986,8 +26731,7 @@ define <2 x i64> @ult_34_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483682,2147483682,2147483682,2147483682]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [34,34,34,34]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -27084,7 +26828,6 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -27107,7 +26850,6 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -27126,7 +26868,6 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -27145,7 +26886,6 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -27238,8 +26978,7 @@ define <2 x i64> @ult_35_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483683,2147483683,2147483683,2147483683]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [35,35,35,35]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -27262,8 +27001,7 @@ define <2 x i64> @ult_35_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483683,2147483683,2147483683,2147483683]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [35,35,35,35]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -27282,8 +27020,7 @@ define <2 x i64> @ult_35_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483683,2147483683,2147483683,2147483683]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [35,35,35,35]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -27302,8 +27039,7 @@ define <2 x i64> @ult_35_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483683,2147483683,2147483683,2147483683]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [35,35,35,35]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -27400,7 +27136,6 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -27423,7 +27158,6 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -27442,7 +27176,6 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -27461,7 +27194,6 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -27554,8 +27286,7 @@ define <2 x i64> @ult_36_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483684,2147483684,2147483684,2147483684]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [36,36,36,36]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -27578,8 +27309,7 @@ define <2 x i64> @ult_36_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483684,2147483684,2147483684,2147483684]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [36,36,36,36]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -27598,8 +27328,7 @@ define <2 x i64> @ult_36_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483684,2147483684,2147483684,2147483684]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [36,36,36,36]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -27618,8 +27347,7 @@ define <2 x i64> @ult_36_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483684,2147483684,2147483684,2147483684]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [36,36,36,36]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -27716,7 +27444,6 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -27739,7 +27466,6 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -27758,7 +27484,6 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -27777,7 +27502,6 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -27870,8 +27594,7 @@ define <2 x i64> @ult_37_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483685,2147483685,2147483685,2147483685]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [37,37,37,37]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -27894,8 +27617,7 @@ define <2 x i64> @ult_37_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483685,2147483685,2147483685,2147483685]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [37,37,37,37]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -27914,8 +27636,7 @@ define <2 x i64> @ult_37_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483685,2147483685,2147483685,2147483685]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [37,37,37,37]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -27934,8 +27655,7 @@ define <2 x i64> @ult_37_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483685,2147483685,2147483685,2147483685]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [37,37,37,37]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -28032,7 +27752,6 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -28055,7 +27774,6 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -28074,7 +27792,6 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -28093,7 +27810,6 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -28186,8 +27902,7 @@ define <2 x i64> @ult_38_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483686,2147483686,2147483686,2147483686]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [38,38,38,38]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -28210,8 +27925,7 @@ define <2 x i64> @ult_38_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483686,2147483686,2147483686,2147483686]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [38,38,38,38]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -28230,8 +27944,7 @@ define <2 x i64> @ult_38_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483686,2147483686,2147483686,2147483686]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [38,38,38,38]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -28250,8 +27963,7 @@ define <2 x i64> @ult_38_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483686,2147483686,2147483686,2147483686]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [38,38,38,38]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -28348,7 +28060,6 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -28371,7 +28082,6 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -28390,7 +28100,6 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -28409,7 +28118,6 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -28502,8 +28210,7 @@ define <2 x i64> @ult_39_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483687,2147483687,2147483687,2147483687]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [39,39,39,39]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -28526,8 +28233,7 @@ define <2 x i64> @ult_39_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483687,2147483687,2147483687,2147483687]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [39,39,39,39]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -28546,8 +28252,7 @@ define <2 x i64> @ult_39_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483687,2147483687,2147483687,2147483687]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [39,39,39,39]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -28566,8 +28271,7 @@ define <2 x i64> @ult_39_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483687,2147483687,2147483687,2147483687]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [39,39,39,39]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -28664,7 +28368,6 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -28687,7 +28390,6 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -28706,7 +28408,6 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -28725,7 +28426,6 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -28818,8 +28518,7 @@ define <2 x i64> @ult_40_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483688,2147483688,2147483688,2147483688]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [40,40,40,40]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -28842,8 +28541,7 @@ define <2 x i64> @ult_40_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483688,2147483688,2147483688,2147483688]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [40,40,40,40]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -28862,8 +28560,7 @@ define <2 x i64> @ult_40_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483688,2147483688,2147483688,2147483688]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [40,40,40,40]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -28882,8 +28579,7 @@ define <2 x i64> @ult_40_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483688,2147483688,2147483688,2147483688]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [40,40,40,40]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -28980,7 +28676,6 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -29003,7 +28698,6 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -29022,7 +28716,6 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -29041,7 +28734,6 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -29134,8 +28826,7 @@ define <2 x i64> @ult_41_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483689,2147483689,2147483689,2147483689]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [41,41,41,41]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -29158,8 +28849,7 @@ define <2 x i64> @ult_41_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483689,2147483689,2147483689,2147483689]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [41,41,41,41]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -29178,8 +28868,7 @@ define <2 x i64> @ult_41_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483689,2147483689,2147483689,2147483689]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [41,41,41,41]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -29198,8 +28887,7 @@ define <2 x i64> @ult_41_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483689,2147483689,2147483689,2147483689]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [41,41,41,41]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -29296,7 +28984,6 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -29319,7 +29006,6 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -29338,7 +29024,6 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -29357,7 +29042,6 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -29450,8 +29134,7 @@ define <2 x i64> @ult_42_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483690,2147483690,2147483690,2147483690]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [42,42,42,42]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -29474,8 +29157,7 @@ define <2 x i64> @ult_42_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483690,2147483690,2147483690,2147483690]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [42,42,42,42]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -29494,8 +29176,7 @@ define <2 x i64> @ult_42_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483690,2147483690,2147483690,2147483690]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [42,42,42,42]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -29514,8 +29195,7 @@ define <2 x i64> @ult_42_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483690,2147483690,2147483690,2147483690]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [42,42,42,42]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -29612,7 +29292,6 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -29635,7 +29314,6 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -29654,7 +29332,6 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -29673,7 +29350,6 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -29766,8 +29442,7 @@ define <2 x i64> @ult_43_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483691,2147483691,2147483691,2147483691]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [43,43,43,43]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -29790,8 +29465,7 @@ define <2 x i64> @ult_43_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483691,2147483691,2147483691,2147483691]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [43,43,43,43]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -29810,8 +29484,7 @@ define <2 x i64> @ult_43_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483691,2147483691,2147483691,2147483691]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [43,43,43,43]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -29830,8 +29503,7 @@ define <2 x i64> @ult_43_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483691,2147483691,2147483691,2147483691]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [43,43,43,43]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -29928,7 +29600,6 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -29951,7 +29622,6 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -29970,7 +29640,6 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -29989,7 +29658,6 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -30082,8 +29750,7 @@ define <2 x i64> @ult_44_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483692,2147483692,2147483692,2147483692]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [44,44,44,44]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -30106,8 +29773,7 @@ define <2 x i64> @ult_44_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483692,2147483692,2147483692,2147483692]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [44,44,44,44]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -30126,8 +29792,7 @@ define <2 x i64> @ult_44_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483692,2147483692,2147483692,2147483692]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [44,44,44,44]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -30146,8 +29811,7 @@ define <2 x i64> @ult_44_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483692,2147483692,2147483692,2147483692]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [44,44,44,44]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -30244,7 +29908,6 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -30267,7 +29930,6 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -30286,7 +29948,6 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -30305,7 +29966,6 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -30398,8 +30058,7 @@ define <2 x i64> @ult_45_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483693,2147483693,2147483693,2147483693]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [45,45,45,45]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -30422,8 +30081,7 @@ define <2 x i64> @ult_45_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483693,2147483693,2147483693,2147483693]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [45,45,45,45]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -30442,8 +30100,7 @@ define <2 x i64> @ult_45_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483693,2147483693,2147483693,2147483693]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [45,45,45,45]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -30462,8 +30119,7 @@ define <2 x i64> @ult_45_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483693,2147483693,2147483693,2147483693]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [45,45,45,45]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -30560,7 +30216,6 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -30583,7 +30238,6 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -30602,7 +30256,6 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -30621,7 +30274,6 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -30714,8 +30366,7 @@ define <2 x i64> @ult_46_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483694,2147483694,2147483694,2147483694]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [46,46,46,46]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -30738,8 +30389,7 @@ define <2 x i64> @ult_46_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483694,2147483694,2147483694,2147483694]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [46,46,46,46]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -30758,8 +30408,7 @@ define <2 x i64> @ult_46_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483694,2147483694,2147483694,2147483694]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [46,46,46,46]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -30778,8 +30427,7 @@ define <2 x i64> @ult_46_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483694,2147483694,2147483694,2147483694]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [46,46,46,46]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -30876,7 +30524,6 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -30899,7 +30546,6 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -30918,7 +30564,6 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -30937,7 +30582,6 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -31030,8 +30674,7 @@ define <2 x i64> @ult_47_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483695,2147483695,2147483695,2147483695]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [47,47,47,47]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -31054,8 +30697,7 @@ define <2 x i64> @ult_47_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483695,2147483695,2147483695,2147483695]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [47,47,47,47]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -31074,8 +30716,7 @@ define <2 x i64> @ult_47_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483695,2147483695,2147483695,2147483695]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [47,47,47,47]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -31094,8 +30735,7 @@ define <2 x i64> @ult_47_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483695,2147483695,2147483695,2147483695]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [47,47,47,47]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -31192,7 +30832,6 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -31215,7 +30854,6 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -31234,7 +30872,6 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -31253,7 +30890,6 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -31346,8 +30982,7 @@ define <2 x i64> @ult_48_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483696,2147483696,2147483696,2147483696]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [48,48,48,48]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -31370,8 +31005,7 @@ define <2 x i64> @ult_48_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483696,2147483696,2147483696,2147483696]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [48,48,48,48]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -31390,8 +31024,7 @@ define <2 x i64> @ult_48_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483696,2147483696,2147483696,2147483696]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [48,48,48,48]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -31410,8 +31043,7 @@ define <2 x i64> @ult_48_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483696,2147483696,2147483696,2147483696]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [48,48,48,48]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -31508,7 +31140,6 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -31531,7 +31162,6 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -31550,7 +31180,6 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -31569,7 +31198,6 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -31662,8 +31290,7 @@ define <2 x i64> @ult_49_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483697,2147483697,2147483697,2147483697]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [49,49,49,49]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -31686,8 +31313,7 @@ define <2 x i64> @ult_49_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483697,2147483697,2147483697,2147483697]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [49,49,49,49]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -31706,8 +31332,7 @@ define <2 x i64> @ult_49_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483697,2147483697,2147483697,2147483697]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [49,49,49,49]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -31726,8 +31351,7 @@ define <2 x i64> @ult_49_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483697,2147483697,2147483697,2147483697]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [49,49,49,49]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -31824,7 +31448,6 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -31847,7 +31470,6 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -31866,7 +31488,6 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -31885,7 +31506,6 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -31978,8 +31598,7 @@ define <2 x i64> @ult_50_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483698,2147483698,2147483698,2147483698]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [50,50,50,50]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -32002,8 +31621,7 @@ define <2 x i64> @ult_50_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483698,2147483698,2147483698,2147483698]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [50,50,50,50]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -32022,8 +31640,7 @@ define <2 x i64> @ult_50_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483698,2147483698,2147483698,2147483698]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [50,50,50,50]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -32042,8 +31659,7 @@ define <2 x i64> @ult_50_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483698,2147483698,2147483698,2147483698]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [50,50,50,50]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -32140,7 +31756,6 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -32163,7 +31778,6 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -32182,7 +31796,6 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -32201,7 +31814,6 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -32294,8 +31906,7 @@ define <2 x i64> @ult_51_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483699,2147483699,2147483699,2147483699]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -32318,8 +31929,7 @@ define <2 x i64> @ult_51_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483699,2147483699,2147483699,2147483699]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -32338,8 +31948,7 @@ define <2 x i64> @ult_51_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483699,2147483699,2147483699,2147483699]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -32358,8 +31967,7 @@ define <2 x i64> @ult_51_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483699,2147483699,2147483699,2147483699]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [51,51,51,51]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -32456,7 +32064,6 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -32479,7 +32086,6 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -32498,7 +32104,6 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -32517,7 +32122,6 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -32610,8 +32214,7 @@ define <2 x i64> @ult_52_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483700,2147483700,2147483700,2147483700]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [52,52,52,52]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -32634,8 +32237,7 @@ define <2 x i64> @ult_52_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483700,2147483700,2147483700,2147483700]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [52,52,52,52]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -32654,8 +32256,7 @@ define <2 x i64> @ult_52_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483700,2147483700,2147483700,2147483700]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [52,52,52,52]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -32674,8 +32275,7 @@ define <2 x i64> @ult_52_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483700,2147483700,2147483700,2147483700]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [52,52,52,52]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -32772,7 +32372,6 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -32795,7 +32394,6 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -32814,7 +32412,6 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -32833,7 +32430,6 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -32926,8 +32522,7 @@ define <2 x i64> @ult_53_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483701,2147483701,2147483701,2147483701]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [53,53,53,53]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -32950,8 +32545,7 @@ define <2 x i64> @ult_53_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483701,2147483701,2147483701,2147483701]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [53,53,53,53]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -32970,8 +32564,7 @@ define <2 x i64> @ult_53_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483701,2147483701,2147483701,2147483701]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [53,53,53,53]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -32990,8 +32583,7 @@ define <2 x i64> @ult_53_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483701,2147483701,2147483701,2147483701]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [53,53,53,53]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -33088,7 +32680,6 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -33111,7 +32702,6 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -33130,7 +32720,6 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -33149,7 +32738,6 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -33242,8 +32830,7 @@ define <2 x i64> @ult_54_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483702,2147483702,2147483702,2147483702]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [54,54,54,54]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -33266,8 +32853,7 @@ define <2 x i64> @ult_54_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483702,2147483702,2147483702,2147483702]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [54,54,54,54]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -33286,8 +32872,7 @@ define <2 x i64> @ult_54_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483702,2147483702,2147483702,2147483702]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [54,54,54,54]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -33306,8 +32891,7 @@ define <2 x i64> @ult_54_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483702,2147483702,2147483702,2147483702]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [54,54,54,54]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -33404,7 +32988,6 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -33427,7 +33010,6 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -33446,7 +33028,6 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -33465,7 +33046,6 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -33558,8 +33138,7 @@ define <2 x i64> @ult_55_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483703,2147483703,2147483703,2147483703]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [55,55,55,55]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -33582,8 +33161,7 @@ define <2 x i64> @ult_55_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483703,2147483703,2147483703,2147483703]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [55,55,55,55]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -33602,8 +33180,7 @@ define <2 x i64> @ult_55_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483703,2147483703,2147483703,2147483703]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [55,55,55,55]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -33622,8 +33199,7 @@ define <2 x i64> @ult_55_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483703,2147483703,2147483703,2147483703]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [55,55,55,55]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -33720,7 +33296,6 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -33743,7 +33318,6 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -33762,7 +33336,6 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -33781,7 +33354,6 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -33874,8 +33446,7 @@ define <2 x i64> @ult_56_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483704,2147483704,2147483704,2147483704]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [56,56,56,56]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -33898,8 +33469,7 @@ define <2 x i64> @ult_56_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483704,2147483704,2147483704,2147483704]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [56,56,56,56]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -33918,8 +33488,7 @@ define <2 x i64> @ult_56_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483704,2147483704,2147483704,2147483704]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [56,56,56,56]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -33938,8 +33507,7 @@ define <2 x i64> @ult_56_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483704,2147483704,2147483704,2147483704]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [56,56,56,56]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -34036,7 +33604,6 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -34059,7 +33626,6 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -34078,7 +33644,6 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -34097,7 +33662,6 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -34190,8 +33754,7 @@ define <2 x i64> @ult_57_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483705,2147483705,2147483705,2147483705]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [57,57,57,57]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -34214,8 +33777,7 @@ define <2 x i64> @ult_57_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483705,2147483705,2147483705,2147483705]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [57,57,57,57]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -34234,8 +33796,7 @@ define <2 x i64> @ult_57_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483705,2147483705,2147483705,2147483705]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [57,57,57,57]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -34254,8 +33815,7 @@ define <2 x i64> @ult_57_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483705,2147483705,2147483705,2147483705]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [57,57,57,57]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -34352,7 +33912,6 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -34375,7 +33934,6 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -34394,7 +33952,6 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -34413,7 +33970,6 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -34506,8 +34062,7 @@ define <2 x i64> @ult_58_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483706,2147483706,2147483706,2147483706]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [58,58,58,58]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -34530,8 +34085,7 @@ define <2 x i64> @ult_58_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483706,2147483706,2147483706,2147483706]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [58,58,58,58]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -34550,8 +34104,7 @@ define <2 x i64> @ult_58_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483706,2147483706,2147483706,2147483706]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [58,58,58,58]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -34570,8 +34123,7 @@ define <2 x i64> @ult_58_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483706,2147483706,2147483706,2147483706]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [58,58,58,58]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -34668,7 +34220,6 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -34691,7 +34242,6 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -34710,7 +34260,6 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -34729,7 +34278,6 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -34822,8 +34370,7 @@ define <2 x i64> @ult_59_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483707,2147483707,2147483707,2147483707]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [59,59,59,59]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -34846,8 +34393,7 @@ define <2 x i64> @ult_59_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483707,2147483707,2147483707,2147483707]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [59,59,59,59]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -34866,8 +34412,7 @@ define <2 x i64> @ult_59_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483707,2147483707,2147483707,2147483707]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [59,59,59,59]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -34886,8 +34431,7 @@ define <2 x i64> @ult_59_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483707,2147483707,2147483707,2147483707]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [59,59,59,59]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -34984,7 +34528,6 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -35007,7 +34550,6 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -35026,7 +34568,6 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -35045,7 +34586,6 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -35138,8 +34678,7 @@ define <2 x i64> @ult_60_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483708,2147483708,2147483708,2147483708]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [60,60,60,60]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -35162,8 +34701,7 @@ define <2 x i64> @ult_60_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483708,2147483708,2147483708,2147483708]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [60,60,60,60]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -35182,8 +34720,7 @@ define <2 x i64> @ult_60_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483708,2147483708,2147483708,2147483708]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [60,60,60,60]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -35202,8 +34739,7 @@ define <2 x i64> @ult_60_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483708,2147483708,2147483708,2147483708]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [60,60,60,60]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -35300,7 +34836,6 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -35323,7 +34858,6 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -35342,7 +34876,6 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -35361,7 +34894,6 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -35454,8 +34986,7 @@ define <2 x i64> @ult_61_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483709,2147483709,2147483709,2147483709]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [61,61,61,61]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -35478,8 +35009,7 @@ define <2 x i64> @ult_61_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483709,2147483709,2147483709,2147483709]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [61,61,61,61]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -35498,8 +35028,7 @@ define <2 x i64> @ult_61_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483709,2147483709,2147483709,2147483709]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [61,61,61,61]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -35518,8 +35047,7 @@ define <2 x i64> @ult_61_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483709,2147483709,2147483709,2147483709]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [61,61,61,61]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -35616,7 +35144,6 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -35639,7 +35166,6 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -35658,7 +35184,6 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -35677,7 +35202,6 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -35770,8 +35294,7 @@ define <2 x i64> @ult_62_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483710,2147483710,2147483710,2147483710]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [62,62,62,62]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -35794,8 +35317,7 @@ define <2 x i64> @ult_62_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483710,2147483710,2147483710,2147483710]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [62,62,62,62]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -35814,8 +35336,7 @@ define <2 x i64> @ult_62_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483710,2147483710,2147483710,2147483710]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [62,62,62,62]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -35834,8 +35355,7 @@ define <2 x i64> @ult_62_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483710,2147483710,2147483710,2147483710]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [62,62,62,62]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -35932,7 +35452,6 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -35955,7 +35474,6 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -35974,7 +35492,6 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
@@ -35993,7 +35510,6 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: retq
;
@@ -36086,8 +35602,7 @@ define <2 x i64> @ult_63_v2i64(<2 x i64> %0) {
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483711,2147483711,2147483711,2147483711]
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [63,63,63,63]
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -36110,8 +35625,7 @@ define <2 x i64> @ult_63_v2i64(<2 x i64> %0) {
; SSE3-NEXT: pxor %xmm0, %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483711,2147483711,2147483711,2147483711]
+; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [63,63,63,63]
; SSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE3-NEXT: retq
;
@@ -36130,8 +35644,7 @@ define <2 x i64> @ult_63_v2i64(<2 x i64> %0) {
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: psadbw %xmm3, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSSE3-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483711,2147483711,2147483711,2147483711]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [63,63,63,63]
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -36150,8 +35663,7 @@ define <2 x i64> @ult_63_v2i64(<2 x i64> %0) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: psadbw %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483711,2147483711,2147483711,2147483711]
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [63,63,63,63]
; SSE41-NEXT: pcmpgtd %xmm1, %xmm0
; SSE41-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
index 71c4427da962..24113441a4e2 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
@@ -413,14 +413,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512F-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512F-NEXT: xorl %eax, %eax
; AVX512F-NEXT: vucomiss %xmm3, %xmm2
; AVX512F-NEXT: movl $255, %ecx
@@ -434,14 +428,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512VL-LABEL: test_v2f16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512VL-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512VL-NEXT: xorl %eax, %eax
; AVX512VL-NEXT: vucomiss %xmm3, %xmm2
; AVX512VL-NEXT: movl $255, %ecx
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
index 0b2f9d69f062..edefb16d40e6 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
@@ -412,14 +412,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512F-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512F-NEXT: xorl %eax, %eax
; AVX512F-NEXT: vucomiss %xmm3, %xmm2
; AVX512F-NEXT: movl $255, %ecx
@@ -433,14 +427,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512VL-LABEL: test_v2f16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512VL-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512VL-NEXT: xorl %eax, %eax
; AVX512VL-NEXT: vucomiss %xmm3, %xmm2
; AVX512VL-NEXT: movl $255, %ecx
diff --git a/llvm/test/CodeGen/X86/vector-reduce-umax.ll b/llvm/test/CodeGen/X86/vector-reduce-umax.ll
index 4799b8e7e585..3b25a6e033f2 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-umax.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-umax.ll
@@ -210,7 +210,7 @@ define i64 @test_v4i64(<4 x i64> %a0) {
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm4
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-reduce-umin.ll b/llvm/test/CodeGen/X86/vector-reduce-umin.ll
index 75eeec456c9a..2d68cf9d6374 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-umin.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-umin.ll
@@ -211,7 +211,7 @@ define i64 @test_v4i64(<4 x i64> %a0) {
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm4
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index 468fec66c028..6360c68e62cc 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2012,24 +2012,18 @@ define <4 x i32> @extract3_insert0_v4i32_7123(<4 x i32> %a0, <4 x i32> %a1) {
; SSE2-LABEL: extract3_insert0_v4i32_7123:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: extract3_insert0_v4i32_7123:
; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE3-NEXT: movd %xmm1, %eax
-; SSE3-NEXT: movd %eax, %xmm1
; SSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: extract3_insert0_v4i32_7123:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSSE3-NEXT: movd %xmm1, %eax
-; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSSE3-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vselect.ll b/llvm/test/CodeGen/X86/vselect.ll
index ce3dc8cc873c..cc4eb0c8f734 100644
--- a/llvm/test/CodeGen/X86/vselect.ll
+++ b/llvm/test/CodeGen/X86/vselect.ll
@@ -741,14 +741,24 @@ define i64 @vselect_any_extend_vector_inreg_crash(ptr %x) {
; SSE-NEXT: shll $15, %eax
; SSE-NEXT: retq
;
-; AVX-LABEL: vselect_any_extend_vector_inreg_crash:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: andl $1, %eax
-; AVX-NEXT: shll $15, %eax
-; AVX-NEXT: retq
+; AVX1-LABEL: vselect_any_extend_vector_inreg_crash:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: shll $15, %eax
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: vselect_any_extend_vector_inreg_crash:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [49,49,49,49]
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: shll $15, %eax
+; AVX2-NEXT: retq
0:
%1 = load <8 x i8>, ptr %x
%2 = icmp eq <8 x i8> %1, <i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49, i8 49>
diff --git a/llvm/test/CodeGen/X86/x86-32-intrcc.ll b/llvm/test/CodeGen/X86/x86-32-intrcc.ll
index 2e482753e268..3c3944c2082b 100644
--- a/llvm/test/CodeGen/X86/x86-32-intrcc.ll
+++ b/llvm/test/CodeGen/X86/x86-32-intrcc.ll
@@ -149,7 +149,6 @@ define x86_intrcc void @test_isr_x87(ptr byval(%struct.interrupt_frame) %frame)
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-16, %esp
-; CHECK-NEXT: cld
; CHECK-NEXT: fldt f80
; CHECK-NEXT: fld1
; CHECK-NEXT: faddp %st, %st(1)
@@ -163,7 +162,6 @@ define x86_intrcc void @test_isr_x87(ptr byval(%struct.interrupt_frame) %frame)
; CHECK0-NEXT: pushl %ebp
; CHECK0-NEXT: movl %esp, %ebp
; CHECK0-NEXT: andl $-16, %esp
-; CHECK0-NEXT: cld
; CHECK0-NEXT: fldt f80
; CHECK0-NEXT: fld1
; CHECK0-NEXT: faddp %st, %st(1)
@@ -188,7 +186,6 @@ define dso_local x86_intrcc void @test_fp_1(ptr byval(%struct.interrupt_frame) %
; CHECK-NEXT: pushl %ecx
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: andl $-16, %esp
-; CHECK-NEXT: cld
; CHECK-NEXT: leal 20(%ebp), %eax
; CHECK-NEXT: leal 4(%ebp), %ecx
; CHECK-NEXT: movl %ecx, sink_address
@@ -206,7 +203,6 @@ define dso_local x86_intrcc void @test_fp_1(ptr byval(%struct.interrupt_frame) %
; CHECK0-NEXT: pushl %ecx
; CHECK0-NEXT: pushl %eax
; CHECK0-NEXT: andl $-16, %esp
-; CHECK0-NEXT: cld
; CHECK0-NEXT: leal 4(%ebp), %ecx
; CHECK0-NEXT: movl %ecx, %eax
; CHECK0-NEXT: addl $16, %eax
@@ -234,7 +230,6 @@ define dso_local x86_intrcc void @test_fp_2(ptr byval(%struct.interrupt_frame) %
; CHECK-NEXT: pushl %ecx
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: andl $-16, %esp
-; CHECK-NEXT: cld
; CHECK-NEXT: movl 4(%ebp), %eax
; CHECK-NEXT: leal 24(%ebp), %ecx
; CHECK-NEXT: leal 8(%ebp), %edx
@@ -257,7 +252,6 @@ define dso_local x86_intrcc void @test_fp_2(ptr byval(%struct.interrupt_frame) %
; CHECK0-NEXT: pushl %ecx
; CHECK0-NEXT: pushl %eax
; CHECK0-NEXT: andl $-16, %esp
-; CHECK0-NEXT: cld
; CHECK0-NEXT: movl 4(%ebp), %eax
; CHECK0-NEXT: leal 8(%ebp), %edx
; CHECK0-NEXT: movl %edx, %ecx
@@ -288,7 +282,6 @@ define x86_intrcc void @test_copy_elide(ptr byval(%struct.interrupt_frame) %fram
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: andl $-16, %esp
-; CHECK-NEXT: cld
; CHECK-NEXT: leal 4(%ebp), %eax
; CHECK-NEXT: movl %eax, sink_address
; CHECK-NEXT: leal -4(%ebp), %esp
@@ -303,7 +296,6 @@ define x86_intrcc void @test_copy_elide(ptr byval(%struct.interrupt_frame) %fram
; CHECK0-NEXT: movl %esp, %ebp
; CHECK0-NEXT: pushl %eax
; CHECK0-NEXT: andl $-16, %esp
-; CHECK0-NEXT: cld
; CHECK0-NEXT: movl 4(%ebp), %eax
; CHECK0-NEXT: leal 4(%ebp), %eax
; CHECK0-NEXT: movl %eax, sink_address
@@ -358,7 +350,6 @@ define x86_intrcc void @test_isr_realign(ptr byval(%struct.interrupt_frame) %fra
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: andl $-32, %esp
; CHECK-NEXT: subl $32, %esp
-; CHECK-NEXT: cld
; CHECK-NEXT: movl 4(%ebp), %eax
; CHECK-NEXT: movl %eax, (%esp)
; CHECK-NEXT: leal -4(%ebp), %esp
@@ -374,7 +365,6 @@ define x86_intrcc void @test_isr_realign(ptr byval(%struct.interrupt_frame) %fra
; CHECK0-NEXT: pushl %eax
; CHECK0-NEXT: andl $-32, %esp
; CHECK0-NEXT: subl $32, %esp
-; CHECK0-NEXT: cld
; CHECK0-NEXT: movl 4(%ebp), %eax
; CHECK0-NEXT: movl %eax, (%esp)
; CHECK0-NEXT: leal -4(%ebp), %esp
diff --git a/llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll b/llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll
index a46b9d9ba5a1..1fe395b84d46 100644
--- a/llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll
+++ b/llvm/test/CodeGen/X86/x86-64-intrcc-uintr.ll
@@ -21,28 +21,24 @@ define dso_local x86_intrcc void @test_uintr_isr_cc_empty(ptr nocapture byval(%s
; CHECK-USER-LABEL: test_uintr_isr_cc_empty:
; CHECK-USER: # %bb.0: # %entry
; CHECK-USER-NEXT: pushq %rax
-; CHECK-USER-NEXT: cld
; CHECK-USER-NEXT: addq $16, %rsp
; CHECK-USER-NEXT: uiret
;
; CHECK0-USER-LABEL: test_uintr_isr_cc_empty:
; CHECK0-USER: # %bb.0: # %entry
; CHECK0-USER-NEXT: pushq %rax
-; CHECK0-USER-NEXT: cld
; CHECK0-USER-NEXT: addq $16, %rsp
; CHECK0-USER-NEXT: uiret
;
; CHECK-KERNEL-LABEL: test_uintr_isr_cc_empty:
; CHECK-KERNEL: # %bb.0: # %entry
; CHECK-KERNEL-NEXT: pushq %rax
-; CHECK-KERNEL-NEXT: cld
; CHECK-KERNEL-NEXT: addq $16, %rsp
; CHECK-KERNEL-NEXT: iretq
;
; CHECK0-KERNEL-LABEL: test_uintr_isr_cc_empty:
; CHECK0-KERNEL: # %bb.0: # %entry
; CHECK0-KERNEL-NEXT: pushq %rax
-; CHECK0-KERNEL-NEXT: cld
; CHECK0-KERNEL-NEXT: addq $16, %rsp
; CHECK0-KERNEL-NEXT: iretq
entry:
@@ -75,7 +71,6 @@ define dso_local x86_intrcc void @test_uintr_isr_cc_args(ptr nocapture readonly
; CHECK-USER-NEXT: pushq %rax
; CHECK-USER-NEXT: pushq %rdx
; CHECK-USER-NEXT: pushq %rcx
-; CHECK-USER-NEXT: cld
; CHECK-USER-NEXT: movq 32(%rsp), %rax
; CHECK-USER-NEXT: movq 40(%rsp), %rcx
; CHECK-USER-NEXT: movq 48(%rsp), %rdx
@@ -96,7 +91,6 @@ define dso_local x86_intrcc void @test_uintr_isr_cc_args(ptr nocapture readonly
; CHECK0-USER-NEXT: pushq %rax
; CHECK0-USER-NEXT: pushq %rdx
; CHECK0-USER-NEXT: pushq %rcx
-; CHECK0-USER-NEXT: cld
; CHECK0-USER-NEXT: movq 32(%rsp), %rax
; CHECK0-USER-NEXT: leaq 40(%rsp), %rcx
; CHECK0-USER-NEXT: movq (%rcx), %rdx
@@ -118,7 +112,6 @@ define dso_local x86_intrcc void @test_uintr_isr_cc_args(ptr nocapture readonly
; CHECK-KERNEL-NEXT: pushq %rax
; CHECK-KERNEL-NEXT: pushq %rdx
; CHECK-KERNEL-NEXT: pushq %rcx
-; CHECK-KERNEL-NEXT: cld
; CHECK-KERNEL-NEXT: movq 32(%rsp), %rax
; CHECK-KERNEL-NEXT: movq 40(%rsp), %rcx
; CHECK-KERNEL-NEXT: movq 48(%rsp), %rdx
@@ -139,7 +132,6 @@ define dso_local x86_intrcc void @test_uintr_isr_cc_args(ptr nocapture readonly
; CHECK0-KERNEL-NEXT: pushq %rax
; CHECK0-KERNEL-NEXT: pushq %rdx
; CHECK0-KERNEL-NEXT: pushq %rcx
-; CHECK0-KERNEL-NEXT: cld
; CHECK0-KERNEL-NEXT: movq 32(%rsp), %rax
; CHECK0-KERNEL-NEXT: leaq 40(%rsp), %rcx
; CHECK0-KERNEL-NEXT: movq (%rcx), %rdx
diff --git a/llvm/test/CodeGen/X86/x86-64-intrcc.ll b/llvm/test/CodeGen/X86/x86-64-intrcc.ll
index 443d4c2fa464..5fc606eb566e 100644
--- a/llvm/test/CodeGen/X86/x86-64-intrcc.ll
+++ b/llvm/test/CodeGen/X86/x86-64-intrcc.ll
@@ -114,7 +114,6 @@ define dso_local x86_intrcc void @test_fp_1(ptr byval(%struct.interrupt_frame) %
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
- ; CHECK: cld
; CHECK-DAG: leaq 8(%rbp), %[[R1:[^ ]*]]
; CHECK-DAG: leaq 40(%rbp), %[[R2:[^ ]*]]
; CHECK: movq %[[R1]], sink_address
@@ -136,7 +135,6 @@ define dso_local x86_intrcc void @test_fp_2(ptr byval(%struct.interrupt_frame) %
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
- ; CHECK: cld
; CHECK-DAG: movq 16(%rbp), %[[R3:[^ ]*]]
; CHECK-DAG: leaq 24(%rbp), %[[R1:[^ ]*]]
; CHECK-DAG: leaq 56(%rbp), %[[R2:[^ ]*]]
@@ -164,7 +162,6 @@ define x86_intrcc void @test_copy_elide(ptr byval(%struct.interrupt_frame) %fram
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
- ; CHECK: cld
; CHECK: leaq 16(%rbp), %[[R1:[^ ]*]]
; CHECK: movq %[[R1]], sink_address(%rip)
entry:
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
index b9e490888d9b..3349d31cad4b 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
@@ -181,38 +181,40 @@ define zeroext i1 @segmentedStack(ptr readonly %vk1, ptr readonly %vk2, i64 %key
; CHECK-LABEL: segmentedStack:
; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %gs:816, %rsp
-; CHECK-NEXT: jbe LBB3_7
+; CHECK-NEXT: jbe LBB3_6
; CHECK-NEXT: LBB3_1: ## %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: testq %rsi, %rsi
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: orb %al, %cl
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: sete %al
-; CHECK-NEXT: testq %rdi, %rdi
-; CHECK-NEXT: je LBB3_5
-; CHECK-NEXT: ## %bb.2: ## %entry
-; CHECK-NEXT: testq %rsi, %rsi
-; CHECK-NEXT: je LBB3_5
-; CHECK-NEXT: ## %bb.3: ## %if.end4.i
+; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: jne LBB3_4
+; CHECK-NEXT: ## %bb.2: ## %if.end4.i
; CHECK-NEXT: movq 8(%rdi), %rdx
; CHECK-NEXT: cmpq 8(%rsi), %rdx
-; CHECK-NEXT: jne LBB3_6
-; CHECK-NEXT: ## %bb.4: ## %land.rhs.i.i
+; CHECK-NEXT: jne LBB3_5
+; CHECK-NEXT: ## %bb.3: ## %land.rhs.i.i
; CHECK-NEXT: movq (%rsi), %rsi
; CHECK-NEXT: movq (%rdi), %rdi
; CHECK-NEXT: callq _memcmp
; CHECK-NEXT: testl %eax, %eax
; CHECK-NEXT: sete %al
-; CHECK-NEXT: LBB3_5: ## %__go_ptr_strings_equal.exit
+; CHECK-NEXT: LBB3_4: ## %__go_ptr_strings_equal.exit
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
-; CHECK-NEXT: LBB3_6:
+; CHECK-NEXT: LBB3_5:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
-; CHECK-NEXT: LBB3_7:
+; CHECK-NEXT: LBB3_6:
; CHECK-NEXT: movl $8, %r10d
; CHECK-NEXT: movl $0, %r11d
; CHECK-NEXT: callq ___morestack
@@ -222,41 +224,43 @@ define zeroext i1 @segmentedStack(ptr readonly %vk1, ptr readonly %vk2, i64 %key
; NOCOMPACTUNWIND-LABEL: segmentedStack:
; NOCOMPACTUNWIND: # %bb.0:
; NOCOMPACTUNWIND-NEXT: cmpq %fs:112, %rsp
-; NOCOMPACTUNWIND-NEXT: jbe .LBB3_7
+; NOCOMPACTUNWIND-NEXT: jbe .LBB3_6
; NOCOMPACTUNWIND-NEXT: .LBB3_1: # %entry
; NOCOMPACTUNWIND-NEXT: pushq %rax
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
+; NOCOMPACTUNWIND-NEXT: testq %rdi, %rdi
+; NOCOMPACTUNWIND-NEXT: sete %al
+; NOCOMPACTUNWIND-NEXT: testq %rsi, %rsi
+; NOCOMPACTUNWIND-NEXT: sete %cl
+; NOCOMPACTUNWIND-NEXT: orb %al, %cl
; NOCOMPACTUNWIND-NEXT: movq %rdi, %rax
; NOCOMPACTUNWIND-NEXT: orq %rsi, %rax
; NOCOMPACTUNWIND-NEXT: sete %al
-; NOCOMPACTUNWIND-NEXT: testq %rdi, %rdi
-; NOCOMPACTUNWIND-NEXT: je .LBB3_5
-; NOCOMPACTUNWIND-NEXT: # %bb.2: # %entry
-; NOCOMPACTUNWIND-NEXT: testq %rsi, %rsi
-; NOCOMPACTUNWIND-NEXT: je .LBB3_5
-; NOCOMPACTUNWIND-NEXT: # %bb.3: # %if.end4.i
+; NOCOMPACTUNWIND-NEXT: testb %cl, %cl
+; NOCOMPACTUNWIND-NEXT: jne .LBB3_4
+; NOCOMPACTUNWIND-NEXT: # %bb.2: # %if.end4.i
; NOCOMPACTUNWIND-NEXT: movq 8(%rdi), %rdx
; NOCOMPACTUNWIND-NEXT: cmpq 8(%rsi), %rdx
-; NOCOMPACTUNWIND-NEXT: jne .LBB3_6
-; NOCOMPACTUNWIND-NEXT: # %bb.4: # %land.rhs.i.i
+; NOCOMPACTUNWIND-NEXT: jne .LBB3_5
+; NOCOMPACTUNWIND-NEXT: # %bb.3: # %land.rhs.i.i
; NOCOMPACTUNWIND-NEXT: movq (%rsi), %rsi
; NOCOMPACTUNWIND-NEXT: movq (%rdi), %rdi
; NOCOMPACTUNWIND-NEXT: callq memcmp@PLT
; NOCOMPACTUNWIND-NEXT: testl %eax, %eax
; NOCOMPACTUNWIND-NEXT: sete %al
-; NOCOMPACTUNWIND-NEXT: .LBB3_5: # %__go_ptr_strings_equal.exit
+; NOCOMPACTUNWIND-NEXT: .LBB3_4: # %__go_ptr_strings_equal.exit
; NOCOMPACTUNWIND-NEXT: # kill: def $al killed $al killed $eax
; NOCOMPACTUNWIND-NEXT: popq %rcx
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8
; NOCOMPACTUNWIND-NEXT: retq
-; NOCOMPACTUNWIND-NEXT: .LBB3_6:
+; NOCOMPACTUNWIND-NEXT: .LBB3_5:
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
; NOCOMPACTUNWIND-NEXT: xorl %eax, %eax
; NOCOMPACTUNWIND-NEXT: # kill: def $al killed $al killed $eax
; NOCOMPACTUNWIND-NEXT: popq %rcx
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8
; NOCOMPACTUNWIND-NEXT: retq
-; NOCOMPACTUNWIND-NEXT: .LBB3_7:
+; NOCOMPACTUNWIND-NEXT: .LBB3_6:
; NOCOMPACTUNWIND-NEXT: movl $8, %r10d
; NOCOMPACTUNWIND-NEXT: movl $0, %r11d
; NOCOMPACTUNWIND-NEXT: callq __morestack
diff --git a/llvm/test/DebugInfo/AArch64/ptrauth.ll b/llvm/test/DebugInfo/AArch64/ptrauth.ll
new file mode 100644
index 000000000000..4f84fe4f9629
--- /dev/null
+++ b/llvm/test/DebugInfo/AArch64/ptrauth.ll
@@ -0,0 +1,70 @@
+; RUN: llc %s -filetype=obj -mtriple arm64e-apple-darwin -o - \
+; RUN: | llvm-dwarfdump - | FileCheck %s
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 0, 0x04d2)")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d2)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d3)")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d3)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d4, "isa-pointer")")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d4)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_isa_pointer (true)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d5, "authenticates-null-values")")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d5)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_authenticates_null_values (true)
+
+; CHECK: DW_AT_type (0x{{0+}}[[TY:.*]] "void *__ptrauth(4, 1, 0x04d6, "isa-pointer,authenticates-null-values")")
+; CHECK: 0x{{0+}}[[TY]]: DW_TAG_LLVM_ptrauth_type
+; CHECK-NEXT: DW_AT_type {{.*}}"void *"
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_key (0x04)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_address_discriminated (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_extra_discriminator (0x04d6)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_isa_pointer (true)
+; CHECK-NEXT: DW_AT_LLVM_ptrauth_authenticates_null_values (true)
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+@p = common global i8* null, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!10}
+!llvm.module.flags = !{!19, !20}
+
+!0 = !DIGlobalVariableExpression(var: !5, expr: !DIExpression())
+!1 = !DIGlobalVariableExpression(var: !6, expr: !DIExpression())
+!2 = !DIGlobalVariableExpression(var: !7, expr: !DIExpression())
+!3 = !DIGlobalVariableExpression(var: !8, expr: !DIExpression())
+!4 = !DIGlobalVariableExpression(var: !9, expr: !DIExpression())
+!5 = distinct !DIGlobalVariable(name: "p1", scope: !10, file: !11, line: 1, type: !14, isLocal: false, isDefinition: true)
+!6 = distinct !DIGlobalVariable(name: "p2", scope: !10, file: !11, line: 1, type: !15, isLocal: false, isDefinition: true)
+!7 = distinct !DIGlobalVariable(name: "p3", scope: !10, file: !11, line: 1, type: !16, isLocal: false, isDefinition: true)
+!8 = distinct !DIGlobalVariable(name: "p4", scope: !10, file: !11, line: 1, type: !17, isLocal: false, isDefinition: true)
+!9 = distinct !DIGlobalVariable(name: "p5", scope: !10, file: !11, line: 1, type: !18, isLocal: false, isDefinition: true)
+!10 = distinct !DICompileUnit(language: DW_LANG_C99, file: !11, emissionKind: FullDebug, globals: !13)
+!11 = !DIFile(filename: "/tmp/p.c", directory: "/")
+!12 = !{}
+!13 = !{!0,!1,!2,!3,!4}
+!14 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: false, ptrAuthExtraDiscriminator: 1234)
+!15 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1235)
+!16 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1236, ptrAuthIsaPointer: true)
+!17 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1237, ptrAuthAuthenticatesNullValues: true)
+!18 = !DIDerivedType(tag: DW_TAG_LLVM_ptrauth_type, baseType: !21, ptrAuthKey: 4, ptrAuthIsAddressDiscriminated: true, ptrAuthExtraDiscriminator: 1238, ptrAuthIsaPointer: true, ptrAuthAuthenticatesNullValues: true)
+!19 = !{i32 2, !"Dwarf Version", i32 4}
+!20 = !{i32 2, !"Debug Info Version", i32 3}
+!21 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null)
diff --git a/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test b/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test
index ec71011d545e..c09513a7d370 100644
--- a/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test
+++ b/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test
@@ -8,4 +8,9 @@
# Jitlink does not support ARM64 COFF files.
# UNSUPPORTED: target=aarch64-pc-windows-{{.*}}
+# On MinGW targets, when compiling the main() function, it gets
+# an implicitly generated call to __main(), which is missing in
+# this context.
+# XFAIL: target={{.*}}-windows-gnu
+
# jitlink-check: *{4}foo = 0x2a2a5a5a \ No newline at end of file
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll
new file mode 100644
index 000000000000..f0a5c67365ab
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll
@@ -0,0 +1,459 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+
+; Warning! The output of this test is currently invalid.
+; It serves as a base for the bugfix patch to highlight the modified generated code.
+
+; Test appropriate tagging of funclet for function calls generated by asan.
+; RUN: opt -S -passes=asan,win-eh-prepare -asan-use-stack-safety=0 -asan-max-inline-poisoning-size=0 \
+; RUN: -asan-detect-invalid-pointer-cmp -asan-detect-invalid-pointer-sub -asan-use-after-scope < %s | FileCheck %s --check-prefixes=CHECK,CHECK-INLINE
+; RUN: opt -S -passes=asan,win-eh-prepare -asan-use-stack-safety=0 -asan-max-inline-poisoning-size=0 -asan-instrumentation-with-call-threshold=0 \
+; RUN: -asan-detect-invalid-pointer-cmp -asan-detect-invalid-pointer-sub -asan-use-after-scope < %s | FileCheck %s --check-prefixes=CHECK,CHECK-OUTLINE
+
+; REQUIRES: x86-registered-target
+
+target triple = "x86_64-pc-windows-msvc"
+
+declare void @DeInit(ptr)
+declare void @MayThrowFunc()
+declare void @NoReturn() noreturn
+
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+
+declare i32 @__CxxFrameHandler3(...)
+declare i32 @dummyPersonality(...)
+
+define void @FuncletPersonality(ptr %ptrParam) sanitize_address personality ptr @__CxxFrameHandler3 {
+; CHECK-INLINE-LABEL: define void @FuncletPersonality(
+; CHECK-INLINE-SAME: ptr [[PTRPARAM:%.*]]) #[[ATTR4:[0-9]+]] personality ptr @__CxxFrameHandler3 {
+; CHECK-INLINE-NEXT: entry:
+; CHECK-INLINE-NEXT: [[TMP0:%.*]] = alloca i64, align 32
+; CHECK-INLINE-NEXT: store i64 0, ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT: [[TMP1:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-INLINE-NEXT: [[ASAN_LOCAL_STACK_BASE:%.*]] = alloca i64, align 8
+; CHECK-INLINE-NEXT: [[TMP2:%.*]] = load i32, ptr @__asan_option_detect_stack_use_after_return, align 4
+; CHECK-INLINE-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-INLINE-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP6:%.*]]
+; CHECK-INLINE: 4:
+; CHECK-INLINE-NEXT: [[TMP5:%.*]] = call i64 @__asan_stack_malloc_8(i64 8544)
+; CHECK-INLINE-NEXT: br label [[TMP6]]
+; CHECK-INLINE: 6:
+; CHECK-INLINE-NEXT: [[TMP7:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[TMP4]] ]
+; CHECK-INLINE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
+; CHECK-INLINE-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP11:%.*]]
+; CHECK-INLINE: 9:
+; CHECK-INLINE-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 8544, align 32
+; CHECK-INLINE-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
+; CHECK-INLINE-NEXT: br label [[TMP11]]
+; CHECK-INLINE: 11:
+; CHECK-INLINE-NEXT: [[TMP12:%.*]] = phi i64 [ [[TMP7]], [[TMP6]] ], [ [[TMP10]], [[TMP9]] ]
+; CHECK-INLINE-NEXT: store i64 [[TMP12]], ptr [[ASAN_LOCAL_STACK_BASE]], align 8
+; CHECK-INLINE-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 32
+; CHECK-INLINE-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-INLINE-NEXT: [[TMP15:%.*]] = add i64 [[TMP12]], 8528
+; CHECK-INLINE-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-INLINE-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-INLINE-NEXT: store i64 1102416563, ptr [[TMP17]], align 8
+; CHECK-INLINE-NEXT: [[TMP18:%.*]] = add i64 [[TMP12]], 8
+; CHECK-INLINE-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-INLINE-NEXT: store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP19]], align 8
+; CHECK-INLINE-NEXT: [[TMP20:%.*]] = add i64 [[TMP12]], 16
+; CHECK-INLINE-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-INLINE-NEXT: store i64 ptrtoint (ptr @FuncletPersonality to i64), ptr [[TMP21]], align 8
+; CHECK-INLINE-NEXT: [[TMP22:%.*]] = lshr i64 [[TMP12]], 3
+; CHECK-INLINE-NEXT: [[TMP23:%.*]] = add i64 [[TMP22]], [[TMP1]]
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f1(i64 [[TMP23]], i64 4)
+; CHECK-INLINE-NEXT: [[TMP24:%.*]] = add i64 [[TMP23]], 1028
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP24]], i64 32)
+; CHECK-INLINE-NEXT: [[TMP25:%.*]] = add i64 [[TMP23]], 1060
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP25]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP26:%.*]] = add i64 [[TMP23]], 1061
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP26]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP27:%.*]] = add i64 [[TMP23]], 1062
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP27]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP28:%.*]] = add i64 [[TMP23]], 1063
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP28]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP29:%.*]] = add i64 [[TMP23]], 1064
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP29]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP30:%.*]] = add i64 [[TMP23]], 1065
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP30]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP31:%.*]] = add i64 [[TMP23]], 1066
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f8(i64 [[TMP31]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP32:%.*]] = add i64 [[TMP23]], 1067
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f3(i64 [[TMP32]], i64 1)
+; CHECK-INLINE-NEXT: [[TMP33:%.*]] = add i64 [[TMP23]], 1066
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP33]], i64 1)
+; CHECK-INLINE-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP16]])
+; CHECK-INLINE-NEXT: [[TMP34:%.*]] = lshr i64 [[TMP15]], 3
+; CHECK-INLINE-NEXT: [[TMP35:%.*]] = add i64 [[TMP34]], [[TMP1]]
+; CHECK-INLINE-NEXT: [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-INLINE-NEXT: [[TMP37:%.*]] = load i8, ptr [[TMP36]], align 1
+; CHECK-INLINE-NEXT: [[TMP38:%.*]] = icmp ne i8 [[TMP37]], 0
+; CHECK-INLINE-NEXT: br i1 [[TMP38]], label [[TMP39:%.*]], label [[TMP44:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-INLINE: 39:
+; CHECK-INLINE-NEXT: [[TMP40:%.*]] = and i64 [[TMP15]], 7
+; CHECK-INLINE-NEXT: [[TMP41:%.*]] = trunc i64 [[TMP40]] to i8
+; CHECK-INLINE-NEXT: [[TMP42:%.*]] = icmp sge i8 [[TMP41]], [[TMP37]]
+; CHECK-INLINE-NEXT: br i1 [[TMP42]], label [[TMP43:%.*]], label [[TMP44]]
+; CHECK-INLINE: 43:
+; CHECK-INLINE-NEXT: call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR8:[0-9]+]]
+; CHECK-INLINE-NEXT: unreachable
+; CHECK-INLINE: 44:
+; CHECK-INLINE-NEXT: store volatile i8 0, ptr [[TMP16]], align 1
+; CHECK-INLINE-NEXT: [[TMP45:%.*]] = add i64 [[TMP23]], 1066
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_f8(i64 [[TMP45]], i64 1)
+; CHECK-INLINE-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP16]])
+; CHECK-INLINE-NEXT: [[TMP46:%.*]] = alloca i8, i64 96, align 32
+; CHECK-INLINE-NEXT: [[TMP47:%.*]] = ptrtoint ptr [[TMP46]] to i64
+; CHECK-INLINE-NEXT: [[TMP48:%.*]] = add i64 [[TMP47]], 32
+; CHECK-INLINE-NEXT: call void @__asan_alloca_poison(i64 [[TMP48]], i64 4)
+; CHECK-INLINE-NEXT: [[TMP49:%.*]] = ptrtoint ptr [[TMP46]] to i64
+; CHECK-INLINE-NEXT: store i64 [[TMP49]], ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT: [[TMP50:%.*]] = inttoptr i64 [[TMP48]] to ptr
+; CHECK-INLINE-NEXT: [[TMP51:%.*]] = alloca i8, i64 96, align 32
+; CHECK-INLINE-NEXT: [[TMP52:%.*]] = ptrtoint ptr [[TMP51]] to i64
+; CHECK-INLINE-NEXT: [[TMP53:%.*]] = add i64 [[TMP52]], 32
+; CHECK-INLINE-NEXT: call void @__asan_alloca_poison(i64 [[TMP53]], i64 8)
+; CHECK-INLINE-NEXT: [[TMP54:%.*]] = ptrtoint ptr [[TMP51]] to i64
+; CHECK-INLINE-NEXT: store i64 [[TMP54]], ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT: [[TMP55:%.*]] = inttoptr i64 [[TMP53]] to ptr
+; CHECK-INLINE-NEXT: [[TMP56:%.*]] = lshr i64 [[TMP53]], 3
+; CHECK-INLINE-NEXT: [[TMP57:%.*]] = add i64 [[TMP56]], [[TMP1]]
+; CHECK-INLINE-NEXT: [[TMP58:%.*]] = inttoptr i64 [[TMP57]] to ptr
+; CHECK-INLINE-NEXT: [[TMP59:%.*]] = load i8, ptr [[TMP58]], align 1
+; CHECK-INLINE-NEXT: [[TMP60:%.*]] = icmp ne i8 [[TMP59]], 0
+; CHECK-INLINE-NEXT: br i1 [[TMP60]], label [[TMP61:%.*]], label [[TMP62:%.*]]
+; CHECK-INLINE: 61:
+; CHECK-INLINE-NEXT: call void @__asan_report_store8(i64 [[TMP53]]) #[[ATTR8]]
+; CHECK-INLINE-NEXT: unreachable
+; CHECK-INLINE: 62:
+; CHECK-INLINE-NEXT: store volatile i64 0, ptr [[TMP55]], align 8
+; CHECK-INLINE-NEXT: [[TMPCOPYI64:%.*]] = load i64, ptr [[TMP55]], align 8
+; CHECK-INLINE-NEXT: [[TMP63:%.*]] = and i64 [[TMPCOPYI64]], 31
+; CHECK-INLINE-NEXT: [[TMP64:%.*]] = sub i64 32, [[TMP63]]
+; CHECK-INLINE-NEXT: [[TMP65:%.*]] = icmp ne i64 [[TMP64]], 32
+; CHECK-INLINE-NEXT: [[TMP66:%.*]] = select i1 [[TMP65]], i64 [[TMP64]], i64 0
+; CHECK-INLINE-NEXT: [[TMP67:%.*]] = add i64 64, [[TMP66]]
+; CHECK-INLINE-NEXT: [[TMP68:%.*]] = add i64 [[TMPCOPYI64]], [[TMP67]]
+; CHECK-INLINE-NEXT: [[TMP69:%.*]] = alloca i8, i64 [[TMP68]], align 32
+; CHECK-INLINE-NEXT: [[TMP70:%.*]] = ptrtoint ptr [[TMP69]] to i64
+; CHECK-INLINE-NEXT: [[TMP71:%.*]] = add i64 [[TMP70]], 32
+; CHECK-INLINE-NEXT: call void @__asan_alloca_poison(i64 [[TMP71]], i64 [[TMPCOPYI64]])
+; CHECK-INLINE-NEXT: [[TMP72:%.*]] = ptrtoint ptr [[TMP69]] to i64
+; CHECK-INLINE-NEXT: store i64 [[TMP72]], ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT: [[TMP73:%.*]] = inttoptr i64 [[TMP71]] to ptr
+; CHECK-INLINE-NEXT: [[TMP74:%.*]] = lshr i64 [[TMP71]], 3
+; CHECK-INLINE-NEXT: [[TMP75:%.*]] = add i64 [[TMP74]], [[TMP1]]
+; CHECK-INLINE-NEXT: [[TMP76:%.*]] = inttoptr i64 [[TMP75]] to ptr
+; CHECK-INLINE-NEXT: [[TMP77:%.*]] = load i8, ptr [[TMP76]], align 1
+; CHECK-INLINE-NEXT: [[TMP78:%.*]] = icmp ne i8 [[TMP77]], 0
+; CHECK-INLINE-NEXT: br i1 [[TMP78]], label [[TMP79:%.*]], label [[TMP84:%.*]], !prof [[PROF0]]
+; CHECK-INLINE: 79:
+; CHECK-INLINE-NEXT: [[TMP80:%.*]] = and i64 [[TMP71]], 7
+; CHECK-INLINE-NEXT: [[TMP81:%.*]] = trunc i64 [[TMP80]] to i8
+; CHECK-INLINE-NEXT: [[TMP82:%.*]] = icmp sge i8 [[TMP81]], [[TMP77]]
+; CHECK-INLINE-NEXT: br i1 [[TMP82]], label [[TMP83:%.*]], label [[TMP84]]
+; CHECK-INLINE: 83:
+; CHECK-INLINE-NEXT: call void @__asan_report_store1(i64 [[TMP71]]) #[[ATTR8]]
+; CHECK-INLINE-NEXT: unreachable
+; CHECK-INLINE: 84:
+; CHECK-INLINE-NEXT: store volatile i8 0, ptr [[TMP73]], align 1
+; CHECK-INLINE-NEXT: invoke void @MayThrowFunc()
+; CHECK-INLINE-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[EHCLEANUP:%.*]]
+; CHECK-INLINE: invoke.cont:
+; CHECK-INLINE-NEXT: call void @DeInit(ptr [[TMP14]])
+; CHECK-INLINE-NEXT: [[TMP85:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-INLINE-NEXT: [[TMP86:%.*]] = load i64, ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT: call void @__asan_allocas_unpoison(i64 [[TMP86]], i64 [[TMP85]])
+; CHECK-INLINE-NEXT: store i64 1172321806, ptr [[TMP17]], align 8
+; CHECK-INLINE-NEXT: [[TMP87:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-INLINE-NEXT: br i1 [[TMP87]], label [[TMP88:%.*]], label [[TMP89:%.*]]
+; CHECK-INLINE: 88:
+; CHECK-INLINE-NEXT: call void @__asan_stack_free_8(i64 [[TMP7]], i64 8544)
+; CHECK-INLINE-NEXT: br label [[TMP91:%.*]]
+; CHECK-INLINE: 89:
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_00(i64 [[TMP23]], i64 4)
+; CHECK-INLINE-NEXT: [[TMP90:%.*]] = add i64 [[TMP23]], 1028
+; CHECK-INLINE-NEXT: call void @__asan_set_shadow_00(i64 [[TMP90]], i64 40)
+; CHECK-INLINE-NEXT: br label [[TMP91]]
+; CHECK-INLINE: 91:
+; CHECK-INLINE-NEXT: ret void
+; CHECK-INLINE: ehcleanup:
+; CHECK-INLINE-NEXT: [[TMP92:%.*]] = cleanuppad within none []
+; CHECK-INLINE-NEXT: unreachable
+;
+; CHECK-OUTLINE-LABEL: define void @FuncletPersonality(
+; CHECK-OUTLINE-SAME: ptr [[PTRPARAM:%.*]]) #[[ATTR4:[0-9]+]] personality ptr @__CxxFrameHandler3 {
+; CHECK-OUTLINE-NEXT: entry:
+; CHECK-OUTLINE-NEXT: [[TMP0:%.*]] = alloca i64, align 32
+; CHECK-OUTLINE-NEXT: store i64 0, ptr [[TMP0]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP1:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-OUTLINE-NEXT: [[ASAN_LOCAL_STACK_BASE:%.*]] = alloca i64, align 8
+; CHECK-OUTLINE-NEXT: [[TMP2:%.*]] = load i32, ptr @__asan_option_detect_stack_use_after_return, align 4
+; CHECK-OUTLINE-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-OUTLINE-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP6:%.*]]
+; CHECK-OUTLINE: 4:
+; CHECK-OUTLINE-NEXT: [[TMP5:%.*]] = call i64 @__asan_stack_malloc_8(i64 8608)
+; CHECK-OUTLINE-NEXT: br label [[TMP6]]
+; CHECK-OUTLINE: 6:
+; CHECK-OUTLINE-NEXT: [[TMP7:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[TMP4]] ]
+; CHECK-OUTLINE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
+; CHECK-OUTLINE-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP11:%.*]]
+; CHECK-OUTLINE: 9:
+; CHECK-OUTLINE-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 8608, align 32
+; CHECK-OUTLINE-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
+; CHECK-OUTLINE-NEXT: br label [[TMP11]]
+; CHECK-OUTLINE: 11:
+; CHECK-OUTLINE-NEXT: [[TMP12:%.*]] = phi i64 [ [[TMP7]], [[TMP6]] ], [ [[TMP10]], [[TMP9]] ]
+; CHECK-OUTLINE-NEXT: store i64 [[TMP12]], ptr [[ASAN_LOCAL_STACK_BASE]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 32
+; CHECK-OUTLINE-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-OUTLINE-NEXT: [[TMP15:%.*]] = add i64 [[TMP12]], 8528
+; CHECK-OUTLINE-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-OUTLINE-NEXT: [[TMP17:%.*]] = add i64 [[TMP12]], 8560
+; CHECK-OUTLINE-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+; CHECK-OUTLINE-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-OUTLINE-NEXT: store i64 1102416563, ptr [[TMP19]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP20:%.*]] = add i64 [[TMP12]], 8
+; CHECK-OUTLINE-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-OUTLINE-NEXT: store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP21]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP22:%.*]] = add i64 [[TMP12]], 16
+; CHECK-OUTLINE-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-OUTLINE-NEXT: store i64 ptrtoint (ptr @FuncletPersonality to i64), ptr [[TMP23]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP24:%.*]] = lshr i64 [[TMP12]], 3
+; CHECK-OUTLINE-NEXT: [[TMP25:%.*]] = add i64 [[TMP24]], [[TMP1]]
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f1(i64 [[TMP25]], i64 4)
+; CHECK-OUTLINE-NEXT: [[TMP26:%.*]] = add i64 [[TMP25]], 1028
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP26]], i64 32)
+; CHECK-OUTLINE-NEXT: [[TMP27:%.*]] = add i64 [[TMP25]], 1060
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP27]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP28:%.*]] = add i64 [[TMP25]], 1061
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP28]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP29:%.*]] = add i64 [[TMP25]], 1062
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP29]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP30:%.*]] = add i64 [[TMP25]], 1063
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP30]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP31:%.*]] = add i64 [[TMP25]], 1064
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP31]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP32:%.*]] = add i64 [[TMP25]], 1065
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP32]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP33:%.*]] = add i64 [[TMP25]], 1066
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f8(i64 [[TMP33]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP34:%.*]] = add i64 [[TMP25]], 1067
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP34]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP35:%.*]] = add i64 [[TMP25]], 1068
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f8(i64 [[TMP35]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP36:%.*]] = add i64 [[TMP25]], 1069
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f2(i64 [[TMP36]], i64 1)
+; CHECK-OUTLINE-NEXT: [[TMP37:%.*]] = add i64 [[TMP25]], 1071
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f3(i64 [[TMP37]], i64 5)
+; CHECK-OUTLINE-NEXT: [[TMP38:%.*]] = add i64 [[TMP25]], 1066
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_04(i64 [[TMP38]], i64 1)
+; CHECK-OUTLINE-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP16]])
+; CHECK-OUTLINE-NEXT: call void @__asan_store1(i64 [[TMP15]])
+; CHECK-OUTLINE-NEXT: store volatile i8 0, ptr [[TMP16]], align 1
+; CHECK-OUTLINE-NEXT: [[TMP39:%.*]] = add i64 [[TMP25]], 1066
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_f8(i64 [[TMP39]], i64 1)
+; CHECK-OUTLINE-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP16]])
+; CHECK-OUTLINE-NEXT: call void @__asan_store8(i64 [[TMP17]])
+; CHECK-OUTLINE-NEXT: store volatile i64 0, ptr [[TMP18]], align 8
+; CHECK-OUTLINE-NEXT: [[TMPCOPYI64:%.*]] = load i64, ptr [[TMP18]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP40:%.*]] = and i64 [[TMPCOPYI64]], 31
+; CHECK-OUTLINE-NEXT: [[TMP41:%.*]] = sub i64 32, [[TMP40]]
+; CHECK-OUTLINE-NEXT: [[TMP42:%.*]] = icmp ne i64 [[TMP41]], 32
+; CHECK-OUTLINE-NEXT: [[TMP43:%.*]] = select i1 [[TMP42]], i64 [[TMP41]], i64 0
+; CHECK-OUTLINE-NEXT: [[TMP44:%.*]] = add i64 64, [[TMP43]]
+; CHECK-OUTLINE-NEXT: [[TMP45:%.*]] = add i64 [[TMPCOPYI64]], [[TMP44]]
+; CHECK-OUTLINE-NEXT: [[TMP46:%.*]] = alloca i8, i64 [[TMP45]], align 32
+; CHECK-OUTLINE-NEXT: [[TMP47:%.*]] = ptrtoint ptr [[TMP46]] to i64
+; CHECK-OUTLINE-NEXT: [[TMP48:%.*]] = add i64 [[TMP47]], 32
+; CHECK-OUTLINE-NEXT: call void @__asan_alloca_poison(i64 [[TMP48]], i64 [[TMPCOPYI64]])
+; CHECK-OUTLINE-NEXT: [[TMP49:%.*]] = ptrtoint ptr [[TMP46]] to i64
+; CHECK-OUTLINE-NEXT: store i64 [[TMP49]], ptr [[TMP0]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP50:%.*]] = inttoptr i64 [[TMP48]] to ptr
+; CHECK-OUTLINE-NEXT: call void @__asan_store1(i64 [[TMP48]])
+; CHECK-OUTLINE-NEXT: store volatile i8 0, ptr [[TMP50]], align 1
+; CHECK-OUTLINE-NEXT: invoke void @MayThrowFunc()
+; CHECK-OUTLINE-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[EHCLEANUP:%.*]]
+; CHECK-OUTLINE: invoke.cont:
+; CHECK-OUTLINE-NEXT: call void @DeInit(ptr [[TMP14]])
+; CHECK-OUTLINE-NEXT: [[TMP51:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-OUTLINE-NEXT: [[TMP52:%.*]] = load i64, ptr [[TMP0]], align 8
+; CHECK-OUTLINE-NEXT: call void @__asan_allocas_unpoison(i64 [[TMP52]], i64 [[TMP51]])
+; CHECK-OUTLINE-NEXT: store i64 1172321806, ptr [[TMP19]], align 8
+; CHECK-OUTLINE-NEXT: [[TMP53:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-OUTLINE-NEXT: br i1 [[TMP53]], label [[TMP54:%.*]], label [[TMP55:%.*]]
+; CHECK-OUTLINE: 54:
+; CHECK-OUTLINE-NEXT: call void @__asan_stack_free_8(i64 [[TMP7]], i64 8608)
+; CHECK-OUTLINE-NEXT: br label [[TMP58:%.*]]
+; CHECK-OUTLINE: 55:
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_00(i64 [[TMP25]], i64 4)
+; CHECK-OUTLINE-NEXT: [[TMP56:%.*]] = add i64 [[TMP25]], 1028
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_00(i64 [[TMP56]], i64 42)
+; CHECK-OUTLINE-NEXT: [[TMP57:%.*]] = add i64 [[TMP25]], 1071
+; CHECK-OUTLINE-NEXT: call void @__asan_set_shadow_00(i64 [[TMP57]], i64 5)
+; CHECK-OUTLINE-NEXT: br label [[TMP58]]
+; CHECK-OUTLINE: 58:
+; CHECK-OUTLINE-NEXT: ret void
+; CHECK-OUTLINE: ehcleanup:
+; CHECK-OUTLINE-NEXT: [[TMP59:%.*]] = cleanuppad within none []
+; CHECK-OUTLINE-NEXT: unreachable
+;
+
+
+entry:
+ ; Large enough local alloca to have asan generate a __asan_stack_free_#() call
+ %largeObj = alloca [2048 x i32], align 16
+ %tmpInt1 = alloca i32, align 4
+ %tmpInt2 = alloca i32, align 4
+ %tmpInt3 = alloca i32, align 4
+
+ ; Creating %lifetimeInt and %lifetimeArr, and managing their lifetimes
+ ; to make asan generate stack poisoning calls
+ %lifetimeInt = alloca i32, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr %lifetimeInt)
+ store volatile i8 0, ptr %lifetimeInt
+ call void @llvm.lifetime.end.p0(i64 4, ptr %lifetimeInt)
+ %lifetimeArr = alloca i32, align 4
+
+ ; Dynamic alloca to generate a @__asan_allocas_unpoison call in ehcleanup
+ %tmpVolatilei64 = alloca i64, align 8
+ store volatile i64 0, ptr %tmpVolatilei64, align 8
+ %tmpCopyi64 = load i64, ptr %tmpVolatilei64, align 8
+ %tmpVolatilei8 = alloca i8, i64 %tmpCopyi64, align 32
+ store volatile i8 0, ptr %tmpVolatilei8
+
+ invoke void @MayThrowFunc()
+ to label %invoke.cont unwind label %ehcleanup
+invoke.cont: ; preds = %entry
+ call void @DeInit(ptr %largeObj)
+ ret void
+
+ehcleanup: ; preds = %entry
+ %0 = cleanuppad within none []
+
+ ; Make asan add a call to __asan_unpoison_stack_memory
+ call void @llvm.lifetime.start.p0(i64 4, ptr %lifetimeArr)
+ ; Make asan add a call to __asan_report_store1
+ store volatile i8 0, ptr %lifetimeArr
+ ; Make asan add a call to __asan_poison_stack_memory
+ call void @llvm.lifetime.end.p0(i64 4, ptr %lifetimeArr)
+
+ call void @DeInit(ptr %largeObj) [ "funclet"(token %0) ]
+ call void @llvm.memset.p0.i64(ptr align 4 %tmpInt1, i8 0, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmpInt2, ptr align 4 %tmpInt1, i64 4, i1 false)
+ call void @llvm.memmove.p0.p0.i64(ptr align 4 %tmpInt3, ptr align 4 %tmpInt1, i64 4, i1 false)
+ %cmpAddr = icmp ule ptr %tmpInt1, %tmpInt2
+ %addr1 = ptrtoint ptr %tmpInt1 to i64
+ %addr2 = ptrtoint ptr %tmpInt2 to i64
+ %subAddr = sub i64 %addr1, %addr2
+
+ store i64 0, ptr %ptrParam, align 1
+
+ %cmp = icmp ne i64 %subAddr, 0
+ br i1 %cmp, label %ehexit, label %noreturncall
+
+noreturncall:
+ call void @NoReturn(ptr null, ptr null) noreturn [ "funclet"(token %0) ]
+ unreachable
+
+ehexit:
+ cleanupret from %0 unwind to caller
+
+; Ensure unreachable basic block doesn't make the compiler assert, as it's a special case for coloring computation.
+nopredecessor:
+ call void @llvm.memset.p0.i64(ptr align 4 %tmpInt1, i8 0, i64 4, i1 false)
+ unreachable
+}
+
+; Non-Windows personality, ensure no funclet gets attached to asan runtime call.
+define void @OtherPersonality(ptr %ptrParam) sanitize_address personality ptr @dummyPersonality {
+; CHECK-LABEL: define void @OtherPersonality(
+; CHECK-SAME: ptr [[PTRPARAM:%.*]]) #[[ATTR4:[0-9]+]] personality ptr @dummyPersonality {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT: [[ASAN_LOCAL_STACK_BASE:%.*]] = alloca i64, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__asan_option_detect_stack_use_after_return, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP5:%.*]]
+; CHECK: 3:
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @__asan_stack_malloc_0(i64 64)
+; CHECK-NEXT: br label [[TMP5]]
+; CHECK: 5:
+; CHECK-NEXT: [[TMP6:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[TMP4]], [[TMP3]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[TMP6]], 0
+; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP10:%.*]]
+; CHECK: 8:
+; CHECK-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
+; CHECK-NEXT: br label [[TMP10]]
+; CHECK: 10:
+; CHECK-NEXT: [[TMP11:%.*]] = phi i64 [ [[TMP6]], [[TMP5]] ], [ [[TMP9]], [[TMP8]] ]
+; CHECK-NEXT: store i64 [[TMP11]], ptr [[ASAN_LOCAL_STACK_BASE]], align 8
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 32
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: store i64 1102416563, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], 8
+; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT: store i64 ptrtoint (ptr @___asan_gen_.1 to i64), ptr [[TMP16]], align 8
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[TMP11]], 16
+; CHECK-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+; CHECK-NEXT: store i64 ptrtoint (ptr @OtherPersonality to i64), ptr [[TMP18]], align 8
+; CHECK-NEXT: [[TMP19:%.*]] = lshr i64 [[TMP11]], 3
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], [[TMP0]]
+; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[TMP20]], 0
+; CHECK-NEXT: call void @__asan_set_shadow_f1(i64 [[TMP21]], i64 4)
+; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[TMP20]], 4
+; CHECK-NEXT: call void @__asan_set_shadow_04(i64 [[TMP22]], i64 1)
+; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[TMP20]], 5
+; CHECK-NEXT: call void @__asan_set_shadow_f3(i64 [[TMP23]], i64 3)
+; CHECK-NEXT: invoke void @MayThrowFunc()
+; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[EHCLEANUP:%.*]]
+; CHECK: invoke.cont:
+; CHECK-NEXT: store i64 1172321806, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne i64 [[TMP6]], 0
+; CHECK-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP26:%.*]]
+; CHECK: 25:
+; CHECK-NEXT: call void @__asan_stack_free_0(i64 [[TMP6]], i64 64)
+; CHECK-NEXT: br label [[TMP28:%.*]]
+; CHECK: 26:
+; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP20]], 0
+; CHECK-NEXT: call void @__asan_set_shadow_00(i64 [[TMP27]], i64 8)
+; CHECK-NEXT: br label [[TMP28]]
+; CHECK: 28:
+; CHECK-NEXT: ret void
+; CHECK: ehcleanup:
+; CHECK-NEXT: [[TMP29:%.*]] = cleanuppad within none []
+; CHECK-NEXT: [[TMP30:%.*]] = call ptr @__asan_memset(ptr [[TMP13]], i32 0, i64 4)
+; CHECK-NEXT: store i64 1172321806, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[TMP31:%.*]] = icmp ne i64 [[TMP6]], 0
+; CHECK-NEXT: br i1 [[TMP31]], label [[TMP32:%.*]], label [[TMP33:%.*]]
+; CHECK: 32:
+; CHECK-NEXT: call void @__asan_stack_free_0(i64 [[TMP6]], i64 64)
+; CHECK-NEXT: br label [[TMP35:%.*]]
+; CHECK: 33:
+; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[TMP20]], 0
+; CHECK-NEXT: call void @__asan_set_shadow_00(i64 [[TMP34]], i64 8)
+; CHECK-NEXT: br label [[TMP35]]
+; CHECK: 35:
+; CHECK-NEXT: cleanupret from [[TMP29]] unwind to caller
+;
+entry:
+ %tmpInt = alloca i32, align 4
+ invoke void @MayThrowFunc()
+ to label %invoke.cont unwind label %ehcleanup
+invoke.cont: ; preds = %entry
+ ret void
+
+ehcleanup: ; preds = %entry
+ %0 = cleanuppad within none []
+ call void @llvm.memset.p0.i64(ptr align 4 %tmpInt, i8 0, i64 4, i1 false)
+ cleanupret from %0 unwind to caller
+}
+;.
+; CHECK-INLINE: [[PROF0]] = !{!"branch_weights", i32 1, i32 100000}
+;.
diff --git a/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s b/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s
index 235b7d448099..3a5af86defc5 100644
--- a/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s
+++ b/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s
@@ -8,6 +8,10 @@
.p2align 2
_locomotive:
.cfi_startproc
+ ; An N_ALT_ENTRY symbol can be defined in the middle of a subsection, so
+ ; these are opted out of the .cfi_{start,end}proc nesting check.
+ .alt_entry _engineer
+_engineer:
ret
; It is invalid to have a non-private label between .cfi_startproc and
@@ -17,7 +21,7 @@ _locomotive:
.p2align 2
_caboose:
; DARWIN: [[#@LINE-1]]:1: error: non-private labels cannot appear between .cfi_startproc / .cfi_endproc pairs
-; DARWIN: [[#@LINE-10]]:2: error: previous .cfi_startproc was here
+; DARWIN: [[#@LINE-14]]:2: error: previous .cfi_startproc was here
ret
.cfi_endproc
diff --git a/llvm/test/MC/AMDGPU/gfx1011_err.s b/llvm/test/MC/AMDGPU/gfx1011_err.s
index 4b37aaf221e3..a86e48a29c78 100644
--- a/llvm/test/MC/AMDGPU/gfx1011_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1011_err.s
@@ -17,7 +17,7 @@ v_dot8c_i32_i4 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
s_getreg_b32 s2, hwreg(HW_REG_SHADER_CYCLES)
-// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
v_fma_legacy_f32 v0, v1, v2, v3
// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1030_err.s b/llvm/test/MC/AMDGPU/gfx1030_err.s
index ba8784a39c36..f4ab5fe5b14a 100644
--- a/llvm/test/MC/AMDGPU/gfx1030_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1030_err.s
@@ -25,7 +25,7 @@ s_get_waveid_in_workgroup s0
// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
s_getreg_b32 s2, hwreg(HW_REG_XNACK_MASK)
-// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
v_mac_f32 v0, v1, v2
// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx10_err_pos.s b/llvm/test/MC/AMDGPU/gfx10_err_pos.s
index 1d34f00ee0f9..c2679db3b2ac 100644
--- a/llvm/test/MC/AMDGPU/gfx10_err_pos.s
+++ b/llvm/test/MC/AMDGPU/gfx10_err_pos.s
@@ -448,7 +448,7 @@ ds_swizzle_b32 v8, v2 offset:SWZ(QUAD_PERM, 0, 1, 2, 3)
// expected a hwreg macro or an absolute expression
s_setreg_b32 undef, s2
-// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: expected a hwreg macro or an absolute expression
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: expected a hwreg macro, structured immediate or an absolute expression
// CHECK-NEXT:{{^}}s_setreg_b32 undef, s2
// CHECK-NEXT:{{^}} ^
@@ -621,10 +621,10 @@ s_setreg_b32 hwreg(3,0,33), s2
// CHECK-NEXT:{{^}} ^
//==============================================================================
-// invalid code of hardware register: only 6-bit values are legal
+// invalid hardware register: only 6-bit values are legal
s_setreg_b32 hwreg(0x40), s2
-// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid code of hardware register: only 6-bit values are legal
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: only 6-bit values are legal
// CHECK-NEXT:{{^}}s_setreg_b32 hwreg(0x40), s2
// CHECK-NEXT:{{^}} ^
@@ -1158,10 +1158,10 @@ v_movrels_b32_sdwa v0, shared_base
// CHECK-NEXT:{{^}} ^
//==============================================================================
-// specified hardware register is not supported on this GPU
+// invalid hardware register: not supported on this GPU
s_getreg_b32 s2, hwreg(HW_REG_SHADER_CYCLES)
-// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// CHECK-NEXT:{{^}}s_getreg_b32 s2, hwreg(HW_REG_SHADER_CYCLES)
// CHECK-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx11_unsupported.s b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
index bfca71ae3a01..f447263c3022 100644
--- a/llvm/test/MC/AMDGPU/gfx11_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
@@ -2052,3 +2052,15 @@ global_atomic_cond_sub_u32 v0, v2, s[0:1] offset:64
global_atomic_ordered_add_b64 v0, v[2:3], s[0:1] offset:64
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_u32 v1, v2
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_rtn_u32 v5, v1, v2
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_u64 v1, v[2:3]
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s b/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s
index aa063c8800aa..057e99330bca 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s
@@ -27,5 +27,11 @@ ds_min_rtn_f64 v[5:6], v1, v[2:3]
ds_subrev_u32 v1, v2
// GFX12: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+ds_subrev_rtn_u32 v5, v1, v2
+// GFX12: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
ds_subrev_u64 v1, v[2:3]
// GFX12: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_subrev_rtn_u64 v[5:6], v1, v[2:3]
+// GFX12: ds_rsub_rtn_u64 v[5:6], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x05]
diff --git a/llvm/test/MC/AMDGPU/gfx940_asm_features.s b/llvm/test/MC/AMDGPU/gfx940_asm_features.s
index 5ee9480677be..e208b6cf903d 100644
--- a/llvm/test/MC/AMDGPU/gfx940_asm_features.s
+++ b/llvm/test/MC/AMDGPU/gfx940_asm_features.s
@@ -197,23 +197,23 @@ scratch_load_lds_ushort v2, off
// GFX940: scratch_load_lds_sshort v2, off ; encoding: [0x00,0x60,0xa4,0xdc,0x02,0x00,0x7f,0x00]
scratch_load_lds_sshort v2, off
-// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// GFX940: s_getreg_b32 s1, hwreg(HW_REG_XCC_ID) ; encoding: [0x14,0xf8,0x81,0xb8]
s_getreg_b32 s1, hwreg(HW_REG_XCC_ID)
-// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// GFX940: s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_DATA) ; encoding: [0x15,0xf8,0x81,0xb8]
s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_DATA)
-// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// GFX940: s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_DATA1) ; encoding: [0x16,0xf8,0x81,0xb8]
s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_DATA1)
-// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// GFX940: s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_PC_LO) ; encoding: [0x17,0xf8,0x81,0xb8]
s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_PC_LO)
-// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOT-GFX940: :[[@LINE+2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// GFX940: s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_PC_HI) ; encoding: [0x18,0xf8,0x81,0xb8]
s_getreg_b32 s1, hwreg(HW_REG_SQ_PERF_SNAPSHOT_PC_HI)
diff --git a/llvm/test/MC/AMDGPU/gfx940_err.s b/llvm/test/MC/AMDGPU/gfx940_err.s
index 515b89513a80..000f3decf960 100644
--- a/llvm/test/MC/AMDGPU/gfx940_err.s
+++ b/llvm/test/MC/AMDGPU/gfx940_err.s
@@ -97,22 +97,22 @@ v_cvt_pk_fp8_f32 v1, v2, v3 mul:2
// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
s_getreg_b32 s1, hwreg(HW_REG_FLAT_SCR_LO)
-// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_getreg_b32 s1, hwreg(HW_REG_FLAT_SCR_HI)
-// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_getreg_b32 s1, hwreg(HW_REG_XNACK_MASK)
-// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_getreg_b32 s1, hwreg(HW_REG_HW_ID1)
-// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_getreg_b32 s1, hwreg(HW_REG_HW_ID2)
-// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_getreg_b32 s1, hwreg(HW_REG_POPS_PACKER)
-// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
ds_ordered_count v5, v1 offset:65535 gds
// GFX940: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/sopk-err.s b/llvm/test/MC/AMDGPU/sopk-err.s
index 504ee1d11cbc..cd92343b0e7f 100644
--- a/llvm/test/MC/AMDGPU/sopk-err.s
+++ b/llvm/test/MC/AMDGPU/sopk-err.s
@@ -5,48 +5,127 @@
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1010 -show-encoding %s | FileCheck -check-prefix=GFX10 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -show-encoding %s | FileCheck -check-prefix=GFX11 %s
-// RUN: not llvm-mc -triple=amdgcn %s 2>&1 | FileCheck -check-prefixes=GCN,SICIVI-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=tahiti %s 2>&1 | FileCheck -check-prefixes=GCN,SICIVI-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=tonga %s 2>&1 | FileCheck -check-prefixes=GCN,SICIVI-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx900 %s 2>&1 | FileCheck -check-prefixes=GCN,GFX9-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1010 %s 2>&1 | FileCheck -check-prefixes=GCN,GFX10-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 %s 2>&1 | FileCheck -check-prefixes=GCN,GFX11-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn %s 2>&1 | FileCheck -check-prefixes=GCN,SICIVI-ERR --implicit-check-not=error: --strict-whitespace %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=tahiti %s 2>&1 | FileCheck -check-prefixes=GCN,SICIVI-ERR --implicit-check-not=error: --strict-whitespace %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=tonga %s 2>&1 | FileCheck -check-prefixes=GCN,SICIVI-ERR --implicit-check-not=error: --strict-whitespace %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx900 %s 2>&1 | FileCheck -check-prefixes=GCN,GFX9-ERR --implicit-check-not=error: --strict-whitespace %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1010 %s 2>&1 | FileCheck -check-prefixes=GCN,GFX10-ERR --implicit-check-not=error: --strict-whitespace %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 %s 2>&1 | FileCheck -check-prefixes=GCN,GFX11-ERR --implicit-check-not=error: --strict-whitespace %s
s_setreg_b32 0x1f803, s2
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid immediate: only 16-bit values are legal
+// GCN-NEXT: {{^}}s_setreg_b32 0x1f803, s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 typo(0x40), s2
-// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: expected a hwreg macro or an absolute expression
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: expected a hwreg macro, structured immediate or an absolute expression
+// GCN-NEXT: {{^}}s_setreg_b32 typo(0x40), s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 hwreg(0x40), s2
-// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid code of hardware register: only 6-bit values are legal
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: only 6-bit values are legal
+// GCN-NEXT: {{^}}s_setreg_b32 hwreg(0x40), s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 0x40}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: only 6-bit values are legal
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 0x40}, s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 hwreg(HW_REG_WRONG), s2
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: expected a register name or an absolute expression
+// GCN-NEXT: {{^}}s_setreg_b32 hwreg(HW_REG_WRONG), s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 hwreg(1 2,3), s2
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: expected a comma or a closing parenthesis
+// GCN-NEXT: {{^}}s_setreg_b32 hwreg(1 2,3), s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 hwreg(1,2 3), s2
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: expected a comma
+// GCN-NEXT: {{^}}s_setreg_b32 hwreg(1,2 3), s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 hwreg(1,2,3, s2
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: expected a closing parenthesis
+// GCN-NEXT: {{^}}s_setreg_b32 hwreg(1,2,3, s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 1 offset: 2, size: 3}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: comma or closing brace expected
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 1 offset: 2, size: 3}, s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 1 offset: 2, size: 3}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: comma or closing brace expected
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 1 offset: 2, size: 3}, s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id 1, offset: 2, size: 3}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: colon expected
+// GCN-NEXT: {{^}}s_setreg_b32 {id 1, offset: 2, size: 3}, s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 1, offset: 2, size: 3, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: colon expected
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 1, offset: 2, size: 3, s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 1, offset: 2, blah: 3}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: unknown field
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 1, offset: 2, blah: 3}, s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 1, id: 2}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: duplicate field
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 1, id: 2}, s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 hwreg(3,32,32), s2
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bit offset: only 5-bit values are legal
+// GCN-NEXT: {{^}}s_setreg_b32 hwreg(3,32,32), s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 3, offset: 32, size: 32}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bit offset: only 5-bit values are legal
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 3, offset: 32, size: 32}, s2
+// GCN-NEXT: {{^}} ^
s_setreg_b32 hwreg(3,0,33), s2
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bitfield width: only values from 1 to 32 are legal
+// GCN-NEXT: {{^}}s_setreg_b32 hwreg(3,0,33), s2
+// GCN-NEXT: {{^}} ^
+
+s_setreg_b32 {id: 3, offset: 0, size: 33}, s2
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bitfield width: only values from 1 to 32 are legal
+// GCN-NEXT: {{^}}s_setreg_b32 {id: 3, offset: 0, size: 33}, s2
+// GCN-NEXT: {{^}} ^
s_setreg_imm32_b32 0x1f803, 0xff
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid immediate: only 16-bit values are legal
+// GCN-NEXT: {{^}}s_setreg_imm32_b32 0x1f803, 0xff
+// GCN-NEXT: {{^}} ^
s_setreg_imm32_b32 hwreg(3,0,33), 0xff
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bitfield width: only values from 1 to 32 are legal
+// GCN-NEXT: {{^}}s_setreg_imm32_b32 hwreg(3,0,33), 0xff
+// GCN-NEXT: {{^}} ^
+
+s_setreg_imm32_b32 {id: 3, offset: 0, size: 33}, 0xff
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bitfield width: only values from 1 to 32 are legal
+// GCN-NEXT: {{^}}s_setreg_imm32_b32 {id: 3, offset: 0, size: 33}, 0xff
+// GCN-NEXT: {{^}} ^
s_getreg_b32 s2, hwreg(3,32,32)
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bit offset: only 5-bit values are legal
+// GCN-NEXT: {{^}}s_getreg_b32 s2, hwreg(3,32,32)
+// GCN-NEXT: {{^}} ^
+
+s_getreg_b32 s2, {id: 3, offset: 32, size: 32}
+// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid bit offset: only 5-bit values are legal
+// GCN-NEXT: {{^}}s_getreg_b32 s2, {id: 3, offset: 32, size: 32}
+// GCN-NEXT: {{^}} ^
s_cbranch_i_fork s[2:3], 0x6
// SICI: s_cbranch_i_fork s[2:3], 6 ; encoding: [0x06,0x00,0x82,0xb8]
@@ -57,69 +136,109 @@ s_cbranch_i_fork s[2:3], 0x6
s_getreg_b32 s2, hwreg(HW_REG_SH_MEM_BASES)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_SH_MEM_BASES) ; encoding: [0x0f,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_SH_MEM_BASES)
+// SICIVI-ERR-NEXT: {{^}} ^
// GFX9: s_getreg_b32 s2, hwreg(HW_REG_SH_MEM_BASES) ; encoding: [0x0f,0xf8,0x82,0xb8]
// GFX11: s_getreg_b32 s2, hwreg(HW_REG_SH_MEM_BASES) ; encoding: [0x0f,0xf8,0x82,0xb8]
s_getreg_b32 s2, hwreg(HW_REG_TBA_LO)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_TBA_LO) ; encoding: [0x10,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX11-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TBA_LO)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX11-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX11-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TBA_LO)
+// GFX11-ERR-NEXT: {{^}} ^
// GFX9: s_getreg_b32 s2, hwreg(HW_REG_TBA_LO) ; encoding: [0x10,0xf8,0x82,0xb8]
s_getreg_b32 s2, hwreg(HW_REG_TBA_HI)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_TBA_HI) ; encoding: [0x11,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX11-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TBA_HI)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX11-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX11-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TBA_HI)
+// GFX11-ERR-NEXT: {{^}} ^
// GFX9: s_getreg_b32 s2, hwreg(HW_REG_TBA_HI) ; encoding: [0x11,0xf8,0x82,0xb8]
s_getreg_b32 s2, hwreg(HW_REG_TMA_LO)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_TMA_LO) ; encoding: [0x12,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX11-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TMA_LO)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX11-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX11-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TMA_LO)
+// GFX11-ERR-NEXT: {{^}} ^
// GFX9: s_getreg_b32 s2, hwreg(HW_REG_TMA_LO) ; encoding: [0x12,0xf8,0x82,0xb8]
s_getreg_b32 s2, hwreg(HW_REG_TMA_HI)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_TMA_HI) ; encoding: [0x13,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX11-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TMA_HI)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX11-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX11-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_TMA_HI)
+// GFX11-ERR-NEXT: {{^}} ^
// GFX9: s_getreg_b32 s2, hwreg(HW_REG_TMA_HI) ; encoding: [0x13,0xf8,0x82,0xb8]
s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_LO)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_LO) ; encoding: [0x14,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX9-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_LO)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX9-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX9-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_LO)
+// GFX9-ERR-NEXT: {{^}} ^
// GFX11: s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_LO) ; encoding: [0x14,0xf8,0x82,0xb8]
s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_HI)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_HI) ; encoding: [0x15,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX9-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_HI)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX9-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX9-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_HI)
+// GFX9-ERR-NEXT: {{^}} ^
// GFX11: s_getreg_b32 s2, hwreg(HW_REG_FLAT_SCR_HI) ; encoding: [0x15,0xf8,0x82,0xb8]
s_getreg_b32 s2, hwreg(HW_REG_XNACK_MASK)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_XNACK_MASK) ; encoding: [0x16,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX9-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX11-ERR: :[[@LINE-4]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_XNACK_MASK)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX9-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX11-ERR: :[[@LINE-6]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_getreg_b32 s2, hwreg(HW_REG_POPS_PACKER)
// GFX10: s_getreg_b32 s2, hwreg(HW_REG_POPS_PACKER) ; encoding: [0x19,0xf8,0x02,0xb9]
-// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX9-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// GFX11-ERR: :[[@LINE-4]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// SICIVI-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// SICIVI-ERR-NEXT: {{^}}s_getreg_b32 s2, hwreg(HW_REG_POPS_PACKER)
+// SICIVI-ERR-NEXT: {{^}} ^
+// GFX9-ERR: :[[@LINE-5]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX11-ERR: :[[@LINE-6]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_cmpk_le_u32 s2, -1
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GCN-NEXT: {{^}}s_cmpk_le_u32 s2, -1
+// GCN-NEXT: {{^}} ^
s_cmpk_le_u32 s2, 0x1ffff
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GCN-NEXT: {{^}}s_cmpk_le_u32 s2, 0x1ffff
+// GCN-NEXT: {{^}} ^
s_cmpk_le_u32 s2, 0x10000
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GCN-NEXT: {{^}}s_cmpk_le_u32 s2, 0x10000
+// GCN-NEXT: {{^}} ^
s_mulk_i32 s2, 0xFFFFFFFFFFFF0000
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GCN-NEXT: {{^}}s_mulk_i32 s2, 0xFFFFFFFFFFFF0000
+// GCN-NEXT: {{^}} ^
s_mulk_i32 s2, 0x10000
// GCN: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GCN-NEXT: {{^}}s_mulk_i32 s2, 0x10000
+// GCN-NEXT: {{^}} ^
diff --git a/llvm/test/MC/AMDGPU/sopk.s b/llvm/test/MC/AMDGPU/sopk.s
index 2b20c35aa771..c912b83ca61c 100644
--- a/llvm/test/MC/AMDGPU/sopk.s
+++ b/llvm/test/MC/AMDGPU/sopk.s
@@ -158,6 +158,12 @@ s_getreg_b32 s2, hwreg(51, 1, 31)
// GFX10: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
// GFX11: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+s_getreg_b32 s2, {id: 51, offset: 1, size: 31}
+// SICI: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
+// VI9: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+// GFX10: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
+// GFX11: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+
// HW register code of unknown HW register, default offset/width
s_getreg_b32 s2, hwreg(51)
// SICI: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x02,0xb9]
@@ -165,6 +171,27 @@ s_getreg_b32 s2, hwreg(51)
// GFX10: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x02,0xb9]
// GFX11: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x82,0xb8]
+// Structured form using default values.
+s_getreg_b32 s2, {id: 51}
+// SICI: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x02,0xb9]
+// VI9: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x82,0xb8]
+// GFX10: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x02,0xb9]
+// GFX11: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x82,0xb8]
+
+// Fields may come in any order.
+s_getreg_b32 s2, {size: 32, id: 51}
+// SICI: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x02,0xb9]
+// VI9: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x82,0xb8]
+// GFX10: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x02,0xb9]
+// GFX11: s_getreg_b32 s2, hwreg(51) ; encoding: [0x33,0xf8,0x82,0xb8]
+
+// Empty field lists are allowed.
+s_getreg_b32 s2, {}
+// SICI: s_getreg_b32 s2, hwreg(0) ; encoding: [0x00,0xf8,0x02,0xb9]
+// VI9: s_getreg_b32 s2, hwreg(0) ; encoding: [0x00,0xf8,0x82,0xb8]
+// GFX10: s_getreg_b32 s2, hwreg(0) ; encoding: [0x00,0xf8,0x02,0xb9]
+// GFX11: s_getreg_b32 s2, hwreg(0) ; encoding: [0x00,0xf8,0x82,0xb8]
+
// HW register code of unknown HW register, valid symbolic name range but no name available
s_getreg_b32 s2, hwreg(10)
// SICI: s_getreg_b32 s2, hwreg(10) ; encoding: [0x0a,0xf8,0x02,0xb9]
@@ -271,17 +298,17 @@ s_setreg_b32 hwreg(HW_REG_HW_ID), s2
// SICI: s_setreg_b32 hwreg(HW_REG_HW_ID), s2 ; encoding: [0x04,0xf8,0x82,0xb9]
// VI9: s_setreg_b32 hwreg(HW_REG_HW_ID), s2 ; encoding: [0x04,0xf8,0x02,0xb9]
// GFX10: s_setreg_b32 hwreg(HW_REG_HW_ID1), s2 ; encoding: [0x17,0xf8,0x82,0xb9]
-// NOGFX11: :[[@LINE-4]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOGFX11: :[[@LINE-4]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
s_setreg_b32 hwreg(HW_REG_HW_ID1), s2
-// NOSICIVI: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// NOGFX9: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOSICIVI: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// NOGFX9: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// GFX10: s_setreg_b32 hwreg(HW_REG_HW_ID1), s2 ; encoding: [0x17,0xf8,0x82,0xb9]
// GFX11: s_setreg_b32 hwreg(HW_REG_HW_ID1), s2 ; encoding: [0x17,0xf8,0x02,0xb9]
s_setreg_b32 hwreg(HW_REG_HW_ID2), s2
-// NOSICIVI: :[[@LINE-1]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
-// NOGFX9: :[[@LINE-2]]:{{[0-9]+}}: error: specified hardware register is not supported on this GPU
+// NOSICIVI: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// NOGFX9: :[[@LINE-2]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
// GFX10: s_setreg_b32 hwreg(HW_REG_HW_ID2), s2 ; encoding: [0x18,0xf8,0x82,0xb9]
// GFX11: s_setreg_b32 hwreg(HW_REG_HW_ID2), s2 ; encoding: [0x18,0xf8,0x02,0xb9]
@@ -427,12 +454,24 @@ s_getreg_b32 s2, hwreg(reg + 1, offset - 1, width + 1)
// GFX10: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
// GFX11: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+s_getreg_b32 s2, {id: reg + 1, offset: offset - 1, size: width + 1}
+// SICI: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
+// VI9: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+// GFX10: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
+// GFX11: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+
s_getreg_b32 s2, hwreg(1 + reg, -1 + offset, 1 + width)
// SICI: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
// VI9: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
// GFX10: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
// GFX11: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+s_getreg_b32 s2, {id: 1 + reg, offset: -1 + offset, size: 1 + width}
+// SICI: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
+// VI9: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+// GFX10: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x02,0xb9]
+// GFX11: s_getreg_b32 s2, hwreg(51, 1, 31) ; encoding: [0x73,0xf0,0x82,0xb8]
+
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt b/llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt
new file mode 100644
index 000000000000..df41bdf39fd6
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt
@@ -0,0 +1,118 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s --check-prefixes=ATT
+# RUN: llvm-mc --disassemble %s -triple=x86_64 -x86-asm-syntax=intel --output-asm-variant=1 | FileCheck %s --check-prefixes=INTEL
+
+## invpcid
+
+# ATT: invpcid 123(%rax,%rbx,4), %r9
+# INTEL: invpcid r9, xmmword ptr [rax + 4*rbx + 123]
+0x62,0x74,0xfe,0x08,0xf2,0x4c,0x98,0x7b
+
+# ATT: invpcid 291(%r28,%r29,4), %r19
+# INTEL: invpcid r19, xmmword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xfa,0x08,0xf2,0x9c,0xac,0x23,0x01,0x00,0x00
+
+## invept
+
+# ATT: invept 291(%r28,%r29,4), %r19
+# INTEL: invept r19, xmmword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xfa,0x08,0xf0,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: invept 123(%rax,%rbx,4), %r9
+# INTEL: invept r9, xmmword ptr [rax + 4*rbx + 123]
+0x62,0x74,0xfe,0x08,0xf0,0x4c,0x98,0x7b
+
+## invvpid
+
+# ATT: invvpid 291(%r28,%r29,4), %r19
+# INTEL: invvpid r19, xmmword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xfa,0x08,0xf1,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: invvpid 123(%rax,%rbx,4), %r9
+# INTEL: invvpid r9, xmmword ptr [rax + 4*rbx + 123]
+0x62,0x74,0xfe,0x08,0xf1,0x4c,0x98,0x7b
+
+## adc
+
+# ATT: {evex} adcb $123, %bl
+# INTEL: {evex} adc bl, 123
+0x62,0xf4,0xfc,0x08,0x80,0xd3,0x7b
+
+# ATT: adcb $123, %bl, %cl
+# INTEL: adc cl, bl, 123
+0x62,0xf4,0xf4,0x18,0x80,0xd3,0x7b
+
+# ATT: adcb $123, %r16b
+# INTEL: adc r16b, 123
+0xd5,0x18,0x80,0xd0,0x7b
+
+## add
+
+# ATT: {evex} addb $123, %bl
+# INTEL: {evex} add bl, 123
+0x62,0xf4,0xfc,0x08,0x80,0xc3,0x7b
+
+# ATT: {nf} addb $123, %bl
+# INTEL: {nf} add bl, 123
+0x62,0xf4,0xfc,0x0c,0x80,0xc3,0x7b
+
+# ATT: addb $123, %bl, %cl
+# INTEL: add cl, bl, 123
+0x62,0xf4,0xf4,0x18,0x80,0xc3,0x7b
+
+# ATT: {nf} addb $123, %bl, %cl
+# INTEL: {nf} add cl, bl, 123
+0x62,0xf4,0xf4,0x1c,0x80,0xc3,0x7b
+
+# ATT: addb $123, %r16b
+# INTEL: add r16b, 123
+0xd5,0x18,0x80,0xc0,0x7b
+
+## inc
+
+# ATT: {evex} incb %bl
+# INTEL: {evex} inc bl
+0x62,0xf4,0xfc,0x08,0xfe,0xc3
+
+# ATT: {nf} incb %bl
+# INTEL: {nf} inc bl
+0x62,0xf4,0xfc,0x0c,0xfe,0xc3
+
+# ATT: incb %bl, %bl
+# INTEL: inc bl, bl
+0x62,0xf4,0xe4,0x18,0xfe,0xc3
+
+# ATT: {nf} incb %bl, %bl
+# INTEL: {nf} inc bl, bl
+0x62,0xf4,0xe4,0x1c,0xfe,0xc3
+
+# ATT: incb %r16b
+# INTEL: inc r16b
+0xd5,0x18,0xfe,0xc0
+
+## mul
+
+# ATT: {evex} mulb %bl
+# INTEL: {evex} mul bl
+0x62,0xf4,0xfc,0x08,0xf6,0xe3
+
+# ATT: {nf} mulb %bl
+# INTEL: {nf} mul bl
+0x62,0xf4,0xfc,0x0c,0xf6,0xe3
+
+# ATT: mulb %r16b
+# INTEL: mul r16b
+0xd5,0x18,0xf6,0xe0
+
+## imul
+
+# ATT: {evex} imulb %bl
+# INTEL: {evex} imul bl
+0x62,0xf4,0xfc,0x08,0xf6,0xeb
+
+# ATT: {nf} imulb %bl
+# INTEL: {nf} imul bl
+0x62,0xf4,0xfc,0x0c,0xf6,0xeb
+
+# ATT: imulb %r16b
+# INTEL: imul r16b
+0xd5,0x18,0xf6,0xe8
diff --git a/llvm/test/MC/RISCV/rv32zacas-invalid.s b/llvm/test/MC/RISCV/rv32zacas-invalid.s
index b86246ca2ed1..11d20dacd8a7 100644
--- a/llvm/test/MC/RISCV/rv32zacas-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zacas-invalid.s
@@ -1,4 +1,4 @@
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zacas < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv32 -mattr=+zacas < %s 2>&1 | FileCheck %s
# Non-zero offsets not supported for the third operand (rs1).
amocas.w a1, a3, 1(a5) # CHECK: :[[@LINE]]:18: error: optional integer offset must be 0
diff --git a/llvm/test/MC/RISCV/rv32zacas-valid.s b/llvm/test/MC/RISCV/rv32zacas-valid.s
index d80b963f0a04..05a9cdd5cc21 100644
--- a/llvm/test/MC/RISCV/rv32zacas-valid.s
+++ b/llvm/test/MC/RISCV/rv32zacas-valid.s
@@ -1,12 +1,12 @@
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+zacas < %s \
+# RUN: | llvm-objdump --mattr=+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zacas < %s \
+# RUN: | llvm-objdump --mattr=+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# RUN: not llvm-mc -triple=riscv32 -mattr=+a -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
diff --git a/llvm/test/MC/RISCV/rv64zacas-valid.s b/llvm/test/MC/RISCV/rv64zacas-valid.s
index 843401b50871..694f43b9b440 100644
--- a/llvm/test/MC/RISCV/rv64zacas-valid.s
+++ b/llvm/test/MC/RISCV/rv64zacas-valid.s
@@ -1,7 +1,7 @@
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zacas < %s \
+# RUN: | llvm-objdump --mattr=+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# RUN: not llvm-mc -triple=riscv64 -mattr=+a -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
diff --git a/llvm/test/MC/RISCV/rvzabha-zacas-valid.s b/llvm/test/MC/RISCV/rvzabha-zacas-valid.s
index 8ad2f99d3feb..f1f705e625b8 100644
--- a/llvm/test/MC/RISCV/rvzabha-zacas-valid.s
+++ b/llvm/test/MC/RISCV/rvzabha-zacas-valid.s
@@ -1,12 +1,12 @@
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zabha,+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zabha,+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zabha,+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zabha,+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zabha,+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zabha,+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zabha,+zacas < %s \
+# RUN: | llvm-objdump --mattr=+experimental-zabha,+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zabha,+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zabha,+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zabha,+zacas < %s \
+# RUN: | llvm-objdump --mattr=+experimental-zabha,+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# RUN: not llvm-mc -triple=riscv32 -mattr=+experimental-zabha -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
diff --git a/llvm/test/TableGen/HwModeEncodeDecode3.td b/llvm/test/TableGen/HwModeEncodeDecode3.td
new file mode 100644
index 000000000000..406e52d25be7
--- /dev/null
+++ b/llvm/test/TableGen/HwModeEncodeDecode3.td
@@ -0,0 +1,168 @@
+// RUN: llvm-tblgen -gen-emitter -I %p/../../include %s | \
+// RUN: FileCheck %s --check-prefix=ENCODER
+// RUN: llvm-tblgen -gen-disassembler -I %p/../../include %s | \
+// RUN: FileCheck %s --check-prefix=DECODER
+// RUN: llvm-tblgen -gen-disassembler --suppress-per-hwmode-duplicates -I \
+// RUN: %p/../../include %s | FileCheck %s --check-prefix=DECODER-SUPPRESS
+
+include "llvm/Target/Target.td"
+
+def archInstrInfo : InstrInfo { }
+
+def arch : Target {
+ let InstructionSet = archInstrInfo;
+}
+
+def Myi32 : Operand<i32> {
+ let DecoderMethod = "DecodeMyi32";
+}
+
+def HasA : Predicate<"Subtarget->hasA()">;
+def HasB : Predicate<"Subtarget->hasB()">;
+
+def ModeA : HwMode<"+a", [HasA]>;
+def ModeB : HwMode<"+b", [HasB]>;
+
+
+def fooTypeEncDefault : InstructionEncoding {
+ let Size = 8;
+ field bits<64> SoftFail = 0;
+ bits<64> Inst;
+ bits<8> factor;
+ let Inst{7...0} = factor;
+ let Inst{3...2} = 0b10;
+ let Inst{1...0} = 0b00;
+}
+
+def fooTypeEncA : InstructionEncoding {
+ let Size = 4;
+ field bits<32> SoftFail = 0;
+ bits<32> Inst;
+ bits<8> factor;
+ let Inst{7...0} = factor;
+ let Inst{3...2} = 0b11;
+ let Inst{1...0} = 0b00;
+}
+
+def fooTypeEncB : InstructionEncoding {
+ let Size = 4;
+ field bits<32> SoftFail = 0;
+ bits<32> Inst;
+ bits<8> factor;
+ let Inst{15...8} = factor;
+ let Inst{1...0} = 0b11;
+}
+
+// Test for DefaultMode as a selector.
+def foo : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$factor);
+ let EncodingInfos = EncodingByHwMode<
+ [ModeA, ModeB, DefaultMode], [fooTypeEncA, fooTypeEncB, fooTypeEncDefault]
+ >;
+ let AsmString = "foo $factor";
+}
+
+def bar: Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$factor);
+ let Size = 4;
+ bits<32> Inst;
+ bits<32> SoftFail;
+ bits<8> factor;
+ let Inst{31...24} = factor;
+ let Inst{1...0} = 0b10;
+ let AsmString = "bar $factor";
+}
+
+def baz : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$factor);
+ bits<32> Inst;
+ let EncodingInfos = EncodingByHwMode<
+ [ModeB], [fooTypeEncA]
+ >;
+ let AsmString = "foo $factor";
+}
+
+def unrelated: Instruction {
+ let OutOperandList = (outs);
+ let DecoderNamespace = "Alt";
+ let InOperandList = (ins i32imm:$factor);
+ let Size = 4;
+ bits<32> Inst;
+ bits<32> SoftFail;
+ bits<8> factor;
+ let Inst{31...24} = factor;
+ let Inst{1...0} = 0b10;
+ let AsmString = "unrelated $factor";
+}
+
+
+// DECODER-LABEL: DecoderTableAlt_DefaultMode32[] =
+// DECODER-DAG: Opcode: unrelated
+// DECODER-LABEL: DecoderTableAlt_ModeA32[] =
+// DECODER-DAG: Opcode: unrelated
+// DECODER-LABEL: DecoderTableAlt_ModeB32[] =
+// DECODER-DAG: Opcode: unrelated
+// DECODER-LABEL: DecoderTable_DefaultMode32[] =
+// DECODER-DAG: Opcode: bar
+// DECODER-LABEL: DecoderTable_DefaultMode64[] =
+// DECODER-DAG: Opcode: fooTypeEncDefault:foo
+// DECODER-LABEL: DecoderTable_ModeA32[] =
+// DECODER-DAG: Opcode: fooTypeEncA:foo
+// DECODER-DAG: Opcode: bar
+// DECODER-LABEL: DecoderTable_ModeB32[] =
+// DECODER-DAG: Opcode: fooTypeEncB:foo
+// DECODER-DAG: Opcode: fooTypeEncA:baz
+// DECODER-DAG: Opcode: bar
+
+
+// DECODER-SUPPRESS-LABEL: DecoderTableAlt_AllModes32[] =
+// DECODER-SUPPRESS-DAG: Opcode: unrelated
+// DECODER-SUPPRESS-LABEL: DecoderTable_AllModes32[] =
+// DECODER-SUPPRESS-DAG: Opcode: bar
+// DECODER-SUPPRESS-LABEL: DecoderTable_DefaultMode64[] =
+// DECODER-SUPPRESS-NOT: Opcode: bar
+// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncDefault:foo
+// DECODER-SUPPRESS-LABEL: DecoderTable_ModeA32[] =
+// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncA:foo
+// DECODER-SUPPRESS-NOT: Opcode: bar
+// DECODER-SUPPRESS-LABEL: DecoderTable_ModeB32[] =
+// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncB:foo
+// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncA:baz
+// DECODER-SUPPRESS-NOT: Opcode: bar
+
+// ENCODER-LABEL: static const uint64_t InstBits_DefaultMode[] = {
+// ENCODER: UINT64_C(2), // bar
+// ENCODER: UINT64_C(0), // baz
+// ENCODER: UINT64_C(8), // foo
+// ENCODER: UINT64_C(2), // unrelated
+
+// ENCODER-LABEL: static const uint64_t InstBits_ModeA[] = {
+// ENCODER: UINT64_C(2), // bar
+// ENCODER: UINT64_C(0), // baz
+// ENCODER: UINT64_C(12), // foo
+// ENCODER: UINT64_C(2), // unrelated
+
+// ENCODER-LABEL: static const uint64_t InstBits_ModeB[] = {
+// ENCODER: UINT64_C(2), // bar
+// ENCODER: UINT64_C(12), // baz
+// ENCODER: UINT64_C(3), // foo
+// ENCODER: UINT64_C(2), // unrelated
+
+// ENCODER: unsigned HwMode = STI.getHwMode();
+// ENCODER: switch (HwMode) {
+// ENCODER: default: llvm_unreachable("Unknown hardware mode!"); break;
+// ENCODER: case 0: InstBits = InstBits_DefaultMode; break;
+// ENCODER: case 1: InstBits = InstBits_ModeA; break;
+// ENCODER: case 2: InstBits = InstBits_ModeB; break;
+// ENCODER: };
+
+// ENCODER: case ::foo: {
+// ENCODER: switch (HwMode) {
+// ENCODER: default: llvm_unreachable("Unhandled HwMode");
+// ENCODER: case 0: {
+// ENCODER: case 1: {
+// ENCODER: case 2: {
+
diff --git a/llvm/test/ThinLTO/X86/visibility-elf.ll b/llvm/test/ThinLTO/X86/visibility-elf.ll
index aa11c3e06ff3..fc7439bf001b 100644
--- a/llvm/test/ThinLTO/X86/visibility-elf.ll
+++ b/llvm/test/ThinLTO/X86/visibility-elf.ll
@@ -36,12 +36,12 @@ declare void @ext(ptr)
;; Currently the visibility is not propagated onto an unimported function,
;; because we don't have summaries for declarations.
; CHECK: declare extern_weak void @not_imported()
-; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0
-; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0
+; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0 !thinlto_src_file !1
+; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0 !thinlto_src_file !1
;; This can be hidden, but we cannot communicate the declaration's visibility
;; to other modules because declarations don't have summaries, and the IRLinker
;; overrides it when importing the protected def.
-; CHECK: define available_externally protected void @protected_def_hidden_ref() !thinlto_src_module !0
+; CHECK: define available_externally protected void @protected_def_hidden_ref() !thinlto_src_module !0 !thinlto_src_file !1
; CHECK2: define hidden i32 @hidden_def_weak_def()
; CHECK2: define protected void @protected_def_weak_def()
diff --git a/llvm/test/ThinLTO/X86/visibility-macho.ll b/llvm/test/ThinLTO/X86/visibility-macho.ll
index d41ab4f1ef39..1a48b477c96d 100644
--- a/llvm/test/ThinLTO/X86/visibility-macho.ll
+++ b/llvm/test/ThinLTO/X86/visibility-macho.ll
@@ -30,8 +30,8 @@ declare void @ext(ptr)
;; Currently the visibility is not propagated onto an unimported function,
;; because we don't have summaries for declarations.
; CHECK: declare extern_weak dso_local void @not_imported()
-; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0
-; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0
+; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0 !thinlto_src_file !1
+; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0 !thinlto_src_file !1
; CHECK2: define hidden i32 @hidden_def_weak_def()
; CHECK2: define hidden void @hidden_def_ref()
diff --git a/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll b/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll
index e3f2a54f321e..279238bea184 100644
--- a/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll
+++ b/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll
@@ -93,9 +93,8 @@ define void @some_checks_in_loops_removable(ptr %ptr, ptr %lower, ptr %upper, i8
; CHECK: loop.body:
; CHECK-NEXT: [[IV_1:%.*]] = add nuw nsw i16 [[IV]], 1
; CHECK-NEXT: [[PTR_IV_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i16 [[IV_1]]
-; CHECK-NEXT: [[CMP_PTR_IV_1_LOWER:%.*]] = icmp ugt ptr [[LOWER]], [[PTR_IV_1]]
; CHECK-NEXT: [[CMP_PTR_IV_1_UPPER:%.*]] = icmp ule ptr [[UPPER]], [[PTR_IV_1]]
-; CHECK-NEXT: [[OR_1:%.*]] = or i1 [[CMP_PTR_IV_1_LOWER]], [[CMP_PTR_IV_1_UPPER]]
+; CHECK-NEXT: [[OR_1:%.*]] = or i1 false, [[CMP_PTR_IV_1_UPPER]]
; CHECK-NEXT: br i1 [[OR]], label [[TRAP]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: store i8 0, ptr [[PTR_IV]], align 4
@@ -171,9 +170,8 @@ define void @no_checks_in_loops_removable(ptr %ptr, ptr %lower, ptr %upper, i8 %
; CHECK: loop.body:
; CHECK-NEXT: [[IV_1:%.*]] = add nuw nsw i16 [[IV]], 1
; CHECK-NEXT: [[PTR_IV_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i16 [[IV_1]]
-; CHECK-NEXT: [[CMP_PTR_IV_1_LOWER:%.*]] = icmp ugt ptr [[LOWER]], [[PTR_IV_1]]
; CHECK-NEXT: [[CMP_PTR_IV_1_UPPER:%.*]] = icmp ule ptr [[UPPER]], [[PTR_IV_1]]
-; CHECK-NEXT: [[OR_1:%.*]] = or i1 [[CMP_PTR_IV_1_LOWER]], [[CMP_PTR_IV_1_UPPER]]
+; CHECK-NEXT: [[OR_1:%.*]] = or i1 false, [[CMP_PTR_IV_1_UPPER]]
; CHECK-NEXT: br i1 [[OR]], label [[TRAP]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: store i8 0, ptr [[PTR_IV]], align 4
diff --git a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll
index 66ce1ffc6ebc..1842ca2d8254 100644
--- a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll
+++ b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll
@@ -27,18 +27,16 @@ define void @test1(ptr %src, ptr noundef %lower, ptr noundef %upper, i8 %N) {
; CHECK-NEXT: store i32 0, ptr [[PTR_SRC_IV]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i8 [[IV]], 1
; CHECK-NEXT: [[SRC_IV_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[ADD_1]]
-; CHECK-NEXT: [[CMP_IV_1_START:%.*]] = icmp ult ptr [[SRC_IV_1]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_1_END:%.*]] = icmp uge ptr [[SRC_IV_1]], [[UPPER]]
-; CHECK-NEXT: [[OR_2:%.*]] = or i1 [[CMP_IV_1_START]], [[CMP_IV_1_END]]
+; CHECK-NEXT: [[OR_2:%.*]] = or i1 false, [[CMP_IV_1_END]]
; CHECK-NEXT: br i1 [[OR_2]], label [[TRAP_BB]], label [[LOOP_BODY_2:%.*]]
; CHECK: loop.body.2:
; CHECK-NEXT: [[PTR_SRC_IV_1:%.*]] = bitcast ptr [[SRC_IV_1]] to ptr
; CHECK-NEXT: store i32 0, ptr [[PTR_SRC_IV_1]], align 4
; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i8 [[IV]], 2
; CHECK-NEXT: [[SRC_IV_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[ADD_2]]
-; CHECK-NEXT: [[CMP_IV_2_START:%.*]] = icmp ult ptr [[SRC_IV_2]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_2_END:%.*]] = icmp uge ptr [[SRC_IV_2]], [[UPPER]]
-; CHECK-NEXT: [[OR_3:%.*]] = or i1 [[CMP_IV_2_START]], [[CMP_IV_2_END]]
+; CHECK-NEXT: [[OR_3:%.*]] = or i1 false, [[CMP_IV_2_END]]
; CHECK-NEXT: br i1 [[OR_3]], label [[TRAP_BB]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[PTR_SRC_IV_2:%.*]] = bitcast ptr [[SRC_IV_2]] to ptr
@@ -125,16 +123,14 @@ define void @test2(ptr %src, ptr %lower, ptr %upper, i8 %N) {
; CHECK: loop.body.1:
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i8 [[IV]], 1
; CHECK-NEXT: [[SRC_IV_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[ADD_1]]
-; CHECK-NEXT: [[CMP_IV_1_START:%.*]] = icmp ult ptr [[SRC_IV_1]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_1_END:%.*]] = icmp uge ptr [[SRC_IV_1]], [[UPPER]]
-; CHECK-NEXT: [[OR_2:%.*]] = or i1 [[CMP_IV_1_START]], [[CMP_IV_1_END]]
+; CHECK-NEXT: [[OR_2:%.*]] = or i1 false, [[CMP_IV_1_END]]
; CHECK-NEXT: br i1 [[OR_2]], label [[TRAP_BB]], label [[LOOP_BODY_2:%.*]]
; CHECK: loop.body.2:
; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i8 [[IV]], 2
; CHECK-NEXT: [[SRC_IV_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[ADD_2]]
-; CHECK-NEXT: [[CMP_IV_2_START:%.*]] = icmp ult ptr [[SRC_IV_2]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_2_END:%.*]] = icmp uge ptr [[SRC_IV_2]], [[UPPER]]
-; CHECK-NEXT: [[OR_3:%.*]] = or i1 [[CMP_IV_2_START]], [[CMP_IV_2_END]]
+; CHECK-NEXT: [[OR_3:%.*]] = or i1 false, [[CMP_IV_2_END]]
; CHECK-NEXT: br i1 [[OR_3]], label [[TRAP_BB]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr [[SRC_IV]] to ptr
@@ -221,16 +217,14 @@ define void @test2_with_ne(ptr %src, ptr %lower, ptr %upper, i8 %N) {
; CHECK: loop.body.1:
; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i8 [[IV]], 1
; CHECK-NEXT: [[SRC_IV_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[ADD_1]]
-; CHECK-NEXT: [[CMP_IV_1_START:%.*]] = icmp ult ptr [[SRC_IV_1]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_1_END:%.*]] = icmp uge ptr [[SRC_IV_1]], [[UPPER]]
-; CHECK-NEXT: [[OR_2:%.*]] = or i1 [[CMP_IV_1_START]], [[CMP_IV_1_END]]
+; CHECK-NEXT: [[OR_2:%.*]] = or i1 false, [[CMP_IV_1_END]]
; CHECK-NEXT: br i1 [[OR_2]], label [[TRAP_BB]], label [[LOOP_BODY_2:%.*]]
; CHECK: loop.body.2:
; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i8 [[IV]], 2
; CHECK-NEXT: [[SRC_IV_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[ADD_2]]
-; CHECK-NEXT: [[CMP_IV_2_START:%.*]] = icmp ult ptr [[SRC_IV_2]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_2_END:%.*]] = icmp uge ptr [[SRC_IV_2]], [[UPPER]]
-; CHECK-NEXT: [[OR_3:%.*]] = or i1 [[CMP_IV_2_START]], [[CMP_IV_2_END]]
+; CHECK-NEXT: [[OR_3:%.*]] = or i1 false, [[CMP_IV_2_END]]
; CHECK-NEXT: br i1 [[OR_3]], label [[TRAP_BB]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr [[SRC_IV]] to ptr
@@ -316,16 +310,14 @@ define void @test3(ptr %src, ptr %lower, ptr %upper, i8 %N) {
; CHECK-NEXT: br i1 [[OR_1]], label [[TRAP_BB]], label [[LOOP_BODY_1:%.*]]
; CHECK: loop.body.1:
; CHECK-NEXT: [[SRC_IV_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[NEXT]]
-; CHECK-NEXT: [[CMP_IV_1_START:%.*]] = icmp ult ptr [[SRC_IV_1]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_1_END:%.*]] = icmp uge ptr [[SRC_IV_1]], [[UPPER]]
-; CHECK-NEXT: [[OR_2:%.*]] = or i1 [[CMP_IV_1_START]], [[CMP_IV_1_END]]
+; CHECK-NEXT: [[OR_2:%.*]] = or i1 false, [[CMP_IV_1_END]]
; CHECK-NEXT: br i1 [[OR_2]], label [[TRAP_BB]], label [[LOOP_BODY_2:%.*]]
; CHECK: loop.body.2:
; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i8 [[IV]], 2
; CHECK-NEXT: [[SRC_IV_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 [[ADD_2]]
-; CHECK-NEXT: [[CMP_IV_2_START:%.*]] = icmp ult ptr [[SRC_IV_2]], [[LOWER]]
; CHECK-NEXT: [[CMP_IV_2_END:%.*]] = icmp uge ptr [[SRC_IV_2]], [[UPPER]]
-; CHECK-NEXT: [[OR_3:%.*]] = or i1 [[CMP_IV_2_START]], [[CMP_IV_2_END]]
+; CHECK-NEXT: [[OR_3:%.*]] = or i1 false, [[CMP_IV_2_END]]
; CHECK-NEXT: br i1 [[OR_3]], label [[TRAP_BB]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr [[SRC_IV]] to ptr
diff --git a/llvm/test/Transforms/ConstraintElimination/zext-for-per-formula-reasoning.ll b/llvm/test/Transforms/ConstraintElimination/zext-for-per-formula-reasoning.ll
index 63f5d4d4ba34..7844651a01f9 100644
--- a/llvm/test/Transforms/ConstraintElimination/zext-for-per-formula-reasoning.ll
+++ b/llvm/test/Transforms/ConstraintElimination/zext-for-per-formula-reasoning.ll
@@ -90,11 +90,9 @@ define i1 @gep_zext_idx_adds(ptr %p, i8 %cnt, i8 %off) {
; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[CNT]] to i16
; CHECK-NEXT: [[EXT_1:%.*]] = add nuw nsw i16 [[EXT]], 1
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i16 [[EXT_1]]
-; CHECK-NEXT: [[T_1:%.*]] = icmp uge ptr [[ADD_PTR]], [[P]]
-; CHECK-NEXT: [[F_1:%.*]] = icmp ult ptr [[ADD_PTR]], [[P]]
; CHECK-NEXT: [[GEP_11:%.*]] = getelementptr inbounds i32, ptr [[P]], i16 11
; CHECK-NEXT: [[C_1:%.*]] = icmp uge ptr [[ADD_PTR]], [[GEP_11]]
-; CHECK-NEXT: [[RES_1:%.*]] = xor i1 [[T_1]], [[F_1]]
+; CHECK-NEXT: [[RES_1:%.*]] = xor i1 true, false
; CHECK-NEXT: [[RES_2:%.*]] = xor i1 [[RES_1]], [[C_1]]
; CHECK-NEXT: ret i1 [[RES_2]]
;
diff --git a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll
index 47b2ddafcfc6..dd9310fe34f3 100644
--- a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll
+++ b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll
@@ -9,6 +9,11 @@
; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, [[OffsetX:[0-9]*]]))
; ^ No deref at the end, as this variable ("x") is an array;
; its value is its address. The entire array is in the frame.
+; CHECK: call void @llvm.dbg.assign(metadata ptr %[[frame]]
+; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, [[OffsetX]])
+;; FIXME: Should we be updating the addresses on assigns here as well?
+; CHECK-SAME: , metadata ptr %[[frame]], metadata !DIExpression())
+
; CHECK: call void @llvm.dbg.value(metadata ptr %[[frame]]
; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, [[OffsetSpill:[0-9]*]], DW_OP_deref))
; CHECK: call void @llvm.dbg.value(metadata ptr %[[frame]]
@@ -78,6 +83,7 @@ init.ready: ; preds = %init.suspend, %coro
%i.init.ready.inc = add nsw i32 0, 1
call void @llvm.dbg.value(metadata i32 %i.init.ready.inc, metadata !6, metadata !DIExpression()), !dbg !11
call void @llvm.dbg.value(metadata ptr %x, metadata !12, metadata !DIExpression()), !dbg !17
+ call void @llvm.dbg.assign(metadata ptr %x, metadata !12, metadata !DIExpression(), metadata !30, metadata ptr %x, metadata !DIExpression()), !dbg !17
call void @llvm.memset.p0.i64(ptr align 16 %x, i8 0, i64 40, i1 false), !dbg !17
call void @print(i32 %i.init.ready.inc)
%ready.again = call zeroext i1 @await_ready()
@@ -250,3 +256,4 @@ attributes #4 = { argmemonly nofree nosync nounwind willreturn writeonly }
!21 = !DILocation(line: 43, column: 3, scope: !7)
!22 = !DILocation(line: 43, column: 8, scope: !7)
!23 = !DILocalVariable(name: "produced", scope: !7, file: !1, line:24, type: !10)
+!30 = distinct !DIAssignID() \ No newline at end of file
diff --git a/llvm/test/Transforms/FunctionImport/funcimport.ll b/llvm/test/Transforms/FunctionImport/funcimport.ll
index 01298258c62e..a0968a67f5ce 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport.ll
@@ -57,7 +57,7 @@ declare void @linkoncealias(...) #1
; CHECK-DAG: define available_externally void @linkoncealias()
; INSTLIMDEF-DAG: Import referencestatics
-; INSTLIMDEF-DAG: define available_externally i32 @referencestatics(i32 %i) !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally i32 @referencestatics(i32 %i) !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIM5-DAG: declare i32 @referencestatics(...)
declare i32 @referencestatics(...) #1
@@ -66,27 +66,27 @@ declare i32 @referencestatics(...) #1
; Ensure that the call is to the properly-renamed function.
; INSTLIMDEF-DAG: Import staticfunc
; INSTLIMDEF-DAG: %call = call i32 @staticfunc.llvm.
-; INSTLIMDEF-DAG: define available_externally hidden i32 @staticfunc.llvm.{{.*}} !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally hidden i32 @staticfunc.llvm.{{.*}} !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIMDEF-DAG: Import referenceglobals
-; CHECK-DAG: define available_externally i32 @referenceglobals(i32 %i) !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally i32 @referenceglobals(i32 %i) !thinlto_src_module !0 !thinlto_src_file !1 {
declare i32 @referenceglobals(...) #1
; The import of referenceglobals will expose call to globalfunc1 that
; should in turn be imported.
; INSTLIMDEF-DAG: Import globalfunc1
-; CHECK-DAG: define available_externally void @globalfunc1() !thinlto_src_module !0
+; CHECK-DAG: define available_externally void @globalfunc1() !thinlto_src_module !0 !thinlto_src_file !1
; INSTLIMDEF-DAG: Import referencecommon
-; CHECK-DAG: define available_externally i32 @referencecommon(i32 %i) !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally i32 @referencecommon(i32 %i) !thinlto_src_module !0 !thinlto_src_file !1 {
declare i32 @referencecommon(...) #1
; INSTLIMDEF-DAG: Import setfuncptr
-; CHECK-DAG: define available_externally void @setfuncptr() !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @setfuncptr() !thinlto_src_module !0 !thinlto_src_file !1 {
declare void @setfuncptr(...) #1
; INSTLIMDEF-DAG: Import callfuncptr
-; CHECK-DAG: define available_externally void @callfuncptr() !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @callfuncptr() !thinlto_src_module !0 !thinlto_src_file !1 {
declare void @callfuncptr(...) #1
; Ensure that all uses of local variable @P which has used in setfuncptr
@@ -97,7 +97,7 @@ declare void @callfuncptr(...) #1
; Ensure that @referencelargelinkonce definition is pulled in, but later we
; also check that the linkonceodr function is not.
-; CHECK-DAG: define available_externally void @referencelargelinkonce() !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @referencelargelinkonce() !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIM5-DAG: declare void @linkonceodr()
declare void @referencelargelinkonce(...)
@@ -110,13 +110,13 @@ declare void @weakfunc(...) #1
declare void @linkoncefunc2(...) #1
; INSTLIMDEF-DAG: Import funcwithpersonality
-; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality ptr @__gxx_personality_v0 !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality ptr @__gxx_personality_v0 !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIM5-DAG: declare hidden void @funcwithpersonality.llvm.{{.*}}()
; We can import variadic functions without a va_start, since the inliner
; can handle them.
; INSTLIMDEF-DAG: Import variadic_no_va_start
-; CHECK-DAG: define available_externally void @variadic_no_va_start(...) !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @variadic_no_va_start(...) !thinlto_src_module !0 !thinlto_src_file !1 {
declare void @variadic_no_va_start(...)
; We can import variadic functions with a va_start, since the inliner
@@ -128,7 +128,8 @@ declare void @variadic_va_start(...)
; INSTLIMDEF-DAG: 15 function-import - Number of functions imported
; INSTLIMDEF-DAG: 4 function-import - Number of global variables imported
-; CHECK-DAG: !0 = !{!"{{.*}}/Inputs/funcimport.ll"}
+; CHECK-DAG: !0 = !{!"{{.*}}.bc"}
+; CHECK-DAG: !1 = !{!"{{.*}}/Inputs/funcimport.ll"}
; The actual GUID values will depend on path to test.
; GUID-DAG: GUID {{.*}} is weakalias
diff --git a/llvm/test/Transforms/Inline/inline_stats.ll b/llvm/test/Transforms/Inline/inline_stats.ll
index c779054c3b1e..41c12b3015f7 100644
--- a/llvm/test/Transforms/Inline/inline_stats.ll
+++ b/llvm/test/Transforms/Inline/inline_stats.ll
@@ -44,7 +44,7 @@ define void @internal3() {
declare void @external_decl()
-define void @external1() alwaysinline !thinlto_src_module !0 {
+define void @external1() alwaysinline !thinlto_src_module !0 !thinlto_src_file !1 {
call fastcc void @internal2()
call fastcc void @external2();
call void @external_decl();
@@ -87,7 +87,7 @@ define void @external_big() noinline !thinlto_src_module !1 {
}
; It should not be imported, but it should not break anything.
-define void @external_notcalled() !thinlto_src_module !0 {
+define void @external_notcalled() !thinlto_src_module !0 !thinlto_src_file !1 {
call void @external_notcalled()
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/maxnum.ll b/llvm/test/Transforms/InstCombine/maxnum.ll
index 87288b18cbcd..e140a5b405ea 100644
--- a/llvm/test/Transforms/InstCombine/maxnum.ll
+++ b/llvm/test/Transforms/InstCombine/maxnum.ll
@@ -66,7 +66,7 @@ define float @constant_fold_maxnum_f32_p0_n0() {
define float @constant_fold_maxnum_f32_n0_p0() {
; CHECK-LABEL: @constant_fold_maxnum_f32_n0_p0(
-; CHECK-NEXT: ret float -0.000000e+00
+; CHECK-NEXT: ret float 0.000000e+00
;
%x = call float @llvm.maxnum.f32(float -0.0, float 0.0)
ret float %x
diff --git a/llvm/test/Transforms/InstCombine/minnum.ll b/llvm/test/Transforms/InstCombine/minnum.ll
index 8050f0755952..cc6171b9d8e6 100644
--- a/llvm/test/Transforms/InstCombine/minnum.ll
+++ b/llvm/test/Transforms/InstCombine/minnum.ll
@@ -60,7 +60,7 @@ define float @constant_fold_minnum_f32_p0_p0() {
define float @constant_fold_minnum_f32_p0_n0() {
; CHECK-LABEL: @constant_fold_minnum_f32_p0_n0(
-; CHECK-NEXT: ret float 0.000000e+00
+; CHECK-NEXT: ret float -0.000000e+00
;
%x = call float @llvm.minnum.f32(float 0.0, float -0.0)
ret float %x
@@ -199,7 +199,7 @@ define float @minnum_f32_1_minnum_p0_val_fmf3(float %x) {
define float @minnum_f32_p0_minnum_val_n0(float %x) {
; CHECK-LABEL: @minnum_f32_p0_minnum_val_n0(
-; CHECK-NEXT: [[Z:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0.000000e+00)
+; CHECK-NEXT: [[Z:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float -0.000000e+00)
; CHECK-NEXT: ret float [[Z]]
;
%y = call float @llvm.minnum.f32(float %x, float -0.0)
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll b/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll
index a5f5d4e12ed8..9120649eb5c4 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll
@@ -49,6 +49,38 @@ define float @minnum_float() {
ret float %1
}
+define float @minnum_float_p0_n0() {
+; CHECK-LABEL: @minnum_float_p0_n0(
+; CHECK-NEXT: ret float -0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float 0.0, float -0.0)
+ ret float %min
+}
+
+define float @minnum_float_n0_p0() {
+; CHECK-LABEL: @minnum_float_n0_p0(
+; CHECK-NEXT: ret float -0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float -0.0, float 0.0)
+ ret float %min
+}
+
+define float @minnum_float_p0_qnan() {
+; CHECK-LABEL: @minnum_float_p0_qnan(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float 0.0, float 0x7FF8000000000000)
+ ret float %min
+}
+
+define float @minnum_float_qnan_p0() {
+; CHECK-LABEL: @minnum_float_qnan_p0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float 0x7FF8000000000000, float 0.0)
+ ret float %min
+}
+
define bfloat @minnum_bfloat() {
; CHECK-LABEL: @minnum_bfloat(
; CHECK-NEXT: ret bfloat 0xR40A0
@@ -95,7 +127,7 @@ define <4 x half> @minnum_half_vec() {
define <4 x float> @minnum_float_zeros_vec() {
; CHECK-LABEL: @minnum_float_zeros_vec(
-; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float -0.000000e+00, float 0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>
;
%1 = call <4 x float> @llvm.minnum.v4f32(<4 x float> <float 0.0, float -0.0, float 0.0, float -0.0>, <4 x float> <float 0.0, float 0.0, float -0.0, float -0.0>)
ret <4 x float> %1
@@ -109,6 +141,38 @@ define float @maxnum_float() {
ret float %1
}
+define float @maxnum_float_p0_n0() {
+; CHECK-LABEL: @maxnum_float_p0_n0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float 0.0, float -0.0)
+ ret float %max
+}
+
+define float @maxnum_float_n0_p0() {
+; CHECK-LABEL: @maxnum_float_n0_p0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float -0.0, float 0.0)
+ ret float %max
+}
+
+define float @maxnum_float_p0_qnan() {
+; CHECK-LABEL: @maxnum_float_p0_qnan(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float 0.0, float 0x7FF8000000000000)
+ ret float %max
+}
+
+define float @maxnum_float_qnan_p0() {
+; CHECK-LABEL: @maxnum_float_qnan_p0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float 0x7FF8000000000000, float 0.0)
+ ret float %max
+}
+
define bfloat @maxnum_bfloat() {
; CHECK-LABEL: @maxnum_bfloat(
; CHECK-NEXT: ret bfloat 0xR4228
@@ -155,7 +219,7 @@ define <4 x half> @maxnum_half_vec() {
define <4 x float> @maxnum_float_zeros_vec() {
; CHECK-LABEL: @maxnum_float_zeros_vec(
-; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float -0.000000e+00, float 0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float -0.000000e+00>
;
%1 = call <4 x float> @llvm.maxnum.v4f32(<4 x float> <float 0.0, float -0.0, float 0.0, float -0.0>, <4 x float> <float 0.0, float 0.0, float -0.0, float -0.0>)
ret <4 x float> %1
diff --git a/llvm/test/Transforms/LoopRotate/oz-disable.ll b/llvm/test/Transforms/LoopRotate/oz-disable.ll
index 6a7847ac0ff2..c45603878ee6 100644
--- a/llvm/test/Transforms/LoopRotate/oz-disable.ll
+++ b/llvm/test/Transforms/LoopRotate/oz-disable.ll
@@ -4,6 +4,9 @@
; RUN: opt < %s -S -passes='default<Os>' -debug -debug-only=loop-rotate 2>&1 | FileCheck %s -check-prefix=OS
; RUN: opt < %s -S -passes='default<Oz>' -debug -debug-only=loop-rotate 2>&1 | FileCheck %s -check-prefix=OZ
+;; Make sure -enable-loop-header-duplication overrides the default behavior at Oz
+; RUN: opt < %s -S -passes='default<Oz>' -enable-loop-header-duplication -debug -debug-only=loop-rotate 2>&1 | FileCheck %s -check-prefix=OS
+
; Loop should be rotated for -Os but not for -Oz.
; OS: rotating Loop at depth 1
; OZ-NOT: rotating Loop at depth 1
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll
index 7121c85dd59b..c12b3b122ba7 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -passes=loop-vectorize -S -mcpu=pwr9 -interleave-small-loop-scalar-reduction=true 2>&1 | FileCheck %s
-; RUN: opt < %s -passes='loop-vectorize' -S -mcpu=pwr9 -interleave-small-loop-scalar-reduction=true 2>&1 | FileCheck %s
+; RUN: opt < %s -passes=loop-vectorize -S -mcpu=pwr9 2>&1 | FileCheck %s
+; RUN: opt < %s -passes='loop-vectorize' -S -mcpu=pwr9 2>&1 | FileCheck %s
; CHECK-LABEL: vector.body
; CHECK: load double, ptr
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index 1bcd7a2e009e..72d9691b2bb8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -36,10 +36,10 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -86,10 +86,10 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -112,7 +112,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
-; CHECK-NEXT: LV: Loop cost is 28
+; CHECK-NEXT: LV: Loop cost is 32
; CHECK-NEXT: LV: IC is 1
; CHECK-NEXT: LV: VF is vscale x 4
; CHECK-NEXT: LV: Not Interleaving.
@@ -120,8 +120,9 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found a vectorizable loop (vscale x 4) in <stdin>
; CHECK-NEXT: LEV: Epilogue vectorization is not profitable for this loop
; CHECK-NEXT: Executing best plan with VF=vscale x 4, UF=1
-; CHECK-NEXT: LV: Interleaving disabled by the pass manager
+; CHECK: LV: Interleaving disabled by the pass manager
; CHECK-NEXT: LV: Vectorizing: innermost loop.
+; CHECK-EMPTY:
;
entry:
%cmp7 = icmp sgt i32 %n, 0
@@ -176,10 +177,10 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -226,10 +227,10 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -252,7 +253,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
-; CHECK-NEXT: LV: Loop cost is 28
+; CHECK-NEXT: LV: Loop cost is 32
; CHECK-NEXT: LV: IC is 1
; CHECK-NEXT: LV: VF is vscale x 4
; CHECK-NEXT: LV: Not Interleaving.
@@ -260,7 +261,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found a vectorizable loop (vscale x 4) in <stdin>
; CHECK-NEXT: LEV: Epilogue vectorization is not profitable for this loop
; CHECK-NEXT: Executing best plan with VF=vscale x 4, UF=1
-; CHECK-NEXT: LV: Interleaving disabled by the pass manager
+; CHECK: LV: Interleaving disabled by the pass manager
; CHECK-NEXT: LV: Vectorizing: innermost loop.
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll b/llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll
new file mode 100644
index 000000000000..9968c933494a
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s
+
+; FIXME: The vectorizer should refuse to fold the tail by masking because
+; %conv is used outside of the loop. Test for this by checking that
+; %n.vec, the vector trip count, is rounded down to the next multiple of
+; 4. If folding the tail, it would have been rounded up instead.
+; Test case for #76069 (https://github.com/llvm/llvm-project/issues/76069).
+define i32 @test(ptr %arr, i64 %n) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ptr [[ARR:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i64 [[N]], 1
+; CHECK-NEXT: br i1 [[CMP1]], label [[PREHEADER:%.*]], label [[DONE:%.*]]
+; CHECK: preheader:
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK: vector.scevcheck:
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[N]], -2
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i8
+; CHECK-NEXT: [[TMP3:%.*]] = add i8 1, [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ult i8 [[TMP3]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[TMP1]], 255
+; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP1]] to i8
+; CHECK-NEXT: [[TMP8:%.*]] = add i8 2, [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ult i8 [[TMP8]], 2
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i64 [[TMP1]], 255
+; CHECK-NEXT: [[TMP11:%.*]] = or i1 [[TMP9]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = or i1 [[TMP6]], [[TMP11]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 3
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 1, [[N_VEC]]
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i8
+; CHECK-NEXT: [[IND_END1:%.*]] = add i8 1, [[DOTCAST]]
+; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP0]], 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE10:%.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 1, i64 2, i64 3, i64 4>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE10]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 3
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT3]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT4]], <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP18:%.*]] = add nsw <4 x i64> [[VEC_IND]], <i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP17]], i32 0
+; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; CHECK: pred.store.if:
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP18]], i32 0
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP20]]
+; CHECK-NEXT: store i32 65, ptr [[TMP21]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
+; CHECK: pred.store.continue:
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i1> [[TMP17]], i32 1
+; CHECK-NEXT: br i1 [[TMP22]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
+; CHECK: pred.store.if5:
+; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i64> [[TMP18]], i32 1
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP23]]
+; CHECK-NEXT: store i32 65, ptr [[TMP24]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
+; CHECK: pred.store.continue6:
+; CHECK-NEXT: [[TMP25:%.*]] = extractelement <4 x i1> [[TMP17]], i32 2
+; CHECK-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
+; CHECK: pred.store.if7:
+; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i64> [[TMP18]], i32 2
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP26]]
+; CHECK-NEXT: store i32 65, ptr [[TMP27]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
+; CHECK: pred.store.continue8:
+; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i1> [[TMP17]], i32 3
+; CHECK-NEXT: br i1 [[TMP28]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10]]
+; CHECK: pred.store.if9:
+; CHECK-NEXT: [[TMP29:%.*]] = extractelement <4 x i64> [[TMP18]], i32 3
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP29]]
+; CHECK-NEXT: store i32 65, ptr [[TMP30]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
+; CHECK: pred.store.continue10:
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMO:%.*]] = sub i64 [[N_VEC]], 1
+; CHECK-NEXT: [[IND_ESCAPE:%.*]] = add i64 1, [[CMO]]
+; CHECK-NEXT: br i1 true, label [[LOAD_VAL:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ], [ 1, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i8 [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ], [ 1, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[CONV:%.*]] = phi i64 [ [[CONV2:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i8 [ [[INC:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i64 [[CONV]], -1
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[SUB]]
+; CHECK-NEXT: store i32 65, ptr [[PTR]], align 4
+; CHECK-NEXT: [[INC]] = add i8 [[I]], 1
+; CHECK-NEXT: [[CONV2]] = zext i8 [[INC]] to i64
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[CONV2]], [[N]]
+; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP]], label [[LOAD_VAL]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: load_val:
+; CHECK-NEXT: [[FINAL:%.*]] = phi i64 [ [[CONV]], [[LOOP]] ], [ [[IND_ESCAPE]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[PTR2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[FINAL]]
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[PTR2]], align 4
+; CHECK-NEXT: br label [[DONE]]
+; CHECK: done:
+; CHECK-NEXT: [[VALUE:%.*]] = phi i32 [ [[VAL]], [[LOAD_VAL]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[VALUE]]
+;
+entry:
+ %cmp1 = icmp ugt i64 %n, 1
+ br i1 %cmp1, label %preheader, label %done
+
+preheader:
+ br label %loop
+
+loop:
+ %conv = phi i64 [ %conv2, %loop ], [ 1, %preheader ]
+ %i = phi i8 [ %inc, %loop ], [ 1, %preheader ]
+ %sub = add nsw i64 %conv, -1
+ %ptr = getelementptr inbounds i32, ptr %arr, i64 %sub
+ store i32 65, ptr %ptr, align 4
+ %inc = add i8 %i, 1
+ %conv2 = zext i8 %inc to i64
+ %cmp2 = icmp ult i64 %conv2, %n
+ br i1 %cmp2, label %loop, label %load_val, !llvm.loop !0
+
+load_val:
+ %final = phi i64 [ %conv, %loop ]
+ %ptr2 = getelementptr inbounds i32, ptr %arr, i64 %final
+ %val = load i32, ptr %ptr2, align 4
+ br label %done
+
+done:
+ %value = phi i32 [ %val, %load_val ], [ 0, %entry ]
+ ret i32 %value
+
+}
+
+!0 = distinct !{!0, !1, !2, !3}
+!1 = !{!"llvm.loop.unroll.disable"}
+!2 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
+!3 = !{!"llvm.loop.vectorize.enable", i1 true}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.unroll.disable"}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
new file mode 100644
index 000000000000..1dddbfe20a2e
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
@@ -0,0 +1,90 @@
+; RUN: opt -passes=loop-vectorize -force-vector-width=8 -force-vector-interleave=2 -disable-output -debug -S %s 2>&1 | FileCheck --check-prefixes=CHECK %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+; REQUIRES: asserts
+
+; Check if the vector loop condition can be simplified to true for a given
+; VF/IC combination.
+define void @test_tc_less_than_16(ptr %A, i64 %N) {
+; CHECK: LV: Scalarizing: %cmp =
+; CHECK-NEXT: VPlan 'Initial VPlan for VF={8},UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count
+; CHECK-NEXT: vp<[[TC:%.+]]> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ph:
+; CHECK-NEXT: EMIT vp<[[TC]]> = EXPAND SCEV (zext i4 (trunc i64 %N to i4) to i64)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
+; CHECK-NEXT: EMIT ir<%p.src> = WIDEN-POINTER-INDUCTION ir<%A>, 1
+; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VPTR]]>
+; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%l>, ir<10>
+; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: WIDEN store vp<[[VPTR2]]>, ir<%add>
+; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV:%.+]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VTC]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+; CHECK: Executing best plan with VF=8, UF=2
+; CHECK-NEXT: VPlan 'Final VPlan for VF={8},UF={2}' {
+; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
+; CHECK-NEXT: vp<[[TC:%.+]]> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ph:
+; CHECK-NEXT: EMIT vp<[[TC]]> = EXPAND SCEV (zext i4 (trunc i64 %N to i4) to i64)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
+; CHECK-NEXT: EMIT ir<%p.src> = WIDEN-POINTER-INDUCTION ir<%A>, 1
+; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VPTR]]>
+; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%l>, ir<10>
+; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: WIDEN store vp<[[VPTR2]]>, ir<%add>
+; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV:%.+]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: EMIT branch-on-cond ir<true>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ %and = and i64 %N, 15
+ br label %loop
+
+loop:
+ %iv = phi i64 [ %and, %entry ], [ %iv.next, %loop ]
+ %p.src = phi ptr [ %A, %entry ], [ %p.src.next, %loop ]
+ %p.src.next = getelementptr inbounds i8, ptr %p.src, i64 1
+ %l = load i8, ptr %p.src, align 1
+ %add = add nsw i8 %l, 10
+ store i8 %add, ptr %p.src
+ %iv.next = add nsw i64 %iv, -1
+ %cmp = icmp eq i64 %iv.next, 0
+ br i1 %cmp, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/inlined3.ll b/llvm/test/Transforms/MemProfContextDisambiguation/inlined3.ll
index 39a595897c37..a2e01187108d 100644
--- a/llvm/test/Transforms/MemProfContextDisambiguation/inlined3.ll
+++ b/llvm/test/Transforms/MemProfContextDisambiguation/inlined3.ll
@@ -188,6 +188,9 @@ attributes #7 = { builtin }
; IR: define {{.*}} @_Z1Mv.memprof.1()
; IR: call {{.*}} @_Z2XZv.memprof.1()
+; IR: attributes #[[NOTCOLD]] = { builtin "memprof"="notcold" }
+; IR: attributes #[[COLD]] = { builtin "memprof"="cold" }
+
; STATS: 2 memprof-context-disambiguation - Number of cold static allocations (possibly cloned)
; STATS: 2 memprof-context-disambiguation - Number of not cold static allocations (possibly cloned)
; STATS: 3 memprof-context-disambiguation - Number of function clones created during whole program analysis
diff --git a/llvm/test/Transforms/PhaseOrdering/enable-loop-header-duplication-oz.ll b/llvm/test/Transforms/PhaseOrdering/enable-loop-header-duplication-oz.ll
new file mode 100644
index 000000000000..98b11578b49f
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/enable-loop-header-duplication-oz.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+
+;; Check that -enable-loop-header-duplication at Oz enables certain types of
+;; optimizations, for example replacing the loop body w/ a call to memset. If
+;; loop idiom recognition begins to recognize unrotated loops, this test will
+;; need to be updated.
+
+; RUN: opt -passes='default<Oz>' -S < %s | FileCheck %s --check-prefix=NOROTATION
+; RUN: opt -passes='default<Oz>' -S -enable-loop-header-duplication < %s | FileCheck %s --check-prefix=ROTATION
+; RUN: opt -passes='default<O2>' -S < %s | FileCheck %s --check-prefix=ROTATION
+
+define void @test(i8* noalias nonnull align 1 %start, i8* %end) unnamed_addr {
+; NOROTATION-LABEL: define void @test(
+; NOROTATION-SAME: ptr noalias nonnull writeonly align 1 [[START:%.*]], ptr readnone [[END:%.*]]) unnamed_addr #[[ATTR0:[0-9]+]] {
+; NOROTATION-NEXT: entry:
+; NOROTATION-NEXT: br label [[LOOP_HEADER:%.*]]
+; NOROTATION: loop.header:
+; NOROTATION-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[ENTRY:%.*]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; NOROTATION-NEXT: [[_12_I:%.*]] = icmp eq ptr [[PTR_IV]], [[END]]
+; NOROTATION-NEXT: br i1 [[_12_I]], label [[EXIT:%.*]], label [[LOOP_LATCH]]
+; NOROTATION: loop.latch:
+; NOROTATION-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV]], i64 1
+; NOROTATION-NEXT: store i8 1, ptr [[PTR_IV]], align 1
+; NOROTATION-NEXT: br label [[LOOP_HEADER]]
+; NOROTATION: exit:
+; NOROTATION-NEXT: ret void
+;
+; ROTATION-LABEL: define void @test(
+; ROTATION-SAME: ptr noalias nonnull writeonly align 1 [[START:%.*]], ptr readnone [[END:%.*]]) unnamed_addr #[[ATTR0:[0-9]+]] {
+; ROTATION-NEXT: entry:
+; ROTATION-NEXT: [[_12_I1:%.*]] = icmp eq ptr [[START]], [[END]]
+; ROTATION-NEXT: br i1 [[_12_I1]], label [[EXIT:%.*]], label [[LOOP_LATCH_PREHEADER:%.*]]
+; ROTATION: loop.latch.preheader:
+; ROTATION-NEXT: [[END3:%.*]] = ptrtoint ptr [[END]] to i64
+; ROTATION-NEXT: [[START4:%.*]] = ptrtoint ptr [[START]] to i64
+; ROTATION-NEXT: [[TMP0:%.*]] = sub i64 [[END3]], [[START4]]
+; ROTATION-NEXT: tail call void @llvm.memset.p0.i64(ptr nonnull align 1 [[START]], i8 1, i64 [[TMP0]], i1 false)
+; ROTATION-NEXT: br label [[EXIT]]
+; ROTATION: exit:
+; ROTATION-NEXT: ret void
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %ptr.iv = phi i8* [ %start, %entry ], [ %ptr.iv.next, %loop.latch ]
+ %_12.i = icmp eq i8* %ptr.iv, %end
+ br i1 %_12.i, label %exit, label %loop.latch
+
+loop.latch:
+ %ptr.iv.next = getelementptr inbounds i8, i8* %ptr.iv, i64 1
+ store i8 1, i8* %ptr.iv, align 1
+ br label %loop.header
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
index e167b6a47af5..ed73f7b13446 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/complex-loads.ll
@@ -1,369 +1,362 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-40 | FileCheck %s
+; RUN: opt -S -mtriple riscv64-unknown-linux-gnu < %s --passes=slp-vectorizer -mattr=+v -slp-threshold=-20 | FileCheck %s
define i32 @test(ptr %pix1, ptr %pix2, i64 %idx.ext, i64 %idx.ext63, ptr %add.ptr, ptr %add.ptr64) {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ptr [[PIX1:%.*]], ptr [[PIX2:%.*]], i64 [[IDX_EXT:%.*]], i64 [[IDX_EXT63:%.*]], ptr [[ADD_PTR:%.*]], ptr [[ADD_PTR64:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX1]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x ptr> [[TMP0]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, <2 x ptr> [[TMP1]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX2]], i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x ptr> [[TMP3]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, <2 x ptr> [[TMP4]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, <2 x ptr> [[TMP4]], <2 x i64> <i64 1, i64 3>
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, <2 x ptr> [[TMP1]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, <2 x ptr> [[TMP4]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr i8, ptr [[PIX1]], i64 2
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[PIX1]], align 1
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX1]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x ptr> [[TMP1]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i64> <i64 4, i64 6>
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x ptr> poison, ptr [[PIX2]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x ptr> [[TMP4]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 4, i64 6>
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr i8, ptr [[PIX1]], i64 1
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 1, i64 3>
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i64> <i64 5, i64 7>
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, <2 x ptr> [[TMP5]], <2 x i64> <i64 5, i64 7>
; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr i8, ptr [[PIX2]], i64 2
+; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr i8, ptr [[PIX1]], i64 3
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX32]], align 1
+; CHECK-NEXT: [[CONV33:%.*]] = zext i8 [[TMP10]] to i32
; CHECK-NEXT: [[ADD_PTR3:%.*]] = getelementptr i8, ptr [[PIX1]], i64 [[IDX_EXT]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
-; CHECK-NEXT: [[CONV_1:%.*]] = zext i8 [[TMP9]] to i32
+; CHECK-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[ADD_PTR3]], align 1
+; CHECK-NEXT: [[CONV_1:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ADD_PTR644]], align 1
; CHECK-NEXT: [[ARRAYIDX8_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 1
+; CHECK-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 2
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX22_1]], align 1
; CHECK-NEXT: [[ARRAYIDX32_1:%.*]] = getelementptr i8, ptr [[ADD_PTR3]], i64 3
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
-; CHECK-NEXT: [[CONV33_1:%.*]] = zext i8 [[TMP10]] to i32
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX32_1]], align 1
+; CHECK-NEXT: [[CONV33_1:%.*]] = zext i8 [[TMP14]] to i32
; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
-; CHECK-NEXT: [[ARRAYIDX20_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 2
-; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR_1]], i32 0
-; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x ptr> [[TMP11]], ptr [[ARRAYIDX20_2]], i32 1
-; CHECK-NEXT: [[TMP13:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP12]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP14:%.*]] = zext <2 x i8> [[TMP13]] to <2 x i32>
-; CHECK-NEXT: [[ARRAYIDX22_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 2
-; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR64_1]], i32 0
-; CHECK-NEXT: [[TMP16:%.*]] = insertelement <2 x ptr> [[TMP15]], ptr [[ARRAYIDX22_2]], i32 1
-; CHECK-NEXT: [[TMP17:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP16]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 4
+; CHECK-NEXT: [[TMP15:%.*]] = load <2 x i8>, ptr [[ADD_PTR_1]], align 1
+; CHECK-NEXT: [[TMP16:%.*]] = zext <2 x i8> [[TMP15]] to <2 x i32>
+; CHECK-NEXT: [[TMP17:%.*]] = load <2 x i8>, ptr [[ADD_PTR64_1]], align 1
; CHECK-NEXT: [[TMP18:%.*]] = zext <2 x i8> [[TMP17]] to <2 x i32>
-; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP14]], [[TMP18]]
-; CHECK-NEXT: [[TMP20:%.*]] = shufflevector <2 x ptr> [[TMP12]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, <2 x ptr> [[TMP20]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP22:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP21]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP16]], [[TMP18]]
+; CHECK-NEXT: [[TMP20:%.*]] = load <2 x i8>, ptr [[ARRAYIDX3_2]], align 1
+; CHECK-NEXT: [[TMP21:%.*]] = zext <2 x i8> [[TMP20]] to <2 x i32>
+; CHECK-NEXT: [[TMP22:%.*]] = load <2 x i8>, ptr [[ARRAYIDX5_2]], align 1
; CHECK-NEXT: [[TMP23:%.*]] = zext <2 x i8> [[TMP22]] to <2 x i32>
-; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <2 x ptr> [[TMP16]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, <2 x ptr> [[TMP24]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP26:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP25]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP27:%.*]] = zext <2 x i8> [[TMP26]] to <2 x i32>
-; CHECK-NEXT: [[TMP28:%.*]] = sub <2 x i32> [[TMP23]], [[TMP27]]
-; CHECK-NEXT: [[TMP29:%.*]] = shl <2 x i32> [[TMP28]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP30:%.*]] = add <2 x i32> [[TMP29]], [[TMP19]]
-; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, <2 x ptr> [[TMP20]], <2 x i64> <i64 1, i64 3>
-; CHECK-NEXT: [[TMP32:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP31]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP24:%.*]] = sub <2 x i32> [[TMP21]], [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = shl <2 x i32> [[TMP24]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP26:%.*]] = add <2 x i32> [[TMP25]], [[TMP19]]
+; CHECK-NEXT: [[ARRAYIDX20_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 2
+; CHECK-NEXT: [[ARRAYIDX22_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 2
+; CHECK-NEXT: [[ARRAYIDX25_2:%.*]] = getelementptr i8, ptr [[ADD_PTR_1]], i64 6
+; CHECK-NEXT: [[ARRAYIDX27_2:%.*]] = getelementptr i8, ptr [[ADD_PTR64_1]], i64 6
+; CHECK-NEXT: [[TMP27:%.*]] = load <2 x i8>, ptr [[ARRAYIDX20_2]], align 1
+; CHECK-NEXT: [[TMP28:%.*]] = zext <2 x i8> [[TMP27]] to <2 x i32>
+; CHECK-NEXT: [[TMP29:%.*]] = load <2 x i8>, ptr [[ARRAYIDX22_2]], align 1
+; CHECK-NEXT: [[TMP30:%.*]] = zext <2 x i8> [[TMP29]] to <2 x i32>
+; CHECK-NEXT: [[TMP31:%.*]] = sub <2 x i32> [[TMP28]], [[TMP30]]
+; CHECK-NEXT: [[TMP32:%.*]] = load <2 x i8>, ptr [[ARRAYIDX25_2]], align 1
; CHECK-NEXT: [[TMP33:%.*]] = zext <2 x i8> [[TMP32]] to <2 x i32>
-; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i8, <2 x ptr> [[TMP24]], <2 x i64> <i64 1, i64 3>
-; CHECK-NEXT: [[TMP35:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP34]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP36:%.*]] = zext <2 x i8> [[TMP35]] to <2 x i32>
-; CHECK-NEXT: [[TMP37:%.*]] = sub <2 x i32> [[TMP33]], [[TMP36]]
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr i8, <2 x ptr> [[TMP20]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP39:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP38]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP40:%.*]] = zext <2 x i8> [[TMP39]] to <2 x i32>
-; CHECK-NEXT: [[TMP41:%.*]] = getelementptr i8, <2 x ptr> [[TMP24]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP42:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP41]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP43:%.*]] = zext <2 x i8> [[TMP42]] to <2 x i32>
-; CHECK-NEXT: [[TMP44:%.*]] = sub <2 x i32> [[TMP40]], [[TMP43]]
-; CHECK-NEXT: [[TMP45:%.*]] = shl <2 x i32> [[TMP44]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP46:%.*]] = add <2 x i32> [[TMP45]], [[TMP37]]
-; CHECK-NEXT: [[TMP47:%.*]] = sub <2 x i32> [[TMP30]], [[TMP46]]
-; CHECK-NEXT: [[TMP48:%.*]] = extractelement <2 x i32> [[TMP47]], i32 0
-; CHECK-NEXT: [[TMP49:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
-; CHECK-NEXT: [[SUB59_2:%.*]] = sub i32 [[TMP48]], [[TMP49]]
-; CHECK-NEXT: [[TMP50:%.*]] = load i8, ptr null, align 1
+; CHECK-NEXT: [[TMP34:%.*]] = load <2 x i8>, ptr [[ARRAYIDX27_2]], align 1
+; CHECK-NEXT: [[TMP35:%.*]] = zext <2 x i8> [[TMP34]] to <2 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = sub <2 x i32> [[TMP33]], [[TMP35]]
+; CHECK-NEXT: [[TMP37:%.*]] = shl <2 x i32> [[TMP36]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP37]], [[TMP31]]
+; CHECK-NEXT: [[TMP39:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0
+; CHECK-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1
+; CHECK-NEXT: [[ADD44_2:%.*]] = add i32 [[TMP40]], [[TMP39]]
+; CHECK-NEXT: [[SUB45_2:%.*]] = sub i32 [[TMP39]], [[TMP40]]
+; CHECK-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0
+; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1
+; CHECK-NEXT: [[ADD46_2:%.*]] = add i32 [[TMP42]], [[TMP41]]
+; CHECK-NEXT: [[SUB47_2:%.*]] = sub i32 [[TMP41]], [[TMP42]]
+; CHECK-NEXT: [[ADD48_2:%.*]] = add i32 [[ADD46_2]], [[ADD44_2]]
+; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: [[ARRAYIDX20_3:%.*]] = getelementptr i8, ptr null, i64 2
; CHECK-NEXT: [[ARRAYIDX22_3:%.*]] = getelementptr i8, ptr null, i64 2
-; CHECK-NEXT: [[TMP51:%.*]] = load i8, ptr null, align 1
-; CHECK-NEXT: [[TMP52:%.*]] = insertelement <2 x ptr> <ptr poison, ptr null>, ptr [[ARRAYIDX20_3]], i32 0
-; CHECK-NEXT: [[TMP53:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP52]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP54:%.*]] = zext <2 x i8> [[TMP53]] to <2 x i32>
-; CHECK-NEXT: [[TMP55:%.*]] = insertelement <2 x ptr> <ptr poison, ptr null>, ptr [[ARRAYIDX22_3]], i32 0
-; CHECK-NEXT: [[TMP56:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP55]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP57:%.*]] = zext <2 x i8> [[TMP56]] to <2 x i32>
-; CHECK-NEXT: [[TMP58:%.*]] = sub <2 x i32> [[TMP54]], [[TMP57]]
-; CHECK-NEXT: [[TMP59:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP44:%.*]] = load i8, ptr null, align 1
+; CHECK-NEXT: [[TMP45:%.*]] = insertelement <2 x ptr> <ptr poison, ptr null>, ptr [[ARRAYIDX20_3]], i32 0
+; CHECK-NEXT: [[TMP46:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP45]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP47:%.*]] = zext <2 x i8> [[TMP46]] to <2 x i32>
+; CHECK-NEXT: [[TMP48:%.*]] = insertelement <2 x ptr> <ptr poison, ptr null>, ptr [[ARRAYIDX22_3]], i32 0
+; CHECK-NEXT: [[TMP49:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP48]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP50:%.*]] = zext <2 x i8> [[TMP49]] to <2 x i32>
+; CHECK-NEXT: [[TMP51:%.*]] = sub <2 x i32> [[TMP47]], [[TMP50]]
+; CHECK-NEXT: [[TMP52:%.*]] = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr align 1 null, i64 4, <2 x i1> <i1 true, i1 true>, i32 2)
+; CHECK-NEXT: [[TMP53:%.*]] = zext <2 x i8> [[TMP52]] to <2 x i32>
+; CHECK-NEXT: [[TMP54:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 6, i64 4>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP55:%.*]] = zext <2 x i8> [[TMP54]] to <2 x i32>
+; CHECK-NEXT: [[TMP56:%.*]] = sub <2 x i32> [[TMP53]], [[TMP55]]
+; CHECK-NEXT: [[TMP57:%.*]] = shl <2 x i32> [[TMP56]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP58:%.*]] = add <2 x i32> [[TMP57]], [[TMP51]]
+; CHECK-NEXT: [[TMP59:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 3, i64 1>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
; CHECK-NEXT: [[TMP60:%.*]] = zext <2 x i8> [[TMP59]] to <2 x i32>
-; CHECK-NEXT: [[TMP61:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 6, i64 4>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP61:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 3, i64 1>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
; CHECK-NEXT: [[TMP62:%.*]] = zext <2 x i8> [[TMP61]] to <2 x i32>
; CHECK-NEXT: [[TMP63:%.*]] = sub <2 x i32> [[TMP60]], [[TMP62]]
-; CHECK-NEXT: [[TMP64:%.*]] = shl <2 x i32> [[TMP63]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP65:%.*]] = add <2 x i32> [[TMP64]], [[TMP58]]
-; CHECK-NEXT: [[TMP66:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 3, i64 1>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP67:%.*]] = zext <2 x i8> [[TMP66]] to <2 x i32>
-; CHECK-NEXT: [[TMP68:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 3, i64 1>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP69:%.*]] = zext <2 x i8> [[TMP68]] to <2 x i32>
-; CHECK-NEXT: [[TMP70:%.*]] = sub <2 x i32> [[TMP67]], [[TMP69]]
-; CHECK-NEXT: [[TMP71:%.*]] = insertelement <2 x i8> poison, i8 [[TMP51]], i32 0
-; CHECK-NEXT: [[TMP72:%.*]] = insertelement <2 x i8> [[TMP71]], i8 [[TMP50]], i32 1
-; CHECK-NEXT: [[TMP73:%.*]] = zext <2 x i8> [[TMP72]] to <2 x i32>
-; CHECK-NEXT: [[TMP74:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 7, i64 5>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP75:%.*]] = zext <2 x i8> [[TMP74]] to <2 x i32>
-; CHECK-NEXT: [[TMP76:%.*]] = sub <2 x i32> [[TMP73]], [[TMP75]]
-; CHECK-NEXT: [[TMP77:%.*]] = shl <2 x i32> [[TMP76]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP78:%.*]] = add <2 x i32> [[TMP77]], [[TMP70]]
-; CHECK-NEXT: [[TMP79:%.*]] = sub <2 x i32> [[TMP65]], [[TMP78]]
-; CHECK-NEXT: [[TMP80:%.*]] = shufflevector <2 x i32> [[TMP78]], <2 x i32> [[TMP46]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP81:%.*]] = shufflevector <2 x i32> [[TMP65]], <2 x i32> [[TMP30]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP82:%.*]] = add <2 x i32> [[TMP80]], [[TMP81]]
-; CHECK-NEXT: [[TMP83:%.*]] = shufflevector <2 x i32> [[TMP78]], <2 x i32> [[TMP46]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP84:%.*]] = shufflevector <2 x i32> [[TMP65]], <2 x i32> [[TMP30]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP85:%.*]] = add <2 x i32> [[TMP83]], [[TMP84]]
-; CHECK-NEXT: [[TMP86:%.*]] = add <2 x i32> [[TMP85]], [[TMP82]]
-; CHECK-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP82]], [[TMP85]]
-; CHECK-NEXT: [[TMP88:%.*]] = extractelement <2 x i32> [[TMP79]], i32 0
-; CHECK-NEXT: [[TMP89:%.*]] = extractelement <2 x i32> [[TMP79]], i32 1
-; CHECK-NEXT: [[SUB59_3:%.*]] = sub i32 [[TMP89]], [[TMP88]]
-; CHECK-NEXT: [[TMP90:%.*]] = extractelement <2 x i32> [[TMP86]], i32 0
-; CHECK-NEXT: [[TMP91:%.*]] = extractelement <2 x i32> [[TMP86]], i32 1
-; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[TMP90]], [[TMP91]]
-; CHECK-NEXT: [[SUB102:%.*]] = sub i32 [[TMP91]], [[TMP90]]
-; CHECK-NEXT: [[TMP92:%.*]] = extractelement <2 x i32> [[TMP54]], i32 1
-; CHECK-NEXT: [[SHR_I:%.*]] = lshr i32 [[TMP92]], 15
+; CHECK-NEXT: [[TMP64:%.*]] = insertelement <2 x i8> poison, i8 [[TMP44]], i32 0
+; CHECK-NEXT: [[TMP65:%.*]] = insertelement <2 x i8> [[TMP64]], i8 [[TMP43]], i32 1
+; CHECK-NEXT: [[TMP66:%.*]] = zext <2 x i8> [[TMP65]] to <2 x i32>
+; CHECK-NEXT: [[TMP67:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i64> <i64 7, i64 5>), i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP68:%.*]] = zext <2 x i8> [[TMP67]] to <2 x i32>
+; CHECK-NEXT: [[TMP69:%.*]] = sub <2 x i32> [[TMP66]], [[TMP68]]
+; CHECK-NEXT: [[TMP70:%.*]] = shl <2 x i32> [[TMP69]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP71:%.*]] = add <2 x i32> [[TMP70]], [[TMP63]]
+; CHECK-NEXT: [[TMP72:%.*]] = add <2 x i32> [[TMP71]], [[TMP58]]
+; CHECK-NEXT: [[TMP73:%.*]] = sub <2 x i32> [[TMP58]], [[TMP71]]
+; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i32> [[TMP72]], i32 0
+; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i32> [[TMP72]], i32 1
+; CHECK-NEXT: [[ADD48_3:%.*]] = add i32 [[TMP74]], [[TMP75]]
+; CHECK-NEXT: [[ADD94:%.*]] = add i32 [[ADD48_3]], [[ADD48_2]]
+; CHECK-NEXT: [[SUB102:%.*]] = sub i32 [[ADD48_2]], [[ADD48_3]]
+; CHECK-NEXT: [[TMP76:%.*]] = extractelement <2 x i32> [[TMP47]], i32 1
+; CHECK-NEXT: [[SHR_I:%.*]] = lshr i32 [[TMP76]], 15
; CHECK-NEXT: [[AND_I:%.*]] = and i32 [[SHR_I]], 65537
; CHECK-NEXT: [[MUL_I:%.*]] = mul i32 [[AND_I]], 65535
-; CHECK-NEXT: [[TMP93:%.*]] = extractelement <2 x i32> [[TMP85]], i32 1
-; CHECK-NEXT: [[SHR_I49:%.*]] = lshr i32 [[TMP93]], 15
+; CHECK-NEXT: [[SHR_I49:%.*]] = lshr i32 [[ADD46_2]], 15
; CHECK-NEXT: [[AND_I50:%.*]] = and i32 [[SHR_I49]], 65537
; CHECK-NEXT: [[MUL_I51:%.*]] = mul i32 [[AND_I50]], 65535
-; CHECK-NEXT: [[TMP94:%.*]] = extractelement <2 x i32> [[TMP87]], i32 0
-; CHECK-NEXT: [[TMP95:%.*]] = extractelement <2 x i32> [[TMP87]], i32 1
-; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP94]], [[TMP95]]
-; CHECK-NEXT: [[TMP96:%.*]] = load <2 x i8>, ptr [[ARRAYIDX20]], align 1
-; CHECK-NEXT: [[TMP97:%.*]] = zext <2 x i8> [[TMP96]] to <2 x i32>
-; CHECK-NEXT: [[TMP98:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59_2]], i32 0
-; CHECK-NEXT: [[TMP99:%.*]] = shufflevector <2 x i32> [[TMP98]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP100:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59_3]], i32 0
-; CHECK-NEXT: [[TMP101:%.*]] = shufflevector <2 x i32> [[TMP100]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP102:%.*]] = add <2 x i32> [[TMP99]], [[TMP101]]
-; CHECK-NEXT: [[TMP103:%.*]] = sub <2 x i32> [[TMP99]], [[TMP101]]
-; CHECK-NEXT: [[TMP104:%.*]] = shufflevector <2 x i32> [[TMP102]], <2 x i32> [[TMP103]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP105:%.*]] = load <2 x i8>, ptr [[PIX1]], align 1
-; CHECK-NEXT: [[TMP106:%.*]] = zext <2 x i8> [[TMP105]] to <2 x i32>
-; CHECK-NEXT: [[TMP107:%.*]] = shufflevector <2 x i32> [[TMP106]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP108:%.*]] = insertelement <2 x ptr> [[TMP4]], ptr [[ARRAYIDX22]], i32 1
-; CHECK-NEXT: [[TMP109:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP108]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0
+; CHECK-NEXT: [[SHR_I49_1:%.*]] = lshr i32 [[TMP77]], 15
+; CHECK-NEXT: [[AND_I50_1:%.*]] = and i32 [[SHR_I49_1]], 65537
+; CHECK-NEXT: [[MUL_I51_1:%.*]] = mul i32 [[AND_I50_1]], 65535
+; CHECK-NEXT: [[SHR_I49_2:%.*]] = lshr i32 [[CONV_1]], 15
+; CHECK-NEXT: [[AND_I50_2:%.*]] = and i32 [[SHR_I49_2]], 65537
+; CHECK-NEXT: [[MUL_I51_2:%.*]] = mul i32 [[AND_I50_2]], 65535
+; CHECK-NEXT: [[SHR_I49_3:%.*]] = lshr i32 [[CONV]], 15
+; CHECK-NEXT: [[AND_I50_3:%.*]] = and i32 [[SHR_I49_3]], 65537
+; CHECK-NEXT: [[MUL_I51_3:%.*]] = mul i32 [[AND_I50_3]], 65535
+; CHECK-NEXT: [[TMP78:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8]], align 1
+; CHECK-NEXT: [[TMP79:%.*]] = zext <2 x i8> [[TMP78]] to <2 x i32>
+; CHECK-NEXT: [[TMP80:%.*]] = insertelement <2 x ptr> [[TMP5]], ptr [[ARRAYIDX22]], i32 1
+; CHECK-NEXT: [[TMP81:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP80]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP82:%.*]] = zext <2 x i8> [[TMP81]] to <2 x i32>
+; CHECK-NEXT: [[TMP83:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP3]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP84:%.*]] = zext <2 x i8> [[TMP83]] to <2 x i32>
+; CHECK-NEXT: [[TMP85:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP6]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP86:%.*]] = zext <2 x i8> [[TMP85]] to <2 x i32>
+; CHECK-NEXT: [[TMP87:%.*]] = sub <2 x i32> [[TMP84]], [[TMP86]]
+; CHECK-NEXT: [[TMP88:%.*]] = shl <2 x i32> [[TMP87]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP89:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP7]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP90:%.*]] = zext <2 x i8> [[TMP89]] to <2 x i32>
+; CHECK-NEXT: [[TMP91:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP8]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP92:%.*]] = zext <2 x i8> [[TMP91]] to <2 x i32>
+; CHECK-NEXT: [[TMP93:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP9]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP94:%.*]] = zext <2 x i8> [[TMP93]] to <2 x i32>
+; CHECK-NEXT: [[TMP95:%.*]] = sub <2 x i32> [[TMP92]], [[TMP94]]
+; CHECK-NEXT: [[TMP96:%.*]] = shl <2 x i32> [[TMP95]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i32> [[TMP79]], i32 [[CONV33]], i32 1
+; CHECK-NEXT: [[TMP98:%.*]] = sub <2 x i32> [[TMP97]], [[TMP90]]
+; CHECK-NEXT: [[TMP99:%.*]] = add <2 x i32> [[TMP96]], [[TMP98]]
+; CHECK-NEXT: [[TMP100:%.*]] = insertelement <2 x i32> [[TMP79]], i32 [[CONV]], i32 0
+; CHECK-NEXT: [[TMP101:%.*]] = sub <2 x i32> [[TMP100]], [[TMP82]]
+; CHECK-NEXT: [[TMP102:%.*]] = add <2 x i32> [[TMP88]], [[TMP101]]
+; CHECK-NEXT: [[TMP103:%.*]] = shufflevector <2 x i32> [[TMP99]], <2 x i32> [[TMP102]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP104:%.*]] = add <2 x i32> [[TMP99]], [[TMP102]]
+; CHECK-NEXT: [[TMP105:%.*]] = sub <2 x i32> [[TMP102]], [[TMP99]]
+; CHECK-NEXT: [[TMP106:%.*]] = extractelement <2 x i32> [[TMP104]], i32 0
+; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i32> [[TMP104]], i32 1
+; CHECK-NEXT: [[ADD48:%.*]] = add i32 [[TMP107]], [[TMP106]]
+; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i32> [[TMP105]], i32 1
+; CHECK-NEXT: [[SHR_I59:%.*]] = lshr i32 [[TMP107]], 15
+; CHECK-NEXT: [[AND_I60:%.*]] = and i32 [[SHR_I59]], 65537
+; CHECK-NEXT: [[MUL_I61:%.*]] = mul i32 [[AND_I60]], 65535
+; CHECK-NEXT: [[SHR_I59_1:%.*]] = lshr i32 [[TMP108]], 15
+; CHECK-NEXT: [[AND_I60_1:%.*]] = and i32 [[SHR_I59_1]], 65537
+; CHECK-NEXT: [[MUL_I61_1:%.*]] = mul i32 [[AND_I60_1]], 65535
+; CHECK-NEXT: [[TMP109:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
; CHECK-NEXT: [[TMP110:%.*]] = zext <2 x i8> [[TMP109]] to <2 x i32>
-; CHECK-NEXT: [[TMP111:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP2]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP112:%.*]] = zext <2 x i8> [[TMP111]] to <2 x i32>
-; CHECK-NEXT: [[TMP113:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP5]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP114:%.*]] = zext <2 x i8> [[TMP113]] to <2 x i32>
-; CHECK-NEXT: [[TMP115:%.*]] = sub <2 x i32> [[TMP112]], [[TMP114]]
-; CHECK-NEXT: [[TMP116:%.*]] = shl <2 x i32> [[TMP115]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP117:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP6]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP111:%.*]] = insertelement <2 x i8> poison, i8 [[TMP12]], i32 0
+; CHECK-NEXT: [[TMP112:%.*]] = insertelement <2 x i8> [[TMP111]], i8 [[TMP13]], i32 1
+; CHECK-NEXT: [[TMP113:%.*]] = zext <2 x i8> [[TMP112]] to <2 x i32>
+; CHECK-NEXT: [[TMP114:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR3]], i32 0
+; CHECK-NEXT: [[TMP115:%.*]] = shufflevector <2 x ptr> [[TMP114]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP116:%.*]] = getelementptr i8, <2 x ptr> [[TMP115]], <2 x i64> <i64 4, i64 6>
+; CHECK-NEXT: [[TMP117:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP116]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
; CHECK-NEXT: [[TMP118:%.*]] = zext <2 x i8> [[TMP117]] to <2 x i32>
-; CHECK-NEXT: [[TMP119:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP7]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP120:%.*]] = zext <2 x i8> [[TMP119]] to <2 x i32>
-; CHECK-NEXT: [[TMP121:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP8]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP122:%.*]] = zext <2 x i8> [[TMP121]] to <2 x i32>
-; CHECK-NEXT: [[TMP123:%.*]] = sub <2 x i32> [[TMP120]], [[TMP122]]
-; CHECK-NEXT: [[TMP124:%.*]] = shl <2 x i32> [[TMP123]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP125:%.*]] = shufflevector <2 x i32> [[TMP106]], <2 x i32> [[TMP97]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP126:%.*]] = sub <2 x i32> [[TMP125]], [[TMP110]]
-; CHECK-NEXT: [[TMP127:%.*]] = add <2 x i32> [[TMP116]], [[TMP126]]
-; CHECK-NEXT: [[TMP128:%.*]] = shufflevector <2 x i32> [[TMP107]], <2 x i32> [[TMP97]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP129:%.*]] = sub <2 x i32> [[TMP128]], [[TMP118]]
-; CHECK-NEXT: [[TMP130:%.*]] = add <2 x i32> [[TMP124]], [[TMP129]]
-; CHECK-NEXT: [[TMP131:%.*]] = extractelement <2 x i32> [[TMP127]], i32 1
-; CHECK-NEXT: [[TMP132:%.*]] = extractelement <2 x i32> [[TMP130]], i32 1
-; CHECK-NEXT: [[ADD46:%.*]] = add i32 [[TMP132]], [[TMP131]]
-; CHECK-NEXT: [[TMP133:%.*]] = sub <2 x i32> [[TMP127]], [[TMP130]]
-; CHECK-NEXT: [[TMP134:%.*]] = extractelement <2 x i32> [[TMP127]], i32 0
-; CHECK-NEXT: [[TMP135:%.*]] = extractelement <2 x i32> [[TMP130]], i32 0
-; CHECK-NEXT: [[ADD44:%.*]] = add i32 [[TMP135]], [[TMP134]]
-; CHECK-NEXT: [[TMP136:%.*]] = lshr <2 x i32> [[TMP107]], <i32 15, i32 15>
-; CHECK-NEXT: [[TMP137:%.*]] = and <2 x i32> [[TMP136]], <i32 65537, i32 65537>
-; CHECK-NEXT: [[TMP138:%.*]] = mul <2 x i32> [[TMP137]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP139:%.*]] = extractelement <2 x i32> [[TMP133]], i32 0
-; CHECK-NEXT: [[TMP140:%.*]] = extractelement <2 x i32> [[TMP133]], i32 1
-; CHECK-NEXT: [[SUB59:%.*]] = sub i32 [[TMP139]], [[TMP140]]
-; CHECK-NEXT: [[TMP141:%.*]] = load <2 x i8>, ptr [[ARRAYIDX8_1]], align 1
-; CHECK-NEXT: [[TMP142:%.*]] = zext <2 x i8> [[TMP141]] to <2 x i32>
-; CHECK-NEXT: [[ADD_PTR644:%.*]] = getelementptr i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
-; CHECK-NEXT: [[ARRAYIDX22_1:%.*]] = getelementptr i8, ptr [[ADD_PTR644]], i64 2
-; CHECK-NEXT: [[TMP143:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR644]], i32 0
-; CHECK-NEXT: [[TMP144:%.*]] = insertelement <2 x ptr> [[TMP143]], ptr [[ARRAYIDX22_1]], i32 1
-; CHECK-NEXT: [[TMP145:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP144]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP146:%.*]] = zext <2 x i8> [[TMP145]] to <2 x i32>
-; CHECK-NEXT: [[TMP147:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR3]], i32 0
-; CHECK-NEXT: [[TMP148:%.*]] = shufflevector <2 x ptr> [[TMP147]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP149:%.*]] = getelementptr i8, <2 x ptr> [[TMP148]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP150:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP149]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP151:%.*]] = zext <2 x i8> [[TMP150]] to <2 x i32>
-; CHECK-NEXT: [[TMP152:%.*]] = shufflevector <2 x ptr> [[TMP144]], <2 x ptr> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP153:%.*]] = getelementptr i8, <2 x ptr> [[TMP152]], <2 x i64> <i64 4, i64 6>
-; CHECK-NEXT: [[TMP154:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP153]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP155:%.*]] = zext <2 x i8> [[TMP154]] to <2 x i32>
-; CHECK-NEXT: [[TMP156:%.*]] = sub <2 x i32> [[TMP151]], [[TMP155]]
-; CHECK-NEXT: [[TMP157:%.*]] = shl <2 x i32> [[TMP156]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP158:%.*]] = getelementptr i8, <2 x ptr> [[TMP152]], <2 x i64> <i64 1, i64 3>
-; CHECK-NEXT: [[TMP159:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP158]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP160:%.*]] = zext <2 x i8> [[TMP159]] to <2 x i32>
-; CHECK-NEXT: [[TMP161:%.*]] = getelementptr i8, <2 x ptr> [[TMP148]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP162:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP161]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP163:%.*]] = zext <2 x i8> [[TMP162]] to <2 x i32>
-; CHECK-NEXT: [[TMP164:%.*]] = getelementptr i8, <2 x ptr> [[TMP152]], <2 x i64> <i64 5, i64 7>
-; CHECK-NEXT: [[TMP165:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP164]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
-; CHECK-NEXT: [[TMP166:%.*]] = zext <2 x i8> [[TMP165]] to <2 x i32>
-; CHECK-NEXT: [[TMP167:%.*]] = sub <2 x i32> [[TMP163]], [[TMP166]]
-; CHECK-NEXT: [[TMP168:%.*]] = shl <2 x i32> [[TMP167]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP169:%.*]] = insertelement <2 x i32> [[TMP142]], i32 [[CONV33_1]], i32 1
-; CHECK-NEXT: [[TMP170:%.*]] = sub <2 x i32> [[TMP169]], [[TMP160]]
-; CHECK-NEXT: [[TMP171:%.*]] = add <2 x i32> [[TMP168]], [[TMP170]]
-; CHECK-NEXT: [[TMP172:%.*]] = insertelement <2 x i32> [[TMP142]], i32 [[CONV_1]], i32 0
-; CHECK-NEXT: [[TMP173:%.*]] = sub <2 x i32> [[TMP172]], [[TMP146]]
-; CHECK-NEXT: [[TMP174:%.*]] = add <2 x i32> [[TMP157]], [[TMP173]]
-; CHECK-NEXT: [[TMP175:%.*]] = add <2 x i32> [[TMP171]], [[TMP174]]
-; CHECK-NEXT: [[TMP176:%.*]] = sub <2 x i32> [[TMP174]], [[TMP171]]
-; CHECK-NEXT: [[TMP177:%.*]] = extractelement <2 x i32> [[TMP175]], i32 0
-; CHECK-NEXT: [[TMP178:%.*]] = extractelement <2 x i32> [[TMP175]], i32 1
-; CHECK-NEXT: [[SUB51_1:%.*]] = sub i32 [[TMP177]], [[TMP178]]
-; CHECK-NEXT: [[TMP179:%.*]] = shufflevector <2 x i32> [[TMP176]], <2 x i32> [[TMP133]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[TMP180:%.*]] = shufflevector <2 x i32> [[TMP176]], <2 x i32> [[TMP133]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP181:%.*]] = add <2 x i32> [[TMP179]], [[TMP180]]
-; CHECK-NEXT: [[TMP182:%.*]] = extractelement <2 x i32> [[TMP176]], i32 0
-; CHECK-NEXT: [[TMP183:%.*]] = extractelement <2 x i32> [[TMP176]], i32 1
-; CHECK-NEXT: [[SUB59_1:%.*]] = sub i32 [[TMP182]], [[TMP183]]
-; CHECK-NEXT: [[SHR_I54:%.*]] = lshr i32 [[TMP178]], 15
+; CHECK-NEXT: [[TMP119:%.*]] = insertelement <2 x ptr> poison, ptr [[ADD_PTR644]], i32 0
+; CHECK-NEXT: [[TMP120:%.*]] = shufflevector <2 x ptr> [[TMP119]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP121:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> <i64 4, i64 6>
+; CHECK-NEXT: [[TMP122:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP121]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP123:%.*]] = zext <2 x i8> [[TMP122]] to <2 x i32>
+; CHECK-NEXT: [[TMP124:%.*]] = sub <2 x i32> [[TMP118]], [[TMP123]]
+; CHECK-NEXT: [[TMP125:%.*]] = shl <2 x i32> [[TMP124]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP126:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> <i64 1, i64 3>
+; CHECK-NEXT: [[TMP127:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP126]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP128:%.*]] = zext <2 x i8> [[TMP127]] to <2 x i32>
+; CHECK-NEXT: [[TMP129:%.*]] = getelementptr i8, <2 x ptr> [[TMP115]], <2 x i64> <i64 5, i64 7>
+; CHECK-NEXT: [[TMP130:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP129]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP131:%.*]] = zext <2 x i8> [[TMP130]] to <2 x i32>
+; CHECK-NEXT: [[TMP132:%.*]] = getelementptr i8, <2 x ptr> [[TMP120]], <2 x i64> <i64 5, i64 7>
+; CHECK-NEXT: [[TMP133:%.*]] = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> [[TMP132]], i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> poison)
+; CHECK-NEXT: [[TMP134:%.*]] = zext <2 x i8> [[TMP133]] to <2 x i32>
+; CHECK-NEXT: [[TMP135:%.*]] = sub <2 x i32> [[TMP131]], [[TMP134]]
+; CHECK-NEXT: [[TMP136:%.*]] = shl <2 x i32> [[TMP135]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP137:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV33_1]], i32 1
+; CHECK-NEXT: [[TMP138:%.*]] = sub <2 x i32> [[TMP137]], [[TMP128]]
+; CHECK-NEXT: [[TMP139:%.*]] = add <2 x i32> [[TMP136]], [[TMP138]]
+; CHECK-NEXT: [[TMP140:%.*]] = insertelement <2 x i32> [[TMP110]], i32 [[CONV_1]], i32 0
+; CHECK-NEXT: [[TMP141:%.*]] = sub <2 x i32> [[TMP140]], [[TMP113]]
+; CHECK-NEXT: [[TMP142:%.*]] = add <2 x i32> [[TMP125]], [[TMP141]]
+; CHECK-NEXT: [[TMP143:%.*]] = add <2 x i32> [[TMP139]], [[TMP142]]
+; CHECK-NEXT: [[TMP144:%.*]] = sub <2 x i32> [[TMP142]], [[TMP139]]
+; CHECK-NEXT: [[TMP145:%.*]] = extractelement <2 x i32> [[TMP143]], i32 0
+; CHECK-NEXT: [[TMP146:%.*]] = extractelement <2 x i32> [[TMP143]], i32 1
+; CHECK-NEXT: [[ADD48_1:%.*]] = add i32 [[TMP146]], [[TMP145]]
+; CHECK-NEXT: [[SHR_I54:%.*]] = lshr i32 [[TMP146]], 15
; CHECK-NEXT: [[AND_I55:%.*]] = and i32 [[SHR_I54]], 65537
; CHECK-NEXT: [[MUL_I56:%.*]] = mul i32 [[AND_I55]], 65535
-; CHECK-NEXT: [[TMP184:%.*]] = lshr <2 x i32> [[TMP142]], <i32 15, i32 15>
-; CHECK-NEXT: [[TMP185:%.*]] = and <2 x i32> [[TMP184]], <i32 65537, i32 65537>
-; CHECK-NEXT: [[TMP186:%.*]] = mul <2 x i32> [[TMP185]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP187:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59_1]], i32 0
-; CHECK-NEXT: [[TMP188:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP189:%.*]] = extractelement <2 x i32> [[TMP181]], i32 0
-; CHECK-NEXT: [[TMP190:%.*]] = extractelement <2 x i32> [[TMP181]], i32 1
-; CHECK-NEXT: [[ADD78_1:%.*]] = add i32 [[TMP189]], [[TMP190]]
-; CHECK-NEXT: [[TMP191:%.*]] = shufflevector <2 x i32> [[TMP33]], <2 x i32> [[TMP176]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP192:%.*]] = lshr <2 x i32> [[TMP191]], <i32 15, i32 15>
-; CHECK-NEXT: [[TMP193:%.*]] = and <2 x i32> [[TMP192]], <i32 65537, i32 65537>
-; CHECK-NEXT: [[TMP194:%.*]] = mul <2 x i32> [[TMP193]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP195:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_1]], i32 0
-; CHECK-NEXT: [[TMP196:%.*]] = shufflevector <2 x i32> [[TMP195]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
-; CHECK-NEXT: [[TMP198:%.*]] = shufflevector <2 x i32> [[TMP197]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP199:%.*]] = insertelement <2 x i32> poison, i32 [[ADD44]], i32 0
-; CHECK-NEXT: [[TMP200:%.*]] = shufflevector <2 x i32> [[TMP199]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP201:%.*]] = insertelement <2 x i32> <i32 15, i32 poison>, i32 [[ADD46]], i32 1
-; CHECK-NEXT: [[TMP202:%.*]] = lshr <2 x i32> [[TMP200]], [[TMP201]]
-; CHECK-NEXT: [[TMP203:%.*]] = sub <2 x i32> [[TMP200]], [[TMP201]]
-; CHECK-NEXT: [[TMP204:%.*]] = shufflevector <2 x i32> [[TMP202]], <2 x i32> [[TMP203]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP205:%.*]] = extractelement <2 x i32> [[TMP204]], i32 1
-; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[SUB51_1]], [[TMP205]]
-; CHECK-NEXT: [[TMP206:%.*]] = insertelement <2 x i32> <i32 65537, i32 poison>, i32 [[SUB51_1]], i32 1
-; CHECK-NEXT: [[TMP207:%.*]] = and <2 x i32> [[TMP204]], [[TMP206]]
-; CHECK-NEXT: [[TMP208:%.*]] = sub <2 x i32> [[TMP204]], [[TMP206]]
-; CHECK-NEXT: [[TMP209:%.*]] = shufflevector <2 x i32> [[TMP207]], <2 x i32> [[TMP208]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP210:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
-; CHECK-NEXT: [[TMP211:%.*]] = shufflevector <2 x i32> [[TMP210]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP212:%.*]] = add <2 x i32> [[TMP211]], [[TMP198]]
-; CHECK-NEXT: [[TMP213:%.*]] = sub <2 x i32> [[TMP211]], [[TMP198]]
-; CHECK-NEXT: [[TMP214:%.*]] = shufflevector <2 x i32> [[TMP212]], <2 x i32> [[TMP213]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP215:%.*]] = insertelement <2 x i32> [[TMP133]], i32 [[CONV_1]], i32 0
-; CHECK-NEXT: [[TMP216:%.*]] = lshr <2 x i32> [[TMP215]], <i32 15, i32 15>
-; CHECK-NEXT: [[TMP217:%.*]] = and <2 x i32> [[TMP216]], <i32 65537, i32 65537>
-; CHECK-NEXT: [[TMP218:%.*]] = mul <2 x i32> [[TMP217]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP87]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
-; CHECK-NEXT: [[TMP220:%.*]] = shufflevector <2 x i32> [[TMP219]], <2 x i32> [[TMP181]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP221:%.*]] = shufflevector <2 x i32> [[TMP87]], <2 x i32> [[TMP181]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP222:%.*]] = sub <2 x i32> [[TMP220]], [[TMP221]]
-; CHECK-NEXT: [[TMP223:%.*]] = shufflevector <2 x i32> [[TMP47]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
-; CHECK-NEXT: [[TMP224:%.*]] = insertelement <2 x i32> [[TMP223]], i32 [[ADD46]], i32 1
-; CHECK-NEXT: [[TMP225:%.*]] = insertelement <2 x i32> [[TMP47]], i32 [[ADD44]], i32 1
-; CHECK-NEXT: [[TMP226:%.*]] = add <2 x i32> [[TMP224]], [[TMP225]]
-; CHECK-NEXT: [[TMP227:%.*]] = shufflevector <2 x i32> [[TMP79]], <2 x i32> [[TMP175]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP228:%.*]] = shufflevector <2 x i32> [[TMP79]], <2 x i32> [[TMP175]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: [[TMP229:%.*]] = add <2 x i32> [[TMP227]], [[TMP228]]
-; CHECK-NEXT: [[TMP230:%.*]] = extractelement <2 x i32> [[TMP226]], i32 0
-; CHECK-NEXT: [[TMP231:%.*]] = extractelement <2 x i32> [[TMP229]], i32 0
-; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP231]], [[TMP230]]
-; CHECK-NEXT: [[TMP232:%.*]] = insertelement <2 x i32> [[TMP14]], i32 [[ADD46]], i32 1
-; CHECK-NEXT: [[TMP233:%.*]] = lshr <2 x i32> [[TMP232]], <i32 15, i32 15>
-; CHECK-NEXT: [[TMP234:%.*]] = and <2 x i32> [[TMP233]], <i32 65537, i32 65537>
-; CHECK-NEXT: [[TMP235:%.*]] = mul <2 x i32> [[TMP234]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP226]], i32 1
-; CHECK-NEXT: [[TMP237:%.*]] = extractelement <2 x i32> [[TMP229]], i32 1
-; CHECK-NEXT: [[ADD78:%.*]] = add i32 [[TMP237]], [[TMP236]]
-; CHECK-NEXT: [[TMP238:%.*]] = sub <2 x i32> [[TMP226]], [[TMP229]]
+; CHECK-NEXT: [[TMP147:%.*]] = lshr <2 x i32> [[TMP110]], <i32 15, i32 15>
+; CHECK-NEXT: [[TMP148:%.*]] = and <2 x i32> [[TMP147]], <i32 65537, i32 65537>
+; CHECK-NEXT: [[TMP149:%.*]] = mul <2 x i32> [[TMP148]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[ADD78:%.*]] = add i32 [[ADD48_1]], [[ADD48]]
+; CHECK-NEXT: [[SUB86:%.*]] = sub i32 [[ADD48]], [[ADD48_1]]
; CHECK-NEXT: [[ADD103:%.*]] = add i32 [[ADD94]], [[ADD78]]
; CHECK-NEXT: [[SUB104:%.*]] = sub i32 [[ADD78]], [[ADD94]]
-; CHECK-NEXT: [[TMP239:%.*]] = extractelement <2 x i32> [[TMP238]], i32 1
-; CHECK-NEXT: [[ADD105:%.*]] = add i32 [[SUB102]], [[TMP239]]
+; CHECK-NEXT: [[ADD105:%.*]] = add i32 [[SUB102]], [[SUB86]]
+; CHECK-NEXT: [[SUB106:%.*]] = sub i32 [[SUB86]], [[SUB102]]
; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[MUL_I]], [[ADD103]]
-; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP92]]
+; CHECK-NEXT: [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[TMP76]]
; CHECK-NEXT: [[ADD_I52:%.*]] = add i32 [[MUL_I51]], [[ADD105]]
-; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[TMP93]]
+; CHECK-NEXT: [[XOR_I53:%.*]] = xor i32 [[ADD_I52]], [[ADD46_2]]
; CHECK-NEXT: [[ADD_I57:%.*]] = add i32 [[MUL_I56]], [[SUB104]]
-; CHECK-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP178]]
+; CHECK-NEXT: [[XOR_I58:%.*]] = xor i32 [[ADD_I57]], [[TMP146]]
+; CHECK-NEXT: [[ADD_I62:%.*]] = add i32 [[MUL_I61]], [[SUB106]]
+; CHECK-NEXT: [[XOR_I63:%.*]] = xor i32 [[ADD_I62]], [[TMP107]]
; CHECK-NEXT: [[ADD110:%.*]] = add i32 [[XOR_I53]], [[XOR_I]]
; CHECK-NEXT: [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I58]]
-; CHECK-NEXT: [[TMP240:%.*]] = shufflevector <2 x i32> [[TMP222]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
-; CHECK-NEXT: [[TMP241:%.*]] = insertelement <2 x i32> [[TMP240]], i32 [[SUB102]], i32 1
-; CHECK-NEXT: [[TMP242:%.*]] = add <2 x i32> [[TMP238]], [[TMP241]]
-; CHECK-NEXT: [[TMP243:%.*]] = sub <2 x i32> [[TMP238]], [[TMP241]]
-; CHECK-NEXT: [[TMP244:%.*]] = shufflevector <2 x i32> [[TMP242]], <2 x i32> [[TMP243]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP245:%.*]] = add <2 x i32> [[TMP235]], [[TMP244]]
-; CHECK-NEXT: [[TMP246:%.*]] = xor <2 x i32> [[TMP245]], [[TMP232]]
-; CHECK-NEXT: [[TMP247:%.*]] = extractelement <2 x i32> [[TMP246]], i32 1
-; CHECK-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[TMP247]]
-; CHECK-NEXT: [[TMP248:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_1]], i32 0
-; CHECK-NEXT: [[TMP249:%.*]] = shufflevector <2 x i32> [[TMP248]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP250:%.*]] = add <2 x i32> [[TMP196]], [[TMP249]]
-; CHECK-NEXT: [[TMP251:%.*]] = sub <2 x i32> [[TMP196]], [[TMP249]]
-; CHECK-NEXT: [[TMP252:%.*]] = shufflevector <2 x i32> [[TMP250]], <2 x i32> [[TMP251]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP253:%.*]] = add <2 x i32> [[TMP194]], [[TMP252]]
-; CHECK-NEXT: [[TMP254:%.*]] = xor <2 x i32> [[TMP253]], [[TMP191]]
-; CHECK-NEXT: [[TMP255:%.*]] = extractelement <2 x i32> [[TMP246]], i32 0
-; CHECK-NEXT: [[ADD108_1:%.*]] = add i32 [[TMP255]], [[ADD113]]
-; CHECK-NEXT: [[TMP256:%.*]] = extractelement <2 x i32> [[TMP254]], i32 0
-; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP256]]
-; CHECK-NEXT: [[TMP257:%.*]] = extractelement <2 x i32> [[TMP254]], i32 1
-; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP257]]
-; CHECK-NEXT: [[TMP258:%.*]] = shufflevector <2 x i32> [[TMP209]], <2 x i32> poison, <2 x i32> <i32 1, i32 poison>
-; CHECK-NEXT: [[TMP259:%.*]] = shufflevector <2 x i32> [[TMP258]], <2 x i32> [[TMP238]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP260:%.*]] = add <2 x i32> [[TMP222]], [[TMP259]]
-; CHECK-NEXT: [[TMP261:%.*]] = sub <2 x i32> [[TMP222]], [[TMP259]]
-; CHECK-NEXT: [[TMP262:%.*]] = shufflevector <2 x i32> [[TMP260]], <2 x i32> [[TMP261]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP263:%.*]] = add <2 x i32> [[TMP218]], [[TMP262]]
-; CHECK-NEXT: [[TMP264:%.*]] = xor <2 x i32> [[TMP263]], [[TMP215]]
-; CHECK-NEXT: [[TMP265:%.*]] = extractelement <2 x i32> [[TMP264]], i32 1
-; CHECK-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[TMP265]]
-; CHECK-NEXT: [[TMP266:%.*]] = shufflevector <2 x i32> <i32 65535, i32 poison>, <2 x i32> [[TMP222]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP267:%.*]] = mul <2 x i32> [[TMP209]], [[TMP266]]
-; CHECK-NEXT: [[TMP268:%.*]] = sub <2 x i32> [[TMP209]], [[TMP266]]
-; CHECK-NEXT: [[TMP269:%.*]] = shufflevector <2 x i32> [[TMP267]], <2 x i32> [[TMP268]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP270:%.*]] = add <2 x i32> [[TMP186]], [[TMP214]]
-; CHECK-NEXT: [[TMP271:%.*]] = xor <2 x i32> [[TMP270]], [[TMP142]]
-; CHECK-NEXT: [[TMP272:%.*]] = extractelement <2 x i32> [[TMP269]], i32 0
-; CHECK-NEXT: [[TMP273:%.*]] = extractelement <2 x i32> [[TMP269]], i32 1
-; CHECK-NEXT: [[ADD_I62_2:%.*]] = add i32 [[TMP272]], [[TMP273]]
-; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[ADD44]]
-; CHECK-NEXT: [[TMP274:%.*]] = extractelement <2 x i32> [[TMP264]], i32 0
-; CHECK-NEXT: [[ADD108_2:%.*]] = add i32 [[TMP274]], [[ADD113_1]]
-; CHECK-NEXT: [[TMP275:%.*]] = extractelement <2 x i32> [[TMP271]], i32 0
-; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP275]]
-; CHECK-NEXT: [[TMP276:%.*]] = extractelement <2 x i32> [[TMP271]], i32 1
-; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP276]]
+; CHECK-NEXT: [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I63]]
+; CHECK-NEXT: [[TMP150:%.*]] = shufflevector <2 x i32> [[TMP105]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP151:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB47_2]], i32 1
+; CHECK-NEXT: [[TMP152:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB45_2]], i32 1
+; CHECK-NEXT: [[TMP153:%.*]] = add <2 x i32> [[TMP151]], [[TMP152]]
+; CHECK-NEXT: [[TMP154:%.*]] = shufflevector <2 x i32> [[TMP144]], <2 x i32> [[TMP73]], <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: [[TMP155:%.*]] = shufflevector <2 x i32> [[TMP144]], <2 x i32> [[TMP73]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP156:%.*]] = add <2 x i32> [[TMP154]], [[TMP155]]
+; CHECK-NEXT: [[TMP157:%.*]] = extractelement <2 x i32> [[TMP153]], i32 1
+; CHECK-NEXT: [[TMP158:%.*]] = extractelement <2 x i32> [[TMP156]], i32 1
+; CHECK-NEXT: [[TMP159:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[ADD94_1:%.*]] = add i32 [[TMP158]], [[TMP157]]
+; CHECK-NEXT: [[TMP160:%.*]] = extractelement <2 x i32> [[TMP153]], i32 0
+; CHECK-NEXT: [[TMP161:%.*]] = extractelement <2 x i32> [[TMP156]], i32 0
+; CHECK-NEXT: [[TMP162:%.*]] = shufflevector <2 x i32> [[TMP156]], <2 x i32> [[TMP153]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[ADD78_1:%.*]] = add i32 [[TMP161]], [[TMP160]]
+; CHECK-NEXT: [[TMP163:%.*]] = sub <2 x i32> [[TMP153]], [[TMP156]]
+; CHECK-NEXT: [[TMP164:%.*]] = extractelement <2 x i32> [[TMP163]], i32 0
+; CHECK-NEXT: [[TMP165:%.*]] = extractelement <2 x i32> [[TMP163]], i32 1
+; CHECK-NEXT: [[ADD105_1:%.*]] = add i32 [[TMP165]], [[TMP164]]
+; CHECK-NEXT: [[SUB106_1:%.*]] = sub i32 [[TMP164]], [[TMP165]]
+; CHECK-NEXT: [[ADD_I52_1:%.*]] = add i32 [[MUL_I51_1]], [[ADD105_1]]
+; CHECK-NEXT: [[XOR_I53_1:%.*]] = xor i32 [[ADD_I52_1]], [[TMP77]]
+; CHECK-NEXT: [[TMP166:%.*]] = shufflevector <2 x i32> [[TMP16]], <2 x i32> [[TMP144]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[TMP167:%.*]] = lshr <2 x i32> [[TMP166]], <i32 15, i32 15>
+; CHECK-NEXT: [[TMP168:%.*]] = and <2 x i32> [[TMP167]], <i32 65537, i32 65537>
+; CHECK-NEXT: [[TMP169:%.*]] = mul <2 x i32> [[TMP168]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP170:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_1]], i32 0
+; CHECK-NEXT: [[TMP171:%.*]] = shufflevector <2 x i32> [[TMP170]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP172:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_1]], i32 0
+; CHECK-NEXT: [[TMP173:%.*]] = shufflevector <2 x i32> [[TMP172]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP174:%.*]] = add <2 x i32> [[TMP171]], [[TMP173]]
+; CHECK-NEXT: [[TMP175:%.*]] = sub <2 x i32> [[TMP171]], [[TMP173]]
+; CHECK-NEXT: [[TMP176:%.*]] = shufflevector <2 x i32> [[TMP174]], <2 x i32> [[TMP175]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP177:%.*]] = add <2 x i32> [[TMP169]], [[TMP176]]
+; CHECK-NEXT: [[TMP178:%.*]] = xor <2 x i32> [[TMP177]], [[TMP166]]
+; CHECK-NEXT: [[ADD_I62_1:%.*]] = add i32 [[MUL_I61_1]], [[SUB106_1]]
+; CHECK-NEXT: [[XOR_I63_1:%.*]] = xor i32 [[ADD_I62_1]], [[TMP108]]
+; CHECK-NEXT: [[ADD108_1:%.*]] = add i32 [[XOR_I53_1]], [[ADD113]]
+; CHECK-NEXT: [[TMP179:%.*]] = extractelement <2 x i32> [[TMP178]], i32 0
+; CHECK-NEXT: [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[TMP179]]
+; CHECK-NEXT: [[TMP180:%.*]] = extractelement <2 x i32> [[TMP178]], i32 1
+; CHECK-NEXT: [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[TMP180]]
+; CHECK-NEXT: [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I63_1]]
+; CHECK-NEXT: [[TMP181:%.*]] = shufflevector <2 x i32> [[TMP104]], <2 x i32> poison, <2 x i32> <i32 poison, i32 0>
+; CHECK-NEXT: [[TMP182:%.*]] = insertelement <2 x i32> [[TMP181]], i32 [[ADD44_2]], i32 0
+; CHECK-NEXT: [[TMP183:%.*]] = insertelement <2 x i32> [[TMP104]], i32 [[ADD46_2]], i32 0
+; CHECK-NEXT: [[TMP184:%.*]] = sub <2 x i32> [[TMP182]], [[TMP183]]
+; CHECK-NEXT: [[TMP185:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: [[TMP186:%.*]] = shufflevector <2 x i32> [[TMP72]], <2 x i32> [[TMP143]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP187:%.*]] = sub <2 x i32> [[TMP185]], [[TMP186]]
+; CHECK-NEXT: [[TMP188:%.*]] = extractelement <2 x i32> [[TMP184]], i32 0
+; CHECK-NEXT: [[TMP189:%.*]] = extractelement <2 x i32> [[TMP187]], i32 0
+; CHECK-NEXT: [[TMP190:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP184]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[ADD94_2:%.*]] = add i32 [[TMP189]], [[TMP188]]
+; CHECK-NEXT: [[TMP191:%.*]] = extractelement <2 x i32> [[TMP184]], i32 1
+; CHECK-NEXT: [[TMP192:%.*]] = extractelement <2 x i32> [[TMP187]], i32 1
+; CHECK-NEXT: [[TMP193:%.*]] = shufflevector <2 x i32> [[TMP187]], <2 x i32> [[TMP184]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[ADD78_2:%.*]] = add i32 [[TMP192]], [[TMP191]]
+; CHECK-NEXT: [[TMP194:%.*]] = sub <2 x i32> [[TMP184]], [[TMP187]]
+; CHECK-NEXT: [[TMP195:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_2]], i32 0
+; CHECK-NEXT: [[TMP196:%.*]] = shufflevector <2 x i32> [[TMP195]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP197:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_2]], i32 0
+; CHECK-NEXT: [[TMP198:%.*]] = shufflevector <2 x i32> [[TMP197]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP199:%.*]] = add <2 x i32> [[TMP196]], [[TMP198]]
+; CHECK-NEXT: [[TMP200:%.*]] = sub <2 x i32> [[TMP196]], [[TMP198]]
+; CHECK-NEXT: [[TMP201:%.*]] = shufflevector <2 x i32> [[TMP199]], <2 x i32> [[TMP200]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP202:%.*]] = extractelement <2 x i32> [[TMP194]], i32 0
+; CHECK-NEXT: [[TMP203:%.*]] = extractelement <2 x i32> [[TMP194]], i32 1
+; CHECK-NEXT: [[ADD105_2:%.*]] = add i32 [[TMP202]], [[TMP203]]
+; CHECK-NEXT: [[SUB106_2:%.*]] = sub i32 [[TMP203]], [[TMP202]]
+; CHECK-NEXT: [[ADD_I52_2:%.*]] = add i32 [[MUL_I51_2]], [[ADD105_2]]
+; CHECK-NEXT: [[XOR_I53_2:%.*]] = xor i32 [[ADD_I52_2]], [[CONV_1]]
+; CHECK-NEXT: [[TMP204:%.*]] = add <2 x i32> [[TMP149]], [[TMP201]]
+; CHECK-NEXT: [[TMP205:%.*]] = xor <2 x i32> [[TMP204]], [[TMP110]]
+; CHECK-NEXT: [[SHR_I59_2:%.*]] = lshr i32 [[TMP106]], 15
+; CHECK-NEXT: [[AND_I60_2:%.*]] = and i32 [[SHR_I59_2]], 65537
+; CHECK-NEXT: [[MUL_I61_2:%.*]] = mul i32 [[AND_I60_2]], 65535
+; CHECK-NEXT: [[ADD_I62_2:%.*]] = add i32 [[MUL_I61_2]], [[SUB106_2]]
+; CHECK-NEXT: [[XOR_I63_2:%.*]] = xor i32 [[ADD_I62_2]], [[TMP106]]
+; CHECK-NEXT: [[ADD108_2:%.*]] = add i32 [[XOR_I53_2]], [[ADD113_1]]
+; CHECK-NEXT: [[TMP206:%.*]] = extractelement <2 x i32> [[TMP205]], i32 0
+; CHECK-NEXT: [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[TMP206]]
+; CHECK-NEXT: [[TMP207:%.*]] = extractelement <2 x i32> [[TMP205]], i32 1
+; CHECK-NEXT: [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[TMP207]]
; CHECK-NEXT: [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I63_2]]
-; CHECK-NEXT: [[TMP277:%.*]] = insertelement <2 x i32> poison, i32 [[SUB59]], i32 0
-; CHECK-NEXT: [[TMP278:%.*]] = shufflevector <2 x i32> [[TMP277]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP279:%.*]] = add <2 x i32> [[TMP278]], [[TMP188]]
-; CHECK-NEXT: [[TMP280:%.*]] = sub <2 x i32> [[TMP278]], [[TMP188]]
-; CHECK-NEXT: [[TMP281:%.*]] = shufflevector <2 x i32> [[TMP279]], <2 x i32> [[TMP280]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT: [[TMP282:%.*]] = add <2 x i32> [[TMP104]], [[TMP281]]
-; CHECK-NEXT: [[TMP283:%.*]] = sub <2 x i32> [[TMP281]], [[TMP104]]
-; CHECK-NEXT: [[TMP284:%.*]] = add <2 x i32> [[TMP138]], [[TMP282]]
-; CHECK-NEXT: [[TMP285:%.*]] = xor <2 x i32> [[TMP284]], [[TMP107]]
-; CHECK-NEXT: [[TMP286:%.*]] = lshr <2 x i32> [[TMP97]], <i32 15, i32 15>
-; CHECK-NEXT: [[TMP287:%.*]] = and <2 x i32> [[TMP286]], <i32 65537, i32 65537>
-; CHECK-NEXT: [[TMP288:%.*]] = mul <2 x i32> [[TMP287]], <i32 65535, i32 65535>
-; CHECK-NEXT: [[TMP289:%.*]] = add <2 x i32> [[TMP288]], [[TMP283]]
-; CHECK-NEXT: [[TMP290:%.*]] = xor <2 x i32> [[TMP289]], [[TMP97]]
-; CHECK-NEXT: [[TMP291:%.*]] = extractelement <2 x i32> [[TMP285]], i32 1
-; CHECK-NEXT: [[ADD108_3:%.*]] = add i32 [[TMP291]], [[ADD113_2]]
-; CHECK-NEXT: [[TMP292:%.*]] = extractelement <2 x i32> [[TMP285]], i32 0
-; CHECK-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP292]]
-; CHECK-NEXT: [[TMP293:%.*]] = extractelement <2 x i32> [[TMP290]], i32 0
-; CHECK-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP293]]
-; CHECK-NEXT: [[TMP294:%.*]] = extractelement <2 x i32> [[TMP290]], i32 1
-; CHECK-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[TMP294]]
+; CHECK-NEXT: [[TMP208:%.*]] = insertelement <2 x i32> [[TMP150]], i32 [[SUB45_2]], i32 0
+; CHECK-NEXT: [[TMP209:%.*]] = insertelement <2 x i32> [[TMP105]], i32 [[SUB47_2]], i32 0
+; CHECK-NEXT: [[TMP210:%.*]] = sub <2 x i32> [[TMP208]], [[TMP209]]
+; CHECK-NEXT: [[TMP211:%.*]] = shufflevector <2 x i32> [[TMP73]], <2 x i32> [[TMP144]], <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: [[TMP212:%.*]] = shufflevector <2 x i32> [[TMP73]], <2 x i32> [[TMP144]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP213:%.*]] = sub <2 x i32> [[TMP211]], [[TMP212]]
+; CHECK-NEXT: [[TMP214:%.*]] = extractelement <2 x i32> [[TMP210]], i32 0
+; CHECK-NEXT: [[TMP215:%.*]] = extractelement <2 x i32> [[TMP213]], i32 0
+; CHECK-NEXT: [[TMP216:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP210]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[ADD94_3:%.*]] = add i32 [[TMP215]], [[TMP214]]
+; CHECK-NEXT: [[TMP217:%.*]] = extractelement <2 x i32> [[TMP210]], i32 1
+; CHECK-NEXT: [[TMP218:%.*]] = extractelement <2 x i32> [[TMP213]], i32 1
+; CHECK-NEXT: [[TMP219:%.*]] = shufflevector <2 x i32> [[TMP213]], <2 x i32> [[TMP210]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[ADD78_3:%.*]] = add i32 [[TMP218]], [[TMP217]]
+; CHECK-NEXT: [[TMP220:%.*]] = sub <2 x i32> [[TMP210]], [[TMP213]]
+; CHECK-NEXT: [[TMP221:%.*]] = insertelement <2 x i32> poison, i32 [[ADD78_3]], i32 0
+; CHECK-NEXT: [[TMP222:%.*]] = shufflevector <2 x i32> [[TMP221]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP223:%.*]] = insertelement <2 x i32> poison, i32 [[ADD94_3]], i32 0
+; CHECK-NEXT: [[TMP224:%.*]] = shufflevector <2 x i32> [[TMP223]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP225:%.*]] = add <2 x i32> [[TMP222]], [[TMP224]]
+; CHECK-NEXT: [[TMP226:%.*]] = sub <2 x i32> [[TMP222]], [[TMP224]]
+; CHECK-NEXT: [[TMP227:%.*]] = shufflevector <2 x i32> [[TMP225]], <2 x i32> [[TMP226]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP228:%.*]] = extractelement <2 x i32> [[TMP220]], i32 0
+; CHECK-NEXT: [[TMP229:%.*]] = extractelement <2 x i32> [[TMP220]], i32 1
+; CHECK-NEXT: [[ADD105_3:%.*]] = add i32 [[TMP228]], [[TMP229]]
+; CHECK-NEXT: [[SUB106_3:%.*]] = sub i32 [[TMP229]], [[TMP228]]
+; CHECK-NEXT: [[ADD_I52_3:%.*]] = add i32 [[MUL_I51_3]], [[ADD105_3]]
+; CHECK-NEXT: [[XOR_I53_3:%.*]] = xor i32 [[ADD_I52_3]], [[CONV]]
+; CHECK-NEXT: [[TMP230:%.*]] = lshr <2 x i32> [[TMP79]], <i32 15, i32 15>
+; CHECK-NEXT: [[TMP231:%.*]] = and <2 x i32> [[TMP230]], <i32 65537, i32 65537>
+; CHECK-NEXT: [[TMP232:%.*]] = mul <2 x i32> [[TMP231]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP233:%.*]] = add <2 x i32> [[TMP232]], [[TMP227]]
+; CHECK-NEXT: [[TMP234:%.*]] = xor <2 x i32> [[TMP233]], [[TMP79]]
+; CHECK-NEXT: [[SHR_I59_3:%.*]] = lshr i32 [[CONV33]], 15
+; CHECK-NEXT: [[AND_I60_3:%.*]] = and i32 [[SHR_I59_3]], 65537
+; CHECK-NEXT: [[MUL_I61_3:%.*]] = mul i32 [[AND_I60_3]], 65535
+; CHECK-NEXT: [[ADD_I62_3:%.*]] = add i32 [[MUL_I61_3]], [[SUB106_3]]
+; CHECK-NEXT: [[XOR_I63_3:%.*]] = xor i32 [[ADD_I62_3]], [[CONV33]]
+; CHECK-NEXT: [[ADD108_3:%.*]] = add i32 [[XOR_I53_3]], [[ADD113_2]]
+; CHECK-NEXT: [[TMP235:%.*]] = extractelement <2 x i32> [[TMP234]], i32 0
+; CHECK-NEXT: [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[TMP235]]
+; CHECK-NEXT: [[TMP236:%.*]] = extractelement <2 x i32> [[TMP234]], i32 1
+; CHECK-NEXT: [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[TMP236]]
+; CHECK-NEXT: [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I63_3]]
; CHECK-NEXT: ret i32 [[ADD113_3]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll
index abf1d7abdc12..5c261d69cd53 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll
@@ -23,16 +23,16 @@ define void @Test(i32) {
; FORCE_REDUCTION-NEXT: entry:
; FORCE_REDUCTION-NEXT: br label [[LOOP:%.*]]
; FORCE_REDUCTION: loop:
-; FORCE_REDUCTION-NEXT: [[TMP1:%.*]] = phi <2 x i32> [ [[TMP7:%.*]], [[LOOP]] ], [ zeroinitializer, [[ENTRY:%.*]] ]
+; FORCE_REDUCTION-NEXT: [[TMP1:%.*]] = phi <2 x i32> [ [[TMP9:%.*]], [[LOOP]] ], [ zeroinitializer, [[ENTRY:%.*]] ]
; FORCE_REDUCTION-NEXT: [[TMP2:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-; FORCE_REDUCTION-NEXT: [[TMP3:%.*]] = extractelement <8 x i32> [[TMP2]], i32 1
-; FORCE_REDUCTION-NEXT: [[TMP4:%.*]] = add <8 x i32> [[TMP2]], <i32 0, i32 55, i32 285, i32 1240, i32 1496, i32 8555, i32 12529, i32 13685>
-; FORCE_REDUCTION-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP4]])
-; FORCE_REDUCTION-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP0:%.*]], [[TMP5]]
-; FORCE_REDUCTION-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[TMP3]]
-; FORCE_REDUCTION-NEXT: [[VAL_43:%.*]] = add i32 [[TMP3]], 14910
-; FORCE_REDUCTION-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[OP_RDX1]], i32 0
-; FORCE_REDUCTION-NEXT: [[TMP7]] = insertelement <2 x i32> [[TMP6]], i32 [[VAL_43]], i32 1
+; FORCE_REDUCTION-NEXT: [[TMP3:%.*]] = add <8 x i32> [[TMP2]], <i32 0, i32 55, i32 285, i32 1240, i32 1496, i32 8555, i32 12529, i32 13685>
+; FORCE_REDUCTION-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]])
+; FORCE_REDUCTION-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP0:%.*]], [[TMP4]]
+; FORCE_REDUCTION-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> <i32 poison, i32 14910>, i32 [[OP_RDX]], i32 0
+; FORCE_REDUCTION-NEXT: [[TMP6:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
+; FORCE_REDUCTION-NEXT: [[TMP7:%.*]] = and <2 x i32> [[TMP5]], [[TMP6]]
+; FORCE_REDUCTION-NEXT: [[TMP8:%.*]] = add <2 x i32> [[TMP5]], [[TMP6]]
+; FORCE_REDUCTION-NEXT: [[TMP9]] = shufflevector <2 x i32> [[TMP7]], <2 x i32> [[TMP8]], <2 x i32> <i32 0, i32 3>
; FORCE_REDUCTION-NEXT: br label [[LOOP]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_clear_undefs.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_clear_undefs.ll
index c2369a6a89ec..de99654d84eb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_clear_undefs.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_clear_undefs.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
; YAML-NEXT: Function: foo
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'SLP vectorized with cost '
-; YAML-NEXT: - Cost: '-4'
+; YAML-NEXT: - Cost: '-3'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '10'
; YAML-NEXT: ...
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll
index 10369e3aa270..fd9528aa8df3 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll
@@ -70,8 +70,10 @@ define <4 x float> @simple_select_eph(<4 x float> %a, <4 x float> %b, <4 x i32>
; THRESHOLD-NEXT: [[RD:%.*]] = insertelement <4 x float> [[RC]], float [[S3]], i32 3
; THRESHOLD-NEXT: [[Q0:%.*]] = extractelement <4 x float> [[RD]], i32 0
; THRESHOLD-NEXT: [[Q1:%.*]] = extractelement <4 x float> [[RD]], i32 1
+; THRESHOLD-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[RD]], <4 x float> poison, <2 x i32> <i32 0, i32 1>
; THRESHOLD-NEXT: [[Q2:%.*]] = extractelement <4 x float> [[RD]], i32 2
; THRESHOLD-NEXT: [[Q3:%.*]] = extractelement <4 x float> [[RD]], i32 3
+; THRESHOLD-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[RD]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
; THRESHOLD-NEXT: [[Q4:%.*]] = fadd float [[Q0]], [[Q1]]
; THRESHOLD-NEXT: [[Q5:%.*]] = fadd float [[Q2]], [[Q3]]
; THRESHOLD-NEXT: [[Q6:%.*]] = fadd float [[Q4]], [[Q5]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll b/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
index 9376bcd220a2..18d5b0900176 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
@@ -104,8 +104,10 @@ define <4 x float> @simple_select_eph(<4 x float> %a, <4 x float> %b, <4 x i32>
; THRESHOLD-NEXT: [[RD:%.*]] = insertelement <4 x float> [[RC]], float [[S3]], i32 3
; THRESHOLD-NEXT: [[Q0:%.*]] = extractelement <4 x float> [[RD]], i32 0
; THRESHOLD-NEXT: [[Q1:%.*]] = extractelement <4 x float> [[RD]], i32 1
+; THRESHOLD-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[RD]], <4 x float> poison, <2 x i32> <i32 0, i32 1>
; THRESHOLD-NEXT: [[Q2:%.*]] = extractelement <4 x float> [[RD]], i32 2
; THRESHOLD-NEXT: [[Q3:%.*]] = extractelement <4 x float> [[RD]], i32 3
+; THRESHOLD-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[RD]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
; THRESHOLD-NEXT: [[Q4:%.*]] = fadd float [[Q0]], [[Q1]]
; THRESHOLD-NEXT: [[Q5:%.*]] = fadd float [[Q2]], [[Q3]]
; THRESHOLD-NEXT: [[Q6:%.*]] = fadd float [[Q4]], [[Q5]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/multi-nodes-to-shuffle.ll b/llvm/test/Transforms/SLPVectorizer/X86/multi-nodes-to-shuffle.ll
index e5b5a5c6c4a0..a48076adc809 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/multi-nodes-to-shuffle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/multi-nodes-to-shuffle.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -slp-threshold=-115 | FileCheck %s
+; RUN: opt -passes=slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -slp-threshold=-127 | FileCheck %s
; RUN: opt -passes=slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -slp-threshold=-115 -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
define void @test(i64 %p0, i64 %p1, i64 %p2, i64 %p3) {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
index ec90ca9bc674..2cdbd5cff446 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64 -S | FileCheck %s --check-prefixes=SSE2
; RUN: opt < %s -passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v2 -S | FileCheck %s --check-prefixes=SSE42
-; RUN: opt < %s -passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v3 -S | FileCheck %s --check-prefixes=AVX
-; RUN: opt < %s -passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v4 -S | FileCheck %s --check-prefixes=AVX
+; RUN: opt < %s -passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v3 -S | FileCheck %s --check-prefixes=AVX2
+; RUN: opt < %s -passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v4 -S | FileCheck %s --check-prefixes=AVX512
; PR51746
; typedef int v4si __attribute__ ((vector_size (16)));
@@ -18,33 +18,44 @@
define i32 @reduce_and4(i32 %acc, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3, <4 x i32> %v4) {
; SSE2-LABEL: @reduce_and4(
; SSE2-NEXT: entry:
-; SSE2-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE2-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP0]])
-; SSE2-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE2-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP2]])
-; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP1]], [[TMP3]]
-; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
-; SSE2-NEXT: ret i32 [[OP_RDX1]]
+; SSE2-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP2:%.*]] = shufflevector <16 x i32> [[TMP0]], <16 x i32> [[TMP1]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V1:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> [[TMP3]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; SSE2-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[TMP4]])
+; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP5]], [[ACC:%.*]]
+; SSE2-NEXT: ret i32 [[OP_RDX]]
;
; SSE42-LABEL: @reduce_and4(
; SSE42-NEXT: entry:
-; SSE42-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE42-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP0]])
-; SSE42-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE42-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP2]])
-; SSE42-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP1]], [[TMP3]]
-; SSE42-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
-; SSE42-NEXT: ret i32 [[OP_RDX1]]
+; SSE42-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP2:%.*]] = shufflevector <16 x i32> [[TMP0]], <16 x i32> [[TMP1]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V1:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> [[TMP3]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; SSE42-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[TMP4]])
+; SSE42-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP5]], [[ACC:%.*]]
+; SSE42-NEXT: ret i32 [[OP_RDX]]
;
-; AVX-LABEL: @reduce_and4(
-; AVX-NEXT: entry:
-; AVX-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
-; AVX-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP0]])
-; AVX-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
-; AVX-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP2]])
-; AVX-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP1]], [[TMP3]]
-; AVX-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
-; AVX-NEXT: ret i32 [[OP_RDX1]]
+; AVX2-LABEL: @reduce_and4(
+; AVX2-NEXT: entry:
+; AVX2-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; AVX2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP0]], <8 x i32> [[TMP1]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; AVX2-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[TMP2]])
+; AVX2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP3]], [[ACC:%.*]]
+; AVX2-NEXT: ret i32 [[OP_RDX]]
+;
+; AVX512-LABEL: @reduce_and4(
+; AVX512-NEXT: entry:
+; AVX512-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; AVX512-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP0]])
+; AVX512-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; AVX512-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP2]])
+; AVX512-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP1]], [[TMP3]]
+; AVX512-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
+; AVX512-NEXT: ret i32 [[OP_RDX1]]
;
entry:
%vecext = extractelement <4 x i32> %v1, i64 0
@@ -92,31 +103,41 @@ entry:
define i32 @reduce_and4_transpose(i32 %acc, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3, <4 x i32> %v4) {
; SSE2-LABEL: @reduce_and4_transpose(
-; SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE2-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP1]])
-; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE2-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]])
-; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP2]], [[TMP4]]
-; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
-; SSE2-NEXT: ret i32 [[OP_RDX1]]
+; SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[V1:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[TMP3]], <16 x i32> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; SSE2-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[TMP5]])
+; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP6]], [[ACC:%.*]]
+; SSE2-NEXT: ret i32 [[OP_RDX]]
;
; SSE42-LABEL: @reduce_and4_transpose(
-; SSE42-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE42-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP1]])
-; SSE42-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; SSE42-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]])
-; SSE42-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP2]], [[TMP4]]
-; SSE42-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
-; SSE42-NEXT: ret i32 [[OP_RDX1]]
+; SSE42-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[V1:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE42-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[TMP3]], <16 x i32> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; SSE42-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[TMP5]])
+; SSE42-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP6]], [[ACC:%.*]]
+; SSE42-NEXT: ret i32 [[OP_RDX]]
+;
+; AVX2-LABEL: @reduce_and4_transpose(
+; AVX2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; AVX2-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; AVX2-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[TMP3]])
+; AVX2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP4]], [[ACC:%.*]]
+; AVX2-NEXT: ret i32 [[OP_RDX]]
;
-; AVX-LABEL: @reduce_and4_transpose(
-; AVX-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
-; AVX-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP1]])
-; AVX-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
-; AVX-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]])
-; AVX-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP2]], [[TMP4]]
-; AVX-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
-; AVX-NEXT: ret i32 [[OP_RDX1]]
+; AVX512-LABEL: @reduce_and4_transpose(
+; AVX512-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; AVX512-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP1]])
+; AVX512-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; AVX512-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]])
+; AVX512-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP2]], [[TMP4]]
+; AVX512-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
+; AVX512-NEXT: ret i32 [[OP_RDX1]]
;
%vecext = extractelement <4 x i32> %v1, i64 0
%vecext1 = extractelement <4 x i32> %v2, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-node.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder-node.ll
new file mode 100644
index 000000000000..1940e1bc8d18
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder-node.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-linux-gnu < %s | FileCheck %s
+
+define void @test(ptr noalias %arg, ptr noalias %arg1, ptr %arg2) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr noalias [[ARG:%.*]], ptr noalias [[ARG1:%.*]], ptr [[ARG2:%.*]]) {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP_I_I:%.*]] = getelementptr i8, ptr [[ARG1]], i64 24
+; CHECK-NEXT: [[TMP_I_I4:%.*]] = getelementptr i8, ptr [[ARG]], i64 24
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[TMP_I_I]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
+; CHECK-NEXT: store float [[TMP1]], ptr [[ARG2]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <4 x float> [[TMP0]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP2]], <4 x float> [[TMP3]], <4 x float> [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x float> [[TMP5]], <4 x float> poison, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+; CHECK-NEXT: store <4 x float> [[TMP6]], ptr [[TMP_I_I4]], align 8
+; CHECK-NEXT: ret void
+;
+bb:
+ %tmp.i.i = getelementptr i8, ptr %arg1, i64 24
+ %tmp1.i.i = load float, ptr %tmp.i.i, align 8
+ %tmp.i.i2 = getelementptr i8, ptr %arg1, i64 32
+ %tmp1.i.i3 = load float, ptr %tmp.i.i2, align 8
+ %tmp1.i.i.i = fcmp olt float %tmp1.i.i3, 0.000000e+00
+ %tmp9 = select i1 %tmp1.i.i.i, float %tmp1.i.i3, float %tmp1.i.i
+ %tmp.i.i4 = getelementptr i8, ptr %arg, i64 24
+ store float %tmp9, ptr %tmp.i.i4, align 8
+ %tmp1.i.i.i10 = fcmp olt float %tmp1.i.i, 0.000000e+00
+ %tmp13 = select i1 %tmp1.i.i.i10, float %tmp1.i.i3, float %tmp1.i.i
+ %tmp.i.i12 = getelementptr i8, ptr %arg, i64 28
+ store float %tmp13, ptr %tmp.i.i12, align 4
+ %tmp.i.i13 = getelementptr i8, ptr %arg1, i64 28
+ %tmp1.i.i14 = load float, ptr %tmp.i.i13, align 4
+ %tmp.i.i15 = getelementptr i8, ptr %arg1, i64 36
+ %tmp1.i.i16 = load float, ptr %tmp.i.i15, align 4
+ %tmp1.i.i.i18 = fcmp olt float %tmp1.i.i16, 0.000000e+00
+ %tmp17 = select i1 %tmp1.i.i.i18, float %tmp1.i.i16, float %tmp1.i.i14
+ %tmp.i.i20 = getelementptr i8, ptr %arg, i64 32
+ store float %tmp17, ptr %tmp.i.i20, align 8
+ store float %tmp1.i.i14, ptr %arg2, align 4
+ %tmp1.i.i.i24 = fcmp olt float %tmp1.i.i14, 0.000000e+00
+ %tmp20 = select i1 %tmp1.i.i.i24, float %tmp1.i.i16, float %tmp1.i.i14
+ %tmp.i.i26 = getelementptr i8, ptr %arg, i64 36
+ store float %tmp20, ptr %tmp.i.i26, align 4
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reused-pointer.ll b/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reused-pointer.ll
index bb16b52f44ec..dadf5992ba28 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reused-pointer.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reused-pointer.ll
@@ -5,19 +5,23 @@ define void @test(i1 %c, ptr %arg) {
; CHECK-LABEL: @test(
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
; CHECK: if:
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x ptr> poison, ptr [[ARG:%.*]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x ptr> [[TMP1]], <4 x ptr> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, <4 x ptr> [[TMP2]], <4 x i64> <i64 32, i64 24, i64 8, i64 0>
-; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> [[TMP3]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[ARG:%.*]], align 8
+; CHECK-NEXT: [[ARG2_2:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 24
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr [[ARG2_2]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <4 x i32> <i32 1, i32 0, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <4 x i32> <i32 1, i32 0, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP3]], <4 x i64> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x ptr> poison, ptr [[ARG]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x ptr> [[TMP5]], <4 x ptr> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, <4 x ptr> [[TMP6]], <4 x i64> <i64 32, i64 24, i64 8, i64 0>
-; CHECK-NEXT: [[TMP8:%.*]] = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> [[TMP7]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> poison)
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr [[ARG]], align 8
+; CHECK-NEXT: [[ARG_2:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 24
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr [[ARG_2]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP7]], <2 x i64> poison, <4 x i32> <i32 1, i32 0, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP6]], <2 x i64> poison, <4 x i32> <i32 1, i32 0, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i64> [[TMP8]], <4 x i64> [[TMP9]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[TMP9:%.*]] = phi <4 x i64> [ [[TMP4]], [[IF]] ], [ [[TMP8]], [[ELSE]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = phi <4 x i64> [ [[TMP5]], [[IF]] ], [ [[TMP10]], [[ELSE]] ]
; CHECK-NEXT: ret void
;
br i1 %c, label %if, label %else
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/split-load8_2-unord.ll b/llvm/test/Transforms/SLPVectorizer/X86/split-load8_2-unord.ll
index 63d13452bc96..6825f43b5a9e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/split-load8_2-unord.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/split-load8_2-unord.ll
@@ -8,24 +8,27 @@ define dso_local void @_Z4testP1S(ptr %p) local_unnamed_addr {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[P:%.*]], i64 0, i32 1, i64 0
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 15
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 7
+; CHECK-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 6
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[ARRAYIDX13]], align 4
; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 4
+; CHECK-NEXT: [[I7:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4
; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 12
+; CHECK-NEXT: [[I9:%.*]] = load i32, ptr [[ARRAYIDX27]], align 4
; CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 13
-; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 14
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[ARRAYIDX34]], align 4
; CHECK-NEXT: [[ARRAYIDX48:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[P]], i64 0, i32 2, i64 5
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x ptr> poison, ptr [[ARRAYIDX1]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x ptr> [[TMP2]], ptr [[ARRAYIDX6]], i32 1
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x ptr> [[TMP3]], ptr [[ARRAYIDX13]], i32 2
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x ptr> [[TMP4]], ptr [[ARRAYIDX20]], i32 3
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x ptr> [[TMP5]], ptr [[ARRAYIDX27]], i32 4
-; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x ptr> [[TMP6]], ptr [[ARRAYIDX34]], i32 5
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x ptr> [[TMP7]], ptr [[ARRAYIDX41]], i32 6
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <8 x ptr> [[TMP8]], ptr [[ARRAYIDX48]], i32 7
-; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> [[TMP9]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> poison)
-; CHECK-NEXT: [[TMP11:%.*]] = add nsw <8 x i32> [[TMP10]], [[TMP1]]
+; CHECK-NEXT: [[I15:%.*]] = load i32, ptr [[ARRAYIDX48]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> poison, i32 [[I1]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> poison, <8 x i32> <i32 1, i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> [[TMP4]], <8 x i32> <i32 0, i32 8, i32 9, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i32> [[TMP5]], i32 [[I7]], i32 3
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 [[I9]], i32 4
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i32> [[TMP7]], <8 x i32> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 8, i32 9, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <8 x i32> [[TMP9]], i32 [[I15]], i32 7
+; CHECK-NEXT: [[TMP11:%.*]] = add nsw <8 x i32> [[TMP10]], [[TMP2]]
; CHECK-NEXT: store <8 x i32> [[TMP11]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
@@ -101,13 +104,13 @@ define dso_local void @test_unordered_splits(ptr nocapture %p) local_unnamed_add
; CHECK-NEXT: [[P2:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT: [[G10:%.*]] = getelementptr inbounds [16 x i32], ptr [[P1]], i32 0, i64 4
; CHECK-NEXT: [[G20:%.*]] = getelementptr inbounds [16 x i32], ptr [[P2]], i32 0, i64 12
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[G10]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[G20]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP6]], <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
-; CHECK-NEXT: store <8 x i32> [[SHUFFLE]], ptr [[P:%.*]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[G10]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[G20]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> [[TMP3]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
+; CHECK-NEXT: store <8 x i32> [[TMP5]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
@@ -158,18 +161,18 @@ define dso_local void @test_cost_splits(ptr nocapture %p) local_unnamed_addr {
; CHECK-NEXT: [[G12:%.*]] = getelementptr inbounds [16 x i32], ptr [[P2]], i32 0, i64 6
; CHECK-NEXT: [[G20:%.*]] = getelementptr inbounds [16 x i32], ptr [[P3]], i32 0, i64 12
; CHECK-NEXT: [[G22:%.*]] = getelementptr inbounds [16 x i32], ptr [[P4]], i32 0, i64 14
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[G10]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr [[G12]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr [[G20]], align 4
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i32>, ptr [[G22]], align 4
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[G10]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[G12]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[G20]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr [[G22]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i32> [[TMP6]], <8 x i32> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP8]], <8 x i32> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <8 x i32> [[TMP10]], <8 x i32> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[TMP7]], <2 x i32> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <8 x i32> [[TMP12]], <8 x i32> [[TMP13]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
-; CHECK-NEXT: store <8 x i32> [[TMP14]], ptr [[P:%.*]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP8]], <8 x i32> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
+; CHECK-NEXT: store <8 x i32> [[TMP10]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SROA/vector-promotion.ll b/llvm/test/Transforms/SROA/vector-promotion.ll
index e48dd5bb3920..1691f7733ace 100644
--- a/llvm/test/Transforms/SROA/vector-promotion.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion.ll
@@ -22,21 +22,21 @@ define i32 @test1(<4 x i32> %x, <4 x i32> %y) {
;
; DEBUG-LABEL: @test1(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META9:![0-9]+]], metadata !DIExpression()), !dbg [[DBG21:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG22:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META12:![0-9]+]], metadata !DIExpression()), !dbg [[DBG23:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META9:![0-9]+]], metadata !DIExpression()), !dbg [[DBG21:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG22:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META12:![0-9]+]], metadata !DIExpression()), !dbg [[DBG23:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[X:%.*]], i32 2, !dbg [[DBG24:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META13:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META15:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META13:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META15:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_28_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[Y:%.*]], i32 3, !dbg [[DBG26:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_2_28_VEC_EXTRACT]], metadata [[META16:![0-9]+]], metadata !DIExpression()), !dbg [[DBG26]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG27:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_2_28_VEC_EXTRACT]], metadata [[META16:![0-9]+]], metadata !DIExpression()), !dbg [[DBG26]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG27:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_16_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[Y]], i32 0, !dbg [[DBG28:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_2_16_VEC_EXTRACT]], metadata [[META18:![0-9]+]], metadata !DIExpression()), !dbg [[DBG28]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_2_16_VEC_EXTRACT]], metadata [[META18:![0-9]+]], metadata !DIExpression()), !dbg [[DBG28]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_8_VEC_EXTRACT]], [[A_SROA_2_28_VEC_EXTRACT]], !dbg [[DBG29:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG29]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG29]]
; DEBUG-NEXT: [[TMP5:%.*]] = add i32 [[A_SROA_2_16_VEC_EXTRACT]], [[TMP4]], !dbg [[DBG30:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META20:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META20:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30]]
; DEBUG-NEXT: ret i32 [[TMP5]], !dbg [[DBG31:![0-9]+]]
;
entry:
@@ -71,23 +71,23 @@ define i32 @test2(<4 x i32> %x, <4 x i32> %y) {
;
; DEBUG-LABEL: @test2(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META34:![0-9]+]], metadata !DIExpression()), !dbg [[DBG45:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META35:![0-9]+]], metadata !DIExpression()), !dbg [[DBG46:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META36:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META34:![0-9]+]], metadata !DIExpression()), !dbg [[DBG45:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META35:![0-9]+]], metadata !DIExpression()), !dbg [[DBG46:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META36:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[X:%.*]], i32 2, !dbg [[DBG48:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META37:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META38:![0-9]+]], metadata !DIExpression()), !dbg [[DBG49:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META37:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META38:![0-9]+]], metadata !DIExpression()), !dbg [[DBG49:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_28_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[Y:%.*]], i32 3, !dbg [[DBG50:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_2_28_VEC_EXTRACT]], metadata [[META39:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META40:![0-9]+]], metadata !DIExpression()), !dbg [[DBG51:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_2_28_VEC_EXTRACT]], metadata [[META39:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META40:![0-9]+]], metadata !DIExpression()), !dbg [[DBG51:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_16_VEC_EXTRACT:%.*]] = shufflevector <4 x i32> [[Y]], <4 x i32> poison, <2 x i32> <i32 0, i32 1>, !dbg [[DBG52:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i32> [[A_SROA_2_16_VEC_EXTRACT]], metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG52]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i32> [[A_SROA_2_16_VEC_EXTRACT]], metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG52]]
; DEBUG-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[A_SROA_2_16_VEC_EXTRACT]], i32 0, !dbg [[DBG53:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP3]], metadata [[META42:![0-9]+]], metadata !DIExpression()), !dbg [[DBG53]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP3]], metadata [[META42:![0-9]+]], metadata !DIExpression()), !dbg [[DBG53]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_8_VEC_EXTRACT]], [[A_SROA_2_28_VEC_EXTRACT]], !dbg [[DBG54:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META43:![0-9]+]], metadata !DIExpression()), !dbg [[DBG54]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META43:![0-9]+]], metadata !DIExpression()), !dbg [[DBG54]]
; DEBUG-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]], !dbg [[DBG55:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META44:![0-9]+]], metadata !DIExpression()), !dbg [[DBG55]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META44:![0-9]+]], metadata !DIExpression()), !dbg [[DBG55]]
; DEBUG-NEXT: ret i32 [[TMP5]], !dbg [[DBG56:![0-9]+]]
;
entry:
@@ -123,22 +123,22 @@ define i32 @test3(<4 x i32> %x, <4 x i32> %y) {
;
; DEBUG-LABEL: @test3(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META59:![0-9]+]], metadata !DIExpression()), !dbg [[DBG69:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META60:![0-9]+]], metadata !DIExpression()), !dbg [[DBG70:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META61:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META59:![0-9]+]], metadata !DIExpression()), !dbg [[DBG69:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META60:![0-9]+]], metadata !DIExpression()), !dbg [[DBG70:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META61:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_INSERT:%.*]] = insertelement <4 x i32> [[X:%.*]], i32 -1, i32 2, !dbg [[DBG72:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[A_SROA_0_8_VEC_INSERT]], i32 2, !dbg [[DBG73:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META62:![0-9]+]], metadata !DIExpression()), !dbg [[DBG73]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META63:![0-9]+]], metadata !DIExpression()), !dbg [[DBG74:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META62:![0-9]+]], metadata !DIExpression()), !dbg [[DBG73]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META63:![0-9]+]], metadata !DIExpression()), !dbg [[DBG74:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_28_VEC_EXTRACT:%.*]] = extractelement <4 x i32> zeroinitializer, i32 3, !dbg [[DBG75:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_3_28_VEC_EXTRACT]], metadata [[META64:![0-9]+]], metadata !DIExpression()), !dbg [[DBG75]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META65:![0-9]+]], metadata !DIExpression()), !dbg [[DBG76:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_3_28_VEC_EXTRACT]], metadata [[META64:![0-9]+]], metadata !DIExpression()), !dbg [[DBG75]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META65:![0-9]+]], metadata !DIExpression()), !dbg [[DBG76:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_16_VEC_EXTRACT:%.*]] = extractelement <4 x i32> zeroinitializer, i32 0, !dbg [[DBG77:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_3_16_VEC_EXTRACT]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_3_16_VEC_EXTRACT]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_8_VEC_EXTRACT]], [[A_SROA_3_28_VEC_EXTRACT]], !dbg [[DBG78:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META67:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META67:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
; DEBUG-NEXT: [[TMP5:%.*]] = add i32 [[A_SROA_3_16_VEC_EXTRACT]], [[TMP4]], !dbg [[DBG79:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]]
; DEBUG-NEXT: ret i32 [[TMP5]], !dbg [[DBG80:![0-9]+]]
;
entry:
@@ -179,26 +179,26 @@ define i32 @test4(<4 x i32> %x, <4 x i32> %y, ptr %z) {
;
; DEBUG-LABEL: @test4(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG94:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META84:![0-9]+]], metadata !DIExpression()), !dbg [[DBG95:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG94:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META84:![0-9]+]], metadata !DIExpression()), !dbg [[DBG95:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_16_COPYLOAD:%.*]] = load <4 x i32>, ptr [[Z:%.*]], align 1, !dbg [[DBG96:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META85:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META85:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97:![0-9]+]]
; DEBUG-NEXT: [[Z_TMP1:%.*]] = getelementptr inbounds <4 x i32>, ptr [[Z]], i64 0, i64 2, !dbg [[DBG98:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[Z_TMP1]], metadata [[META86:![0-9]+]], metadata !DIExpression()), !dbg [[DBG98]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[Z_TMP1]], metadata [[META86:![0-9]+]], metadata !DIExpression()), !dbg [[DBG98]]
; DEBUG-NEXT: [[A_SROA_0_8_COPYLOAD:%.*]] = load i32, ptr [[Z_TMP1]], align 1, !dbg [[DBG99:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_INSERT:%.*]] = insertelement <4 x i32> [[X:%.*]], i32 [[A_SROA_0_8_COPYLOAD]], i32 2, !dbg [[DBG99]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[A_SROA_0_8_VEC_INSERT]], i32 2, !dbg [[DBG100:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META87:![0-9]+]], metadata !DIExpression()), !dbg [[DBG100]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META88:![0-9]+]], metadata !DIExpression()), !dbg [[DBG101:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META87:![0-9]+]], metadata !DIExpression()), !dbg [[DBG100]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META88:![0-9]+]], metadata !DIExpression()), !dbg [[DBG101:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_28_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[A_SROA_3_16_COPYLOAD]], i32 3, !dbg [[DBG102:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_3_28_VEC_EXTRACT]], metadata [[META89:![0-9]+]], metadata !DIExpression()), !dbg [[DBG102]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META90:![0-9]+]], metadata !DIExpression()), !dbg [[DBG103:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_3_28_VEC_EXTRACT]], metadata [[META89:![0-9]+]], metadata !DIExpression()), !dbg [[DBG102]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META90:![0-9]+]], metadata !DIExpression()), !dbg [[DBG103:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_16_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[A_SROA_3_16_COPYLOAD]], i32 0, !dbg [[DBG104:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_3_16_VEC_EXTRACT]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG104]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_3_16_VEC_EXTRACT]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG104]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_8_VEC_EXTRACT]], [[A_SROA_3_28_VEC_EXTRACT]], !dbg [[DBG105:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META92:![0-9]+]], metadata !DIExpression()), !dbg [[DBG105]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META92:![0-9]+]], metadata !DIExpression()), !dbg [[DBG105]]
; DEBUG-NEXT: [[TMP5:%.*]] = add i32 [[A_SROA_3_16_VEC_EXTRACT]], [[TMP4]], !dbg [[DBG106:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META93:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META93:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]]
; DEBUG-NEXT: ret i32 [[TMP5]], !dbg [[DBG107:![0-9]+]]
;
entry:
@@ -243,26 +243,26 @@ define i32 @test4_as1(<4 x i32> %x, <4 x i32> %y, ptr addrspace(1) %z) {
;
; DEBUG-LABEL: @test4_as1(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META110:![0-9]+]], metadata !DIExpression()), !dbg [[DBG121:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META111:![0-9]+]], metadata !DIExpression()), !dbg [[DBG122:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META110:![0-9]+]], metadata !DIExpression()), !dbg [[DBG121:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META111:![0-9]+]], metadata !DIExpression()), !dbg [[DBG122:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_16_COPYLOAD:%.*]] = load <4 x i32>, ptr addrspace(1) [[Z:%.*]], align 1, !dbg [[DBG123:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META112:![0-9]+]], metadata !DIExpression()), !dbg [[DBG124:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META112:![0-9]+]], metadata !DIExpression()), !dbg [[DBG124:![0-9]+]]
; DEBUG-NEXT: [[Z_TMP1:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[Z]], i16 0, i16 2, !dbg [[DBG125:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr addrspace(1) [[Z_TMP1]], metadata [[META113:![0-9]+]], metadata !DIExpression()), !dbg [[DBG125]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr addrspace(1) [[Z_TMP1]], metadata [[META113:![0-9]+]], metadata !DIExpression()), !dbg [[DBG125]]
; DEBUG-NEXT: [[A_SROA_0_8_COPYLOAD:%.*]] = load i32, ptr addrspace(1) [[Z_TMP1]], align 1, !dbg [[DBG126:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_INSERT:%.*]] = insertelement <4 x i32> [[X:%.*]], i32 [[A_SROA_0_8_COPYLOAD]], i32 2, !dbg [[DBG126]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[A_SROA_0_8_VEC_INSERT]], i32 2, !dbg [[DBG127:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META114:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META115:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META114:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META115:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_28_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[A_SROA_3_16_COPYLOAD]], i32 3, !dbg [[DBG129:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_3_28_VEC_EXTRACT]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META117:![0-9]+]], metadata !DIExpression()), !dbg [[DBG130:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_3_28_VEC_EXTRACT]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META117:![0-9]+]], metadata !DIExpression()), !dbg [[DBG130:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_3_16_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[A_SROA_3_16_COPYLOAD]], i32 0, !dbg [[DBG131:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_3_16_VEC_EXTRACT]], metadata [[META118:![0-9]+]], metadata !DIExpression()), !dbg [[DBG131]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_3_16_VEC_EXTRACT]], metadata [[META118:![0-9]+]], metadata !DIExpression()), !dbg [[DBG131]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_8_VEC_EXTRACT]], [[A_SROA_3_28_VEC_EXTRACT]], !dbg [[DBG132:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META119:![0-9]+]], metadata !DIExpression()), !dbg [[DBG132]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META119:![0-9]+]], metadata !DIExpression()), !dbg [[DBG132]]
; DEBUG-NEXT: [[TMP5:%.*]] = add i32 [[A_SROA_3_16_VEC_EXTRACT]], [[TMP4]], !dbg [[DBG133:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META120:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META120:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133]]
; DEBUG-NEXT: ret i32 [[TMP5]], !dbg [[DBG134:![0-9]+]]
;
entry:
@@ -305,25 +305,25 @@ define i32 @test5(<4 x i32> %x, <4 x i32> %y, ptr %z) {
;
; DEBUG-LABEL: @test5(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META137:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META138:![0-9]+]], metadata !DIExpression()), !dbg [[DBG149:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META139:![0-9]+]], metadata !DIExpression()), !dbg [[DBG150:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META137:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META138:![0-9]+]], metadata !DIExpression()), !dbg [[DBG149:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META139:![0-9]+]], metadata !DIExpression()), !dbg [[DBG150:![0-9]+]]
; DEBUG-NEXT: [[Z_TMP1:%.*]] = getelementptr inbounds <4 x i32>, ptr [[Z:%.*]], i64 0, i64 2, !dbg [[DBG151:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[Z_TMP1]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG151]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[Z_TMP1]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG151]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_EXTRACT3:%.*]] = extractelement <4 x i32> [[Y:%.*]], i32 2, !dbg [[DBG152:![0-9]+]]
; DEBUG-NEXT: store i32 [[A_SROA_0_8_VEC_EXTRACT3]], ptr [[Z_TMP1]], align 1, !dbg [[DBG152]]
; DEBUG-NEXT: [[A_SROA_0_8_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[Y]], i32 2, !dbg [[DBG153:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG153]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG154:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_8_VEC_EXTRACT]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG153]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG154:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_4_12_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[Y]], i32 3, !dbg [[DBG155:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_4_12_VEC_EXTRACT]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG155]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG156:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_4_12_VEC_EXTRACT]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG155]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG156:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_4_0_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[Y]], i32 0, !dbg [[DBG157:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_4_0_VEC_EXTRACT]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_4_0_VEC_EXTRACT]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_8_VEC_EXTRACT]], [[A_SROA_4_12_VEC_EXTRACT]], !dbg [[DBG158:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META146:![0-9]+]], metadata !DIExpression()), !dbg [[DBG158]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META146:![0-9]+]], metadata !DIExpression()), !dbg [[DBG158]]
; DEBUG-NEXT: [[TMP5:%.*]] = add i32 [[A_SROA_4_0_VEC_EXTRACT]], [[TMP4]], !dbg [[DBG159:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META147:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META147:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159]]
; DEBUG-NEXT: ret i32 [[TMP5]], !dbg [[DBG160:![0-9]+]]
;
entry:
@@ -367,17 +367,17 @@ define i64 @test6(<4 x i64> %x, <4 x i64> %y, i64 %n) {
;
; DEBUG-LABEL: @test6(
; DEBUG-NEXT: [[TMP:%.*]] = alloca { <4 x i64>, <4 x i64> }, align 32, !dbg [[DBG168:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META163:![0-9]+]], metadata !DIExpression()), !dbg [[DBG168]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META163:![0-9]+]], metadata !DIExpression()), !dbg [[DBG168]]
; DEBUG-NEXT: [[P0:%.*]] = getelementptr inbounds { <4 x i64>, <4 x i64> }, ptr [[TMP]], i32 0, i32 0, !dbg [[DBG169:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[P0]], metadata [[META164:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[P0]], metadata [[META164:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169]]
; DEBUG-NEXT: store <4 x i64> [[X:%.*]], ptr [[P0]], align 32, !dbg [[DBG170:![0-9]+]]
; DEBUG-NEXT: [[P1:%.*]] = getelementptr inbounds { <4 x i64>, <4 x i64> }, ptr [[TMP]], i32 0, i32 1, !dbg [[DBG171:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[P1]], metadata [[META165:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[P1]], metadata [[META165:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171]]
; DEBUG-NEXT: store <4 x i64> [[Y:%.*]], ptr [[P1]], align 32, !dbg [[DBG172:![0-9]+]]
; DEBUG-NEXT: [[ADDR:%.*]] = getelementptr inbounds { <4 x i64>, <4 x i64> }, ptr [[TMP]], i32 0, i32 0, i64 [[N:%.*]], !dbg [[DBG173:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[ADDR]], metadata [[META166:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[ADDR]], metadata [[META166:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173]]
; DEBUG-NEXT: [[RES:%.*]] = load i64, ptr [[ADDR]], align 4, !dbg [[DBG174:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i64 [[RES]], metadata [[META167:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i64 [[RES]], metadata [[META167:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174]]
; DEBUG-NEXT: ret i64 [[RES]], !dbg [[DBG175:![0-9]+]]
;
%tmp = alloca { <4 x i64>, <4 x i64> }
@@ -401,15 +401,15 @@ define <4 x i32> @test_subvec_store() {
;
; DEBUG-LABEL: @test_subvec_store(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG184:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG184:![0-9]+]]
; DEBUG-NEXT: [[A_0_VECBLEND:%.*]] = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>, <4 x i32> undef, !dbg [[DBG185:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META179:![0-9]+]], metadata !DIExpression()), !dbg [[DBG186:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META179:![0-9]+]], metadata !DIExpression()), !dbg [[DBG186:![0-9]+]]
; DEBUG-NEXT: [[A_4_VECBLEND:%.*]] = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x i32> <i32 undef, i32 1, i32 1, i32 undef>, <4 x i32> [[A_0_VECBLEND]], !dbg [[DBG187:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META180:![0-9]+]], metadata !DIExpression()), !dbg [[DBG188:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META180:![0-9]+]], metadata !DIExpression()), !dbg [[DBG188:![0-9]+]]
; DEBUG-NEXT: [[A_8_VECBLEND:%.*]] = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x i32> <i32 undef, i32 undef, i32 2, i32 2>, <4 x i32> [[A_4_VECBLEND]], !dbg [[DBG189:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META181:![0-9]+]], metadata !DIExpression()), !dbg [[DBG190:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META181:![0-9]+]], metadata !DIExpression()), !dbg [[DBG190:![0-9]+]]
; DEBUG-NEXT: [[A_12_VEC_INSERT:%.*]] = insertelement <4 x i32> [[A_8_VECBLEND]], i32 3, i32 3, !dbg [[DBG191:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x i32> [[A_12_VEC_INSERT]], metadata [[META182:![0-9]+]], metadata !DIExpression()), !dbg [[DBG192:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x i32> [[A_12_VEC_INSERT]], metadata [[META182:![0-9]+]], metadata !DIExpression()), !dbg [[DBG192:![0-9]+]]
; DEBUG-NEXT: ret <4 x i32> [[A_12_VEC_INSERT]], !dbg [[DBG193:![0-9]+]]
;
entry:
@@ -443,19 +443,19 @@ define <4 x i32> @test_subvec_load() {
;
; DEBUG-LABEL: @test_subvec_load(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META196:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META196:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
; DEBUG-NEXT: [[A_0_VEC_EXTRACT:%.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> poison, <2 x i32> <i32 0, i32 1>, !dbg [[DBG205:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i32> [[A_0_VEC_EXTRACT]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG206:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i32> [[A_0_VEC_EXTRACT]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG206:![0-9]+]]
; DEBUG-NEXT: [[A_4_VEC_EXTRACT:%.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> poison, <2 x i32> <i32 1, i32 2>, !dbg [[DBG207:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i32> [[A_4_VEC_EXTRACT]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG207]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG208:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i32> [[A_4_VEC_EXTRACT]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG207]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG208:![0-9]+]]
; DEBUG-NEXT: [[A_8_VEC_EXTRACT:%.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> poison, <2 x i32> <i32 2, i32 3>, !dbg [[DBG209:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i32> [[A_8_VEC_EXTRACT]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG209]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i32> [[A_8_VEC_EXTRACT]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG209]]
; DEBUG-NEXT: [[TMP:%.*]] = shufflevector <2 x i32> [[A_0_VEC_EXTRACT]], <2 x i32> [[A_4_VEC_EXTRACT]], <2 x i32> <i32 0, i32 2>, !dbg [[DBG210:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i32> [[TMP]], metadata [[META202:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i32> [[TMP]], metadata [[META202:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210]]
; DEBUG-NEXT: [[RET:%.*]] = shufflevector <2 x i32> [[TMP]], <2 x i32> [[A_8_VEC_EXTRACT]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>, !dbg [[DBG211:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x i32> [[RET]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG211]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x i32> [[RET]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG211]]
; DEBUG-NEXT: ret <4 x i32> [[RET]], !dbg [[DBG212:![0-9]+]]
;
entry:
@@ -488,15 +488,15 @@ define <4 x float> @test_subvec_memset() {
;
; DEBUG-LABEL: @test_subvec_memset(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META215:![0-9]+]], metadata !DIExpression()), !dbg [[DBG220:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META215:![0-9]+]], metadata !DIExpression()), !dbg [[DBG220:![0-9]+]]
; DEBUG-NEXT: [[A_0_VECBLEND:%.*]] = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> <float 0.000000e+00, float 0.000000e+00, float undef, float undef>, <4 x float> undef, !dbg [[DBG221:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META216:![0-9]+]], metadata !DIExpression()), !dbg [[DBG222:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META216:![0-9]+]], metadata !DIExpression()), !dbg [[DBG222:![0-9]+]]
; DEBUG-NEXT: [[A_4_VECBLEND:%.*]] = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x float> <float undef, float 0x3820202020000000, float 0x3820202020000000, float undef>, <4 x float> [[A_0_VECBLEND]], !dbg [[DBG223:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META217:![0-9]+]], metadata !DIExpression()), !dbg [[DBG224:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META217:![0-9]+]], metadata !DIExpression()), !dbg [[DBG224:![0-9]+]]
; DEBUG-NEXT: [[A_8_VECBLEND:%.*]] = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> <float undef, float undef, float 0x3860606060000000, float 0x3860606060000000>, <4 x float> [[A_4_VECBLEND]], !dbg [[DBG225:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META218:![0-9]+]], metadata !DIExpression()), !dbg [[DBG226:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META218:![0-9]+]], metadata !DIExpression()), !dbg [[DBG226:![0-9]+]]
; DEBUG-NEXT: [[A_12_VEC_INSERT:%.*]] = insertelement <4 x float> [[A_8_VECBLEND]], float 0x38E0E0E0E0000000, i32 3, !dbg [[DBG227:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x float> [[A_12_VEC_INSERT]], metadata [[META219:![0-9]+]], metadata !DIExpression()), !dbg [[DBG228:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x float> [[A_12_VEC_INSERT]], metadata [[META219:![0-9]+]], metadata !DIExpression()), !dbg [[DBG228:![0-9]+]]
; DEBUG-NEXT: ret <4 x float> [[A_12_VEC_INSERT]], !dbg [[DBG229:![0-9]+]]
;
entry:
@@ -538,24 +538,24 @@ define <4 x float> @test_subvec_memcpy(ptr %x, ptr %y, ptr %z, ptr %f, ptr %out)
;
; DEBUG-LABEL: @test_subvec_memcpy(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META232:![0-9]+]], metadata !DIExpression()), !dbg [[DBG237:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META232:![0-9]+]], metadata !DIExpression()), !dbg [[DBG237:![0-9]+]]
; DEBUG-NEXT: [[A_0_COPYLOAD:%.*]] = load <2 x float>, ptr [[X:%.*]], align 1, !dbg [[DBG238:![0-9]+]]
; DEBUG-NEXT: [[A_0_VEC_EXPAND:%.*]] = shufflevector <2 x float> [[A_0_COPYLOAD]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>, !dbg [[DBG238]]
; DEBUG-NEXT: [[A_0_VECBLEND:%.*]] = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> [[A_0_VEC_EXPAND]], <4 x float> undef, !dbg [[DBG238]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META233:![0-9]+]], metadata !DIExpression()), !dbg [[DBG239:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META233:![0-9]+]], metadata !DIExpression()), !dbg [[DBG239:![0-9]+]]
; DEBUG-NEXT: [[A_4_COPYLOAD:%.*]] = load <2 x float>, ptr [[Y:%.*]], align 1, !dbg [[DBG240:![0-9]+]]
; DEBUG-NEXT: [[A_4_VEC_EXPAND:%.*]] = shufflevector <2 x float> [[A_4_COPYLOAD]], <2 x float> poison, <4 x i32> <i32 poison, i32 0, i32 1, i32 poison>, !dbg [[DBG240]]
; DEBUG-NEXT: [[A_4_VECBLEND:%.*]] = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x float> [[A_4_VEC_EXPAND]], <4 x float> [[A_0_VECBLEND]], !dbg [[DBG240]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META234:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META234:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
; DEBUG-NEXT: [[A_8_COPYLOAD:%.*]] = load <2 x float>, ptr [[Z:%.*]], align 1, !dbg [[DBG242:![0-9]+]]
; DEBUG-NEXT: [[A_8_VEC_EXPAND:%.*]] = shufflevector <2 x float> [[A_8_COPYLOAD]], <2 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 0, i32 1>, !dbg [[DBG242]]
; DEBUG-NEXT: [[A_8_VECBLEND:%.*]] = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> [[A_8_VEC_EXPAND]], <4 x float> [[A_4_VECBLEND]], !dbg [[DBG242]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META235:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META235:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
; DEBUG-NEXT: [[A_12_COPYLOAD:%.*]] = load float, ptr [[F:%.*]], align 1, !dbg [[DBG244:![0-9]+]]
; DEBUG-NEXT: [[A_12_VEC_INSERT:%.*]] = insertelement <4 x float> [[A_8_VECBLEND]], float [[A_12_COPYLOAD]], i32 3, !dbg [[DBG244]]
; DEBUG-NEXT: [[A_8_VEC_EXTRACT:%.*]] = shufflevector <4 x float> [[A_12_VEC_INSERT]], <4 x float> poison, <2 x i32> <i32 2, i32 3>, !dbg [[DBG245:![0-9]+]]
; DEBUG-NEXT: store <2 x float> [[A_8_VEC_EXTRACT]], ptr [[OUT:%.*]], align 1, !dbg [[DBG245]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x float> [[A_12_VEC_INSERT]], metadata [[META236:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x float> [[A_12_VEC_INSERT]], metadata [[META236:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
; DEBUG-NEXT: ret <4 x float> [[A_12_VEC_INSERT]], !dbg [[DBG247:![0-9]+]]
;
entry:
@@ -596,7 +596,7 @@ define i32 @PR14212(<3 x i8> %val) {
;
; DEBUG-LABEL: @PR14212(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = bitcast <3 x i8> [[VAL:%.*]] to i24, !dbg [[DBG253:![0-9]+]]
; DEBUG-NEXT: [[RETVAL_SROA_2_0_INSERT_EXT:%.*]] = zext i8 undef to i32, !dbg [[DBG254:![0-9]+]]
; DEBUG-NEXT: [[RETVAL_SROA_2_0_INSERT_SHIFT:%.*]] = shl i32 [[RETVAL_SROA_2_0_INSERT_EXT]], 24, !dbg [[DBG254]]
@@ -605,7 +605,7 @@ define i32 @PR14212(<3 x i8> %val) {
; DEBUG-NEXT: [[RETVAL_0_INSERT_EXT:%.*]] = zext i24 [[TMP0]] to i32, !dbg [[DBG254]]
; DEBUG-NEXT: [[RETVAL_0_INSERT_MASK:%.*]] = and i32 [[RETVAL_SROA_2_0_INSERT_INSERT]], -16777216, !dbg [[DBG254]]
; DEBUG-NEXT: [[RETVAL_0_INSERT_INSERT:%.*]] = or i32 [[RETVAL_0_INSERT_MASK]], [[RETVAL_0_INSERT_EXT]], !dbg [[DBG254]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[RETVAL_0_INSERT_INSERT]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[RETVAL_0_INSERT_INSERT]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253]]
; DEBUG-NEXT: ret i32 [[RETVAL_0_INSERT_INSERT]], !dbg [[DBG254]]
;
entry:
@@ -630,12 +630,12 @@ define <2 x i8> @PR14349.1(i32 %x) {
;
; DEBUG-LABEL: @PR14349.1(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META257:![0-9]+]], metadata !DIExpression()), !dbg [[DBG260:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META257:![0-9]+]], metadata !DIExpression()), !dbg [[DBG260:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[X:%.*]] to i16, !dbg [[DBG261:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = bitcast i16 [[A_SROA_0_0_EXTRACT_TRUNC]] to <2 x i8>, !dbg [[DBG261]]
; DEBUG-NEXT: [[A_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[X]], 16, !dbg [[DBG261]]
; DEBUG-NEXT: [[A_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_2_0_EXTRACT_SHIFT]] to i16, !dbg [[DBG261]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i8> [[TMP0]], metadata [[META258:![0-9]+]], metadata !DIExpression()), !dbg [[DBG262:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i8> [[TMP0]], metadata [[META258:![0-9]+]], metadata !DIExpression()), !dbg [[DBG262:![0-9]+]]
; DEBUG-NEXT: ret <2 x i8> [[TMP0]], !dbg [[DBG263:![0-9]+]]
;
entry:
@@ -666,7 +666,7 @@ define i32 @PR14349.2(<2 x i8> %x) {
;
; DEBUG-LABEL: @PR14349.2(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META266:![0-9]+]], metadata !DIExpression()), !dbg [[DBG268:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META266:![0-9]+]], metadata !DIExpression()), !dbg [[DBG268:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = bitcast <2 x i8> [[X:%.*]] to i16, !dbg [[DBG269:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i16 undef to i32, !dbg [[DBG270:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i32 [[A_SROA_2_0_INSERT_EXT]], 16, !dbg [[DBG270]]
@@ -675,7 +675,7 @@ define i32 @PR14349.2(<2 x i8> %x) {
; DEBUG-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[TMP0]] to i32, !dbg [[DBG270]]
; DEBUG-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i32 [[A_SROA_2_0_INSERT_INSERT]], -65536, !dbg [[DBG270]]
; DEBUG-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]], !dbg [[DBG270]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_0_INSERT_INSERT]], metadata [[META267:![0-9]+]], metadata !DIExpression()), !dbg [[DBG269]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_0_INSERT_INSERT]], metadata [[META267:![0-9]+]], metadata !DIExpression()), !dbg [[DBG269]]
; DEBUG-NEXT: ret i32 [[A_SROA_0_0_INSERT_INSERT]], !dbg [[DBG270]]
;
entry:
@@ -702,21 +702,21 @@ define i32 @test7(<2 x i32> %x, <2 x i32> %y) {
;
; DEBUG-LABEL: @test7(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META273:![0-9]+]], metadata !DIExpression()), !dbg [[DBG283:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META274:![0-9]+]], metadata !DIExpression()), !dbg [[DBG284:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META275:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META273:![0-9]+]], metadata !DIExpression()), !dbg [[DBG283:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META274:![0-9]+]], metadata !DIExpression()), !dbg [[DBG284:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META275:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_4_VEC_EXTRACT:%.*]] = extractelement <2 x i32> [[X:%.*]], i32 1, !dbg [[DBG286:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_4_VEC_EXTRACT]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META277:![0-9]+]], metadata !DIExpression()), !dbg [[DBG287:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_4_VEC_EXTRACT]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META277:![0-9]+]], metadata !DIExpression()), !dbg [[DBG287:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_12_VEC_EXTRACT:%.*]] = extractelement <2 x i32> [[Y:%.*]], i32 1, !dbg [[DBG288:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_2_12_VEC_EXTRACT]], metadata [[META278:![0-9]+]], metadata !DIExpression()), !dbg [[DBG288]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META279:![0-9]+]], metadata !DIExpression()), !dbg [[DBG289:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_2_12_VEC_EXTRACT]], metadata [[META278:![0-9]+]], metadata !DIExpression()), !dbg [[DBG288]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META279:![0-9]+]], metadata !DIExpression()), !dbg [[DBG289:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_2_8_VEC_EXTRACT:%.*]] = extractelement <2 x i32> [[Y]], i32 0, !dbg [[DBG290:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_2_8_VEC_EXTRACT]], metadata [[META280:![0-9]+]], metadata !DIExpression()), !dbg [[DBG290]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_2_8_VEC_EXTRACT]], metadata [[META280:![0-9]+]], metadata !DIExpression()), !dbg [[DBG290]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_4_VEC_EXTRACT]], [[A_SROA_2_12_VEC_EXTRACT]], !dbg [[DBG291:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META281:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META281:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291]]
; DEBUG-NEXT: [[TMP5:%.*]] = add i32 [[A_SROA_2_8_VEC_EXTRACT]], [[TMP4]], !dbg [[DBG292:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META282:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP5]], metadata [[META282:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292]]
; DEBUG-NEXT: ret i32 [[TMP5]], !dbg [[DBG293:![0-9]+]]
;
entry:
@@ -751,14 +751,14 @@ define i32 @test8(<2 x i32> %x) {
;
; DEBUG-LABEL: @test8(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_0_VEC_EXTRACT:%.*]] = extractelement <2 x i32> [[X:%.*]], i32 0, !dbg [[DBG302:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_0_VEC_EXTRACT]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG302]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG303:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_0_VEC_EXTRACT]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG302]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG303:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_4_VEC_EXTRACT:%.*]] = extractelement <2 x i32> [[X]], i32 1, !dbg [[DBG304:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[A_SROA_0_4_VEC_EXTRACT]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG304]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[A_SROA_0_4_VEC_EXTRACT]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG304]]
; DEBUG-NEXT: [[TMP4:%.*]] = add i32 [[A_SROA_0_0_VEC_EXTRACT]], [[A_SROA_0_4_VEC_EXTRACT]], !dbg [[DBG305:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[TMP4]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305]]
; DEBUG-NEXT: ret i32 [[TMP4]], !dbg [[DBG306:![0-9]+]]
;
entry:
@@ -786,11 +786,11 @@ define <2 x i32> @test9(i32 %x, i32 %y) {
;
; DEBUG-LABEL: @test9(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG312:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG312:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i32> undef, i32 [[X:%.*]], i32 0, !dbg [[DBG313:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META310:![0-9]+]], metadata !DIExpression()), !dbg [[DBG314:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META310:![0-9]+]], metadata !DIExpression()), !dbg [[DBG314:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_4_VEC_INSERT:%.*]] = insertelement <2 x i32> [[A_SROA_0_0_VEC_INSERT]], i32 [[Y:%.*]], i32 1, !dbg [[DBG315:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i32> [[A_SROA_0_4_VEC_INSERT]], metadata [[META311:![0-9]+]], metadata !DIExpression()), !dbg [[DBG316:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i32> [[A_SROA_0_4_VEC_INSERT]], metadata [[META311:![0-9]+]], metadata !DIExpression()), !dbg [[DBG316:![0-9]+]]
; DEBUG-NEXT: ret <2 x i32> [[A_SROA_0_4_VEC_INSERT]], !dbg [[DBG317:![0-9]+]]
;
entry:
@@ -817,11 +817,11 @@ define <2 x i32> @test10(<4 x i16> %x, i32 %y) {
;
; DEBUG-LABEL: @test10(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META320:![0-9]+]], metadata !DIExpression()), !dbg [[DBG323:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META320:![0-9]+]], metadata !DIExpression()), !dbg [[DBG323:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[X:%.*]] to <2 x i32>, !dbg [[DBG324:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META321:![0-9]+]], metadata !DIExpression()), !dbg [[DBG325:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META321:![0-9]+]], metadata !DIExpression()), !dbg [[DBG325:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_4_VEC_INSERT:%.*]] = insertelement <2 x i32> [[TMP0]], i32 [[Y:%.*]], i32 1, !dbg [[DBG326:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i32> [[A_SROA_0_4_VEC_INSERT]], metadata [[META322:![0-9]+]], metadata !DIExpression()), !dbg [[DBG327:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i32> [[A_SROA_0_4_VEC_INSERT]], metadata [[META322:![0-9]+]], metadata !DIExpression()), !dbg [[DBG327:![0-9]+]]
; DEBUG-NEXT: ret <2 x i32> [[A_SROA_0_4_VEC_INSERT]], !dbg [[DBG328:![0-9]+]]
;
entry:
@@ -850,12 +850,12 @@ define <2 x float> @test11(<4 x i16> %x, i32 %y) {
;
; DEBUG-LABEL: @test11(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META331:![0-9]+]], metadata !DIExpression()), !dbg [[DBG334:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META331:![0-9]+]], metadata !DIExpression()), !dbg [[DBG334:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[X:%.*]] to <2 x i32>, !dbg [[DBG335:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META332:![0-9]+]], metadata !DIExpression()), !dbg [[DBG336:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META332:![0-9]+]], metadata !DIExpression()), !dbg [[DBG336:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_4_VEC_INSERT:%.*]] = insertelement <2 x i32> [[TMP0]], i32 [[Y:%.*]], i32 1, !dbg [[DBG337:![0-9]+]]
; DEBUG-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[A_SROA_0_4_VEC_INSERT]] to <2 x float>, !dbg [[DBG338:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x float> [[TMP1]], metadata [[META333:![0-9]+]], metadata !DIExpression()), !dbg [[DBG338]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x float> [[TMP1]], metadata [[META333:![0-9]+]], metadata !DIExpression()), !dbg [[DBG338]]
; DEBUG-NEXT: ret <2 x float> [[TMP1]], !dbg [[DBG339:![0-9]+]]
;
entry:
@@ -876,9 +876,9 @@ define <4 x float> @test12(<4 x i32> %val) {
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
; DEBUG-LABEL: @test12(
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META342:![0-9]+]], metadata !DIExpression()), !dbg [[DBG344:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META342:![0-9]+]], metadata !DIExpression()), !dbg [[DBG344:![0-9]+]]
; DEBUG-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[VAL:%.*]] to <4 x float>, !dbg [[DBG345:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x float> [[TMP1]], metadata [[META343:![0-9]+]], metadata !DIExpression()), !dbg [[DBG345]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x float> [[TMP1]], metadata [[META343:![0-9]+]], metadata !DIExpression()), !dbg [[DBG345]]
; DEBUG-NEXT: ret <4 x float> [[TMP1]], !dbg [[DBG346:![0-9]+]]
;
%a = alloca <3 x i32>, align 16
@@ -904,16 +904,16 @@ define <2 x i64> @test13(i32 %a, i32 %b, i32 %c, i32 %d) {
;
; DEBUG-LABEL: @test13(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META349:![0-9]+]], metadata !DIExpression()), !dbg [[DBG354:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META349:![0-9]+]], metadata !DIExpression()), !dbg [[DBG354:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_0_VEC_INSERT:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0, !dbg [[DBG355:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META350:![0-9]+]], metadata !DIExpression()), !dbg [[DBG356:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META350:![0-9]+]], metadata !DIExpression()), !dbg [[DBG356:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_4_VEC_INSERT:%.*]] = insertelement <4 x i32> [[X_SROA_0_0_VEC_INSERT]], i32 [[B:%.*]], i32 1, !dbg [[DBG357:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META351:![0-9]+]], metadata !DIExpression()), !dbg [[DBG358:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META351:![0-9]+]], metadata !DIExpression()), !dbg [[DBG358:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_8_VEC_INSERT:%.*]] = insertelement <4 x i32> [[X_SROA_0_4_VEC_INSERT]], i32 [[C:%.*]], i32 2, !dbg [[DBG359:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META352:![0-9]+]], metadata !DIExpression()), !dbg [[DBG360:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META352:![0-9]+]], metadata !DIExpression()), !dbg [[DBG360:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_12_VEC_INSERT:%.*]] = insertelement <4 x i32> [[X_SROA_0_8_VEC_INSERT]], i32 [[D:%.*]], i32 3, !dbg [[DBG361:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[X_SROA_0_12_VEC_INSERT]] to <2 x i64>, !dbg [[DBG362:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <2 x i64> [[TMP0]], metadata [[META353:![0-9]+]], metadata !DIExpression()), !dbg [[DBG362]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <2 x i64> [[TMP0]], metadata [[META353:![0-9]+]], metadata !DIExpression()), !dbg [[DBG362]]
; DEBUG-NEXT: ret <2 x i64> [[TMP0]], !dbg [[DBG363:![0-9]+]]
;
entry:
@@ -946,26 +946,26 @@ define i32 @test14(<2 x i64> %x) {
;
; DEBUG-LABEL: @test14(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META366:![0-9]+]], metadata !DIExpression()), !dbg [[DBG378:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META366:![0-9]+]], metadata !DIExpression()), !dbg [[DBG378:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[X:%.*]] to <4 x i32>, !dbg [[DBG379:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META367:![0-9]+]], metadata !DIExpression()), !dbg [[DBG380:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META367:![0-9]+]], metadata !DIExpression()), !dbg [[DBG380:![0-9]+]]
; DEBUG-NEXT: [[X_ADDR_SROA_0_0_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0, !dbg [[DBG381:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_0_VEC_EXTRACT]], metadata [[META368:![0-9]+]], metadata !DIExpression()), !dbg [[DBG381]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META369:![0-9]+]], metadata !DIExpression()), !dbg [[DBG382:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_0_VEC_EXTRACT]], metadata [[META368:![0-9]+]], metadata !DIExpression()), !dbg [[DBG381]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META369:![0-9]+]], metadata !DIExpression()), !dbg [[DBG382:![0-9]+]]
; DEBUG-NEXT: [[X_ADDR_SROA_0_4_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1, !dbg [[DBG383:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_4_VEC_EXTRACT]], metadata [[META370:![0-9]+]], metadata !DIExpression()), !dbg [[DBG383]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META371:![0-9]+]], metadata !DIExpression()), !dbg [[DBG384:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_4_VEC_EXTRACT]], metadata [[META370:![0-9]+]], metadata !DIExpression()), !dbg [[DBG383]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META371:![0-9]+]], metadata !DIExpression()), !dbg [[DBG384:![0-9]+]]
; DEBUG-NEXT: [[X_ADDR_SROA_0_8_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2, !dbg [[DBG385:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_8_VEC_EXTRACT]], metadata [[META372:![0-9]+]], metadata !DIExpression()), !dbg [[DBG385]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META373:![0-9]+]], metadata !DIExpression()), !dbg [[DBG386:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_8_VEC_EXTRACT]], metadata [[META372:![0-9]+]], metadata !DIExpression()), !dbg [[DBG385]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META373:![0-9]+]], metadata !DIExpression()), !dbg [[DBG386:![0-9]+]]
; DEBUG-NEXT: [[X_ADDR_SROA_0_12_VEC_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3, !dbg [[DBG387:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_12_VEC_EXTRACT]], metadata [[META374:![0-9]+]], metadata !DIExpression()), !dbg [[DBG387]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[X_ADDR_SROA_0_12_VEC_EXTRACT]], metadata [[META374:![0-9]+]], metadata !DIExpression()), !dbg [[DBG387]]
; DEBUG-NEXT: [[ADD:%.*]] = add i32 [[X_ADDR_SROA_0_0_VEC_EXTRACT]], [[X_ADDR_SROA_0_4_VEC_EXTRACT]], !dbg [[DBG388:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[ADD]], metadata [[META375:![0-9]+]], metadata !DIExpression()), !dbg [[DBG388]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[ADD]], metadata [[META375:![0-9]+]], metadata !DIExpression()), !dbg [[DBG388]]
; DEBUG-NEXT: [[ADD1:%.*]] = add i32 [[X_ADDR_SROA_0_8_VEC_EXTRACT]], [[X_ADDR_SROA_0_12_VEC_EXTRACT]], !dbg [[DBG389:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[ADD1]], metadata [[META376:![0-9]+]], metadata !DIExpression()), !dbg [[DBG389]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[ADD1]], metadata [[META376:![0-9]+]], metadata !DIExpression()), !dbg [[DBG389]]
; DEBUG-NEXT: [[ADD2:%.*]] = add i32 [[ADD]], [[ADD1]], !dbg [[DBG390:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i32 [[ADD2]], metadata [[META377:![0-9]+]], metadata !DIExpression()), !dbg [[DBG390]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i32 [[ADD2]], metadata [[META377:![0-9]+]], metadata !DIExpression()), !dbg [[DBG390]]
; DEBUG-NEXT: ret i32 [[ADD2]], !dbg [[DBG391:![0-9]+]]
;
entry:
@@ -1002,19 +1002,19 @@ define <4 x ptr> @test15(i32 %a, i32 %b, i32 %c, i32 %d) {
; DEBUG-LABEL: @test15(
; DEBUG-NEXT: entry:
; DEBUG-NEXT: [[X_SROA_0:%.*]] = alloca <4 x ptr>, align 32, !dbg [[DBG400:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META394:![0-9]+]], metadata !DIExpression()), !dbg [[DBG400]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META394:![0-9]+]], metadata !DIExpression()), !dbg [[DBG400]]
; DEBUG-NEXT: store i32 [[A:%.*]], ptr [[X_SROA_0]], align 32, !dbg [[DBG401:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META395:![0-9]+]], metadata !DIExpression()), !dbg [[DBG402:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META395:![0-9]+]], metadata !DIExpression()), !dbg [[DBG402:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_4_X_TMP2_SROA_IDX1:%.*]] = getelementptr inbounds i8, ptr [[X_SROA_0]], i64 4, !dbg [[DBG403:![0-9]+]]
; DEBUG-NEXT: store i32 [[B:%.*]], ptr [[X_SROA_0_4_X_TMP2_SROA_IDX1]], align 4, !dbg [[DBG403]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META396:![0-9]+]], metadata !DIExpression()), !dbg [[DBG404:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META396:![0-9]+]], metadata !DIExpression()), !dbg [[DBG404:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_8_X_TMP3_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[X_SROA_0]], i64 8, !dbg [[DBG405:![0-9]+]]
; DEBUG-NEXT: store i32 [[C:%.*]], ptr [[X_SROA_0_8_X_TMP3_SROA_IDX2]], align 8, !dbg [[DBG405]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META397:![0-9]+]], metadata !DIExpression()), !dbg [[DBG406:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META397:![0-9]+]], metadata !DIExpression()), !dbg [[DBG406:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_12_X_TMP4_SROA_IDX3:%.*]] = getelementptr inbounds i8, ptr [[X_SROA_0]], i64 12, !dbg [[DBG407:![0-9]+]]
; DEBUG-NEXT: store i32 [[D:%.*]], ptr [[X_SROA_0_12_X_TMP4_SROA_IDX3]], align 4, !dbg [[DBG407]]
; DEBUG-NEXT: [[X_SROA_0_0_X_SROA_0_0_RESULT:%.*]] = load <4 x ptr>, ptr [[X_SROA_0]], align 32, !dbg [[DBG408:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x ptr> [[X_SROA_0_0_X_SROA_0_0_RESULT]], metadata [[META398:![0-9]+]], metadata !DIExpression()), !dbg [[DBG408]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x ptr> [[X_SROA_0_0_X_SROA_0_0_RESULT]], metadata [[META398:![0-9]+]], metadata !DIExpression()), !dbg [[DBG408]]
; DEBUG-NEXT: ret <4 x ptr> [[X_SROA_0_0_X_SROA_0_0_RESULT]], !dbg [[DBG409:![0-9]+]]
;
entry:
@@ -1045,19 +1045,19 @@ define <4 x ptr> @test16(i64 %a, i64 %b, i64 %c, i64 %d) {
;
; DEBUG-LABEL: @test16(
; DEBUG-NEXT: entry:
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META412:![0-9]+]], metadata !DIExpression()), !dbg [[DBG417:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META412:![0-9]+]], metadata !DIExpression()), !dbg [[DBG417:![0-9]+]]
; DEBUG-NEXT: [[TMP0:%.*]] = inttoptr i64 [[A:%.*]] to ptr, !dbg [[DBG418:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_0_VEC_INSERT:%.*]] = insertelement <4 x ptr> undef, ptr [[TMP0]], i32 0, !dbg [[DBG418]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META413:![0-9]+]], metadata !DIExpression()), !dbg [[DBG419:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META413:![0-9]+]], metadata !DIExpression()), !dbg [[DBG419:![0-9]+]]
; DEBUG-NEXT: [[TMP1:%.*]] = inttoptr i64 [[B:%.*]] to ptr, !dbg [[DBG420:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_8_VEC_INSERT:%.*]] = insertelement <4 x ptr> [[X_SROA_0_0_VEC_INSERT]], ptr [[TMP1]], i32 1, !dbg [[DBG420]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META414:![0-9]+]], metadata !DIExpression()), !dbg [[DBG421:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META414:![0-9]+]], metadata !DIExpression()), !dbg [[DBG421:![0-9]+]]
; DEBUG-NEXT: [[TMP2:%.*]] = inttoptr i64 [[C:%.*]] to ptr, !dbg [[DBG422:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_16_VEC_INSERT:%.*]] = insertelement <4 x ptr> [[X_SROA_0_8_VEC_INSERT]], ptr [[TMP2]], i32 2, !dbg [[DBG422]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META415:![0-9]+]], metadata !DIExpression()), !dbg [[DBG423:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META415:![0-9]+]], metadata !DIExpression()), !dbg [[DBG423:![0-9]+]]
; DEBUG-NEXT: [[TMP3:%.*]] = inttoptr i64 [[D:%.*]] to ptr, !dbg [[DBG424:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_24_VEC_INSERT:%.*]] = insertelement <4 x ptr> [[X_SROA_0_16_VEC_INSERT]], ptr [[TMP3]], i32 3, !dbg [[DBG424]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x ptr> [[X_SROA_0_24_VEC_INSERT]], metadata [[META416:![0-9]+]], metadata !DIExpression()), !dbg [[DBG425:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x ptr> [[X_SROA_0_24_VEC_INSERT]], metadata [[META416:![0-9]+]], metadata !DIExpression()), !dbg [[DBG425:![0-9]+]]
; DEBUG-NEXT: ret <4 x ptr> [[X_SROA_0_24_VEC_INSERT]], !dbg [[DBG426:![0-9]+]]
;
entry:
@@ -1090,19 +1090,19 @@ define <4 x ptr> @test17(i32 %a, i32 %b, i64 %c, i64 %d) {
; DEBUG-LABEL: @test17(
; DEBUG-NEXT: entry:
; DEBUG-NEXT: [[X_SROA_0:%.*]] = alloca <4 x ptr>, align 32, !dbg [[DBG434:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META429:![0-9]+]], metadata !DIExpression()), !dbg [[DBG434]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META429:![0-9]+]], metadata !DIExpression()), !dbg [[DBG434]]
; DEBUG-NEXT: store i32 [[A:%.*]], ptr [[X_SROA_0]], align 32, !dbg [[DBG435:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META430:![0-9]+]], metadata !DIExpression()), !dbg [[DBG436:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META430:![0-9]+]], metadata !DIExpression()), !dbg [[DBG436:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_4_X_TMP2_SROA_IDX1:%.*]] = getelementptr inbounds i8, ptr [[X_SROA_0]], i64 4, !dbg [[DBG437:![0-9]+]]
; DEBUG-NEXT: store i32 [[B:%.*]], ptr [[X_SROA_0_4_X_TMP2_SROA_IDX1]], align 4, !dbg [[DBG437]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META431:![0-9]+]], metadata !DIExpression()), !dbg [[DBG438:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META431:![0-9]+]], metadata !DIExpression()), !dbg [[DBG438:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_16_X_TMP3_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[X_SROA_0]], i64 16, !dbg [[DBG439:![0-9]+]]
; DEBUG-NEXT: store i64 [[C:%.*]], ptr [[X_SROA_0_16_X_TMP3_SROA_IDX2]], align 16, !dbg [[DBG439]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META432:![0-9]+]], metadata !DIExpression()), !dbg [[DBG440:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META432:![0-9]+]], metadata !DIExpression()), !dbg [[DBG440:![0-9]+]]
; DEBUG-NEXT: [[X_SROA_0_24_X_TMP4_SROA_IDX3:%.*]] = getelementptr inbounds i8, ptr [[X_SROA_0]], i64 24, !dbg [[DBG441:![0-9]+]]
; DEBUG-NEXT: store i64 [[D:%.*]], ptr [[X_SROA_0_24_X_TMP4_SROA_IDX3]], align 8, !dbg [[DBG441]]
; DEBUG-NEXT: [[X_SROA_0_0_X_SROA_0_0_RESULT:%.*]] = load <4 x ptr>, ptr [[X_SROA_0]], align 32, !dbg [[DBG442:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x ptr> [[X_SROA_0_0_X_SROA_0_0_RESULT]], metadata [[META433:![0-9]+]], metadata !DIExpression()), !dbg [[DBG442]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x ptr> [[X_SROA_0_0_X_SROA_0_0_RESULT]], metadata [[META433:![0-9]+]], metadata !DIExpression()), !dbg [[DBG442]]
; DEBUG-NEXT: ret <4 x ptr> [[X_SROA_0_0_X_SROA_0_0_RESULT]], !dbg [[DBG443:![0-9]+]]
;
entry:
@@ -1129,10 +1129,10 @@ define i1 @test18() {
;
; DEBUG-LABEL: @test18(
; DEBUG-NEXT: [[A_SROA_0:%.*]] = alloca <2 x i64>, align 32, !dbg [[DBG449:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META446:![0-9]+]], metadata !DIExpression()), !dbg [[DBG449]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META446:![0-9]+]], metadata !DIExpression()), !dbg [[DBG449]]
; DEBUG-NEXT: store <2 x i64> <i64 0, i64 -1>, ptr [[A_SROA_0]], align 32, !dbg [[DBG450:![0-9]+]]
; DEBUG-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load i1, ptr [[A_SROA_0]], align 32, !dbg [[DBG451:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata i1 [[A_SROA_0_0_A_SROA_0_0_L]], metadata [[META447:![0-9]+]], metadata !DIExpression()), !dbg [[DBG451]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata i1 [[A_SROA_0_0_A_SROA_0_0_L]], metadata [[META447:![0-9]+]], metadata !DIExpression()), !dbg [[DBG451]]
; DEBUG-NEXT: ret i1 [[A_SROA_0_0_A_SROA_0_0_L]], !dbg [[DBG452:![0-9]+]]
;
%a = alloca <8 x i32>
@@ -1149,7 +1149,7 @@ define void @swap-8bytes(ptr %x, ptr %y) {
; CHECK-NEXT: ret void
;
; DEBUG-LABEL: @swap-8bytes(
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META455:![0-9]+]], metadata !DIExpression()), !dbg [[DBG456:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META455:![0-9]+]], metadata !DIExpression()), !dbg [[DBG456:![0-9]+]]
; DEBUG-NEXT: [[TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[X:%.*]], align 1, !dbg [[DBG457:![0-9]+]]
; DEBUG-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[X]], ptr [[Y:%.*]], i64 8, i1 false), !dbg [[DBG458:![0-9]+]]
; DEBUG-NEXT: store i64 [[TMP_SROA_0_0_COPYLOAD]], ptr [[Y]], align 1, !dbg [[DBG459:![0-9]+]]
@@ -1172,7 +1172,7 @@ define void @swap-7bytes(ptr %x, ptr %y) {
;
; DEBUG-LABEL: @swap-7bytes(
; DEBUG-NEXT: [[TMP:%.*]] = alloca [7 x i8], align 1, !dbg [[DBG464:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META463:![0-9]+]], metadata !DIExpression()), !dbg [[DBG464]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META463:![0-9]+]], metadata !DIExpression()), !dbg [[DBG464]]
; DEBUG-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP]], ptr [[X:%.*]], i64 7, i1 false), !dbg [[DBG465:![0-9]+]]
; DEBUG-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[X]], ptr [[Y:%.*]], i64 7, i1 false), !dbg [[DBG466:![0-9]+]]
; DEBUG-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[Y]], ptr [[TMP]], i64 7, i1 false), !dbg [[DBG467:![0-9]+]]
@@ -1195,7 +1195,7 @@ define void @swap-16bytes(ptr %x, ptr %y) {
;
; DEBUG-LABEL: @swap-16bytes(
; DEBUG-NEXT: [[TMP:%.*]] = alloca [2 x i64], align 8, !dbg [[DBG472:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META471:![0-9]+]], metadata !DIExpression()), !dbg [[DBG472]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META471:![0-9]+]], metadata !DIExpression()), !dbg [[DBG472]]
; DEBUG-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP]], ptr [[X:%.*]], i64 16, i1 false), !dbg [[DBG473:![0-9]+]]
; DEBUG-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[X]], ptr [[Y:%.*]], i64 16, i1 false), !dbg [[DBG474:![0-9]+]]
; DEBUG-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[Y]], ptr [[TMP]], i64 16, i1 false), !dbg [[DBG475:![0-9]+]]
@@ -1218,7 +1218,7 @@ define void @swap-15bytes(ptr %x, ptr %y) {
;
; DEBUG-LABEL: @swap-15bytes(
; DEBUG-NEXT: [[TMP:%.*]] = alloca [15 x i8], align 1, !dbg [[DBG480:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META479:![0-9]+]], metadata !DIExpression()), !dbg [[DBG480]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[TMP]], metadata [[META479:![0-9]+]], metadata !DIExpression()), !dbg [[DBG480]]
; DEBUG-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP]], ptr [[X:%.*]], i64 15, i1 false), !dbg [[DBG481:![0-9]+]]
; DEBUG-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[X]], ptr [[Y:%.*]], i64 15, i1 false), !dbg [[DBG482:![0-9]+]]
; DEBUG-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[Y]], ptr [[TMP]], i64 15, i1 false), !dbg [[DBG483:![0-9]+]]
@@ -1245,17 +1245,17 @@ define <4 x i32> @ptrLoadStoreTys(ptr %init, i32 %val2) {
;
; DEBUG-LABEL: @ptrLoadStoreTys(
; DEBUG-NEXT: [[VAL0:%.*]] = load ptr, ptr [[INIT:%.*]], align 8, !dbg [[DBG492:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[VAL0]], metadata [[META487:![0-9]+]], metadata !DIExpression()), !dbg [[DBG492]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META488:![0-9]+]], metadata !DIExpression()), !dbg [[DBG493:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[VAL0]], metadata [[META487:![0-9]+]], metadata !DIExpression()), !dbg [[DBG492]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META488:![0-9]+]], metadata !DIExpression()), !dbg [[DBG493:![0-9]+]]
; DEBUG-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[VAL0]] to i64, !dbg [[DBG494:![0-9]+]]
; DEBUG-NEXT: [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>, !dbg [[DBG494]]
; DEBUG-NEXT: [[OBJ_0_VEC_EXPAND:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>, !dbg [[DBG494]]
; DEBUG-NEXT: [[OBJ_0_VECBLEND:%.*]] = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> [[OBJ_0_VEC_EXPAND]], <4 x i32> zeroinitializer, !dbg [[DBG494]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META489:![0-9]+]], metadata !DIExpression()), !dbg [[DBG495:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META489:![0-9]+]], metadata !DIExpression()), !dbg [[DBG495:![0-9]+]]
; DEBUG-NEXT: [[OBJ_8_VEC_INSERT:%.*]] = insertelement <4 x i32> [[OBJ_0_VECBLEND]], i32 [[VAL2:%.*]], i32 2, !dbg [[DBG496:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META490:![0-9]+]], metadata !DIExpression()), !dbg [[DBG497:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META490:![0-9]+]], metadata !DIExpression()), !dbg [[DBG497:![0-9]+]]
; DEBUG-NEXT: [[OBJ_12_VEC_INSERT:%.*]] = insertelement <4 x i32> [[OBJ_8_VEC_INSERT]], i32 131072, i32 3, !dbg [[DBG498:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x i32> [[OBJ_12_VEC_INSERT]], metadata [[META491:![0-9]+]], metadata !DIExpression()), !dbg [[DBG499:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x i32> [[OBJ_12_VEC_INSERT]], metadata [[META491:![0-9]+]], metadata !DIExpression()), !dbg [[DBG499:![0-9]+]]
; DEBUG-NEXT: ret <4 x i32> [[OBJ_12_VEC_INSERT]], !dbg [[DBG500:![0-9]+]]
;
%val0 = load ptr, ptr %init, align 8
@@ -1285,19 +1285,19 @@ define <4 x float> @ptrLoadStoreTysFloat(ptr %init, float %val2) {
;
; DEBUG-LABEL: @ptrLoadStoreTysFloat(
; DEBUG-NEXT: [[VAL0:%.*]] = load ptr, ptr [[INIT:%.*]], align 8, !dbg [[DBG508:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[VAL0]], metadata [[META503:![0-9]+]], metadata !DIExpression()), !dbg [[DBG508]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[VAL0]], metadata [[META503:![0-9]+]], metadata !DIExpression()), !dbg [[DBG508]]
; DEBUG-NEXT: [[OBJ:%.*]] = alloca <4 x float>, align 16, !dbg [[DBG509:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[OBJ]], metadata [[META504:![0-9]+]], metadata !DIExpression()), !dbg [[DBG509]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[OBJ]], metadata [[META504:![0-9]+]], metadata !DIExpression()), !dbg [[DBG509]]
; DEBUG-NEXT: store <4 x float> zeroinitializer, ptr [[OBJ]], align 16, !dbg [[DBG510:![0-9]+]]
; DEBUG-NEXT: store ptr [[VAL0]], ptr [[OBJ]], align 16, !dbg [[DBG511:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META505:![0-9]+]], metadata !DIExpression()), !dbg [[DBG512:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META505:![0-9]+]], metadata !DIExpression()), !dbg [[DBG512:![0-9]+]]
; DEBUG-NEXT: [[OBJ_8_PTR2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 8, !dbg [[DBG513:![0-9]+]]
; DEBUG-NEXT: store float [[VAL2:%.*]], ptr [[OBJ_8_PTR2_SROA_IDX]], align 8, !dbg [[DBG513]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META506:![0-9]+]], metadata !DIExpression()), !dbg [[DBG514:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META506:![0-9]+]], metadata !DIExpression()), !dbg [[DBG514:![0-9]+]]
; DEBUG-NEXT: [[OBJ_12_PTR3_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 12, !dbg [[DBG515:![0-9]+]]
; DEBUG-NEXT: store float 1.310720e+05, ptr [[OBJ_12_PTR3_SROA_IDX]], align 4, !dbg [[DBG515]]
; DEBUG-NEXT: [[OBJ_0_SROAVAL:%.*]] = load <4 x float>, ptr [[OBJ]], align 16, !dbg [[DBG516:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x float> [[OBJ_0_SROAVAL]], metadata [[META507:![0-9]+]], metadata !DIExpression()), !dbg [[DBG516]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x float> [[OBJ_0_SROAVAL]], metadata [[META507:![0-9]+]], metadata !DIExpression()), !dbg [[DBG516]]
; DEBUG-NEXT: ret <4 x float> [[OBJ_0_SROAVAL]], !dbg [[DBG517:![0-9]+]]
;
%val0 = load ptr, ptr %init, align 8
@@ -1325,17 +1325,17 @@ define <4 x i32> @ptrLoadStoreTysAS3(ptr %init, i32 %val2) {
;
; DEBUG-LABEL: @ptrLoadStoreTysAS3(
; DEBUG-NEXT: [[VAL0:%.*]] = load ptr addrspace(3), ptr [[INIT:%.*]], align 8, !dbg [[DBG525:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr addrspace(3) [[VAL0]], metadata [[META520:![0-9]+]], metadata !DIExpression()), !dbg [[DBG525]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META521:![0-9]+]], metadata !DIExpression()), !dbg [[DBG526:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr addrspace(3) [[VAL0]], metadata [[META520:![0-9]+]], metadata !DIExpression()), !dbg [[DBG525]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META521:![0-9]+]], metadata !DIExpression()), !dbg [[DBG526:![0-9]+]]
; DEBUG-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[VAL0]] to i64, !dbg [[DBG527:![0-9]+]]
; DEBUG-NEXT: [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>, !dbg [[DBG527]]
; DEBUG-NEXT: [[OBJ_0_VEC_EXPAND:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>, !dbg [[DBG527]]
; DEBUG-NEXT: [[OBJ_0_VECBLEND:%.*]] = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> [[OBJ_0_VEC_EXPAND]], <4 x i32> zeroinitializer, !dbg [[DBG527]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META522:![0-9]+]], metadata !DIExpression()), !dbg [[DBG528:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META522:![0-9]+]], metadata !DIExpression()), !dbg [[DBG528:![0-9]+]]
; DEBUG-NEXT: [[OBJ_8_VEC_INSERT:%.*]] = insertelement <4 x i32> [[OBJ_0_VECBLEND]], i32 [[VAL2:%.*]], i32 2, !dbg [[DBG529:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META523:![0-9]+]], metadata !DIExpression()), !dbg [[DBG530:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META523:![0-9]+]], metadata !DIExpression()), !dbg [[DBG530:![0-9]+]]
; DEBUG-NEXT: [[OBJ_12_VEC_INSERT:%.*]] = insertelement <4 x i32> [[OBJ_8_VEC_INSERT]], i32 131072, i32 3, !dbg [[DBG531:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x i32> [[OBJ_12_VEC_INSERT]], metadata [[META524:![0-9]+]], metadata !DIExpression()), !dbg [[DBG532:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x i32> [[OBJ_12_VEC_INSERT]], metadata [[META524:![0-9]+]], metadata !DIExpression()), !dbg [[DBG532:![0-9]+]]
; DEBUG-NEXT: ret <4 x i32> [[OBJ_12_VEC_INSERT]], !dbg [[DBG533:![0-9]+]]
;
%val0 = load ptr addrspace(3), ptr %init, align 8
@@ -1365,19 +1365,19 @@ define <4 x ptr> @ptrLoadStoreTysPtr(ptr %init, i64 %val2) {
;
; DEBUG-LABEL: @ptrLoadStoreTysPtr(
; DEBUG-NEXT: [[VAL0:%.*]] = load ptr, ptr [[INIT:%.*]], align 8, !dbg [[DBG541:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[VAL0]], metadata [[META536:![0-9]+]], metadata !DIExpression()), !dbg [[DBG541]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[VAL0]], metadata [[META536:![0-9]+]], metadata !DIExpression()), !dbg [[DBG541]]
; DEBUG-NEXT: [[OBJ:%.*]] = alloca <4 x ptr>, align 16, !dbg [[DBG542:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr [[OBJ]], metadata [[META537:![0-9]+]], metadata !DIExpression()), !dbg [[DBG542]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr [[OBJ]], metadata [[META537:![0-9]+]], metadata !DIExpression()), !dbg [[DBG542]]
; DEBUG-NEXT: store <4 x ptr> zeroinitializer, ptr [[OBJ]], align 16, !dbg [[DBG543:![0-9]+]]
; DEBUG-NEXT: store ptr [[VAL0]], ptr [[OBJ]], align 16, !dbg [[DBG544:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META538:![0-9]+]], metadata !DIExpression()), !dbg [[DBG545:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META538:![0-9]+]], metadata !DIExpression()), !dbg [[DBG545:![0-9]+]]
; DEBUG-NEXT: [[OBJ_8_PTR2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 8, !dbg [[DBG546:![0-9]+]]
; DEBUG-NEXT: store i64 [[VAL2:%.*]], ptr [[OBJ_8_PTR2_SROA_IDX]], align 8, !dbg [[DBG546]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META539:![0-9]+]], metadata !DIExpression()), !dbg [[DBG547:![0-9]+]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata ptr undef, metadata [[META539:![0-9]+]], metadata !DIExpression()), !dbg [[DBG547:![0-9]+]]
; DEBUG-NEXT: [[OBJ_12_PTR3_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[OBJ]], i64 12, !dbg [[DBG548:![0-9]+]]
; DEBUG-NEXT: store i64 131072, ptr [[OBJ_12_PTR3_SROA_IDX]], align 4, !dbg [[DBG548]]
; DEBUG-NEXT: [[OBJ_0_SROAVAL:%.*]] = load <4 x ptr>, ptr [[OBJ]], align 16, !dbg [[DBG549:![0-9]+]]
-; DEBUG-NEXT: call void @llvm.dbg.value(metadata <4 x ptr> [[OBJ_0_SROAVAL]], metadata [[META540:![0-9]+]], metadata !DIExpression()), !dbg [[DBG549]]
+; DEBUG-NEXT: tail call void @llvm.dbg.value(metadata <4 x ptr> [[OBJ_0_SROAVAL]], metadata [[META540:![0-9]+]], metadata !DIExpression()), !dbg [[DBG549]]
; DEBUG-NEXT: ret <4 x ptr> [[OBJ_0_SROAVAL]], !dbg [[DBG550:![0-9]+]]
;
%val0 = load ptr, ptr %init, align 8
diff --git a/llvm/test/Transforms/TypePromotion/ARM/icmps.ll b/llvm/test/Transforms/TypePromotion/ARM/icmps.ll
index 842aab121b96..fb537a1f6470 100644
--- a/llvm/test/Transforms/TypePromotion/ARM/icmps.ll
+++ b/llvm/test/Transforms/TypePromotion/ARM/icmps.ll
@@ -4,8 +4,9 @@
define i32 @test_ult_254_inc_imm(i8 zeroext %x) {
; CHECK-LABEL: @test_ult_254_inc_imm(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[X:%.*]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[ADD]], -2
+; CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[X:%.*]] to i32
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], -255
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD]], -2
; CHECK-NEXT: [[RES:%.*]] = select i1 [[CMP]], i32 35, i32 47
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -368,7 +369,7 @@ if.end:
define i32 @degenerateicmp() {
; CHECK-LABEL: @degenerateicmp(
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 190, 0
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i32 225, [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i32 -31, [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 1, i32 0
; CHECK-NEXT: ret i32 [[TMP3]]
;
diff --git a/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll b/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll
index 377708cf7113..78c5e7323cea 100644
--- a/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll
+++ b/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll
@@ -89,8 +89,9 @@ define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
; CHECK-LABEL: @overflow_add_positive_const_limit(
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[A:%.*]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[ADD]], -128
+; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], -255
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[ADD]], -128
; CHECK-NEXT: [[RES:%.*]] = select i1 [[CMP]], i32 8, i32 16
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -144,8 +145,9 @@ define i32 @safe_add_underflow_neg(i8 zeroext %a) {
define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
; CHECK-LABEL: @overflow_sub_negative_const_limit(
-; CHECK-NEXT: [[SUB:%.*]] = sub i8 [[A:%.*]], -1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[SUB]], -128
+; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[TMP1]], 255
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[SUB]], -128
; CHECK-NEXT: [[RES:%.*]] = select i1 [[CMP]], i32 8, i32 16
; CHECK-NEXT: ret i32 [[RES]]
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll
new file mode 100644
index 000000000000..d3d13ae2622e
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll
@@ -0,0 +1,15 @@
+; RUN: opt -S < %s | FileCheck %s
+
+define i32 @foo() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @bar(i32 0, i32 1)
+; CHECK-NEXT: ret i32 [[RESULT]]
+;
+ %result = call i32 @bar(i32 0, i32 1)
+ ret i32 %result
+}
+
+declare i32 @bar(i32, i32)
+; CHECK-LABEL: @bar(
+; CHECK-SAME: i32
+; CHECK-SAME: i32
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected
new file mode 100644
index 000000000000..e76efaedd172
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected
@@ -0,0 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S < %s | FileCheck %s
+
+define i32 @foo() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @bar(i32 0, i32 1)
+; CHECK-NEXT: ret i32 [[RESULT]]
+;
+ %result = call i32 @bar(i32 0, i32 1)
+ ret i32 %result
+}
+
+declare i32 @bar(i32, i32)
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test b/llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test
new file mode 100644
index 000000000000..5d447babddea
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test
@@ -0,0 +1,4 @@
+## Basic test checking global checks split over multiple lines are removed together
+# RUN: cp -f %S/Inputs/global_remove_same.ll %t.ll && %update_test_checks %t.ll
+# RUN: diff -u %t.ll %S/Inputs/global_remove_same.ll.expected
+
diff --git a/llvm/test/tools/llvm-objcopy/ELF/set-symbol-visibility.test b/llvm/test/tools/llvm-objcopy/ELF/set-symbol-visibility.test
new file mode 100644
index 000000000000..de30ee09bfda
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/ELF/set-symbol-visibility.test
@@ -0,0 +1,311 @@
+# RUN: yaml2obj --docnum=1 %s -o %t.o
+# RUN: echo '.*' > %t.symbols.regex
+
+## Check that the visibility of all symbols is properly set to DEFAULT.
+# RUN: llvm-objcopy %t.o %t0.o --set-symbols-visibility=%t.symbols.regex=default --regex
+# RUN: llvm-readelf -s %t0.o | FileCheck %s --check-prefix=DEF
+
+# DEF-DAG: DEFAULT 1 default_local
+# DEF-DAG: DEFAULT 1 internal_local
+# DEF-DAG: DEFAULT 1 hidden_local
+# DEF-DAG: DEFAULT 1 protected_local
+# DEF-DAG: DEFAULT 1 default_global
+# DEF-DAG: DEFAULT 1 default_weak
+# DEF-DAG: DEFAULT 1 internal_global
+# DEF-DAG: DEFAULT 1 internal_weak
+# DEF-DAG: DEFAULT 1 hidden_global
+# DEF-DAG: DEFAULT 1 hidden_weak
+# DEF-DAG: DEFAULT 1 protected_global
+# DEF-DAG: DEFAULT 1 protected_weak
+
+## Check that the visibility of all symbols is properly set to HIDDEN.
+# RUN: llvm-objcopy %t.o %t1.o --set-symbols-visibility=%t.symbols.regex=hidden --regex
+# RUN: llvm-readelf -s %t1.o | FileCheck %s --check-prefix=HID
+
+# HID-DAG: HIDDEN 1 default_local
+# HID-DAG: HIDDEN 1 internal_local
+# HID-DAG: HIDDEN 1 hidden_local
+# HID-DAG: HIDDEN 1 protected_local
+# HID-DAG: HIDDEN 1 default_global
+# HID-DAG: HIDDEN 1 default_weak
+# HID-DAG: HIDDEN 1 internal_global
+# HID-DAG: HIDDEN 1 internal_weak
+# HID-DAG: HIDDEN 1 hidden_global
+# HID-DAG: HIDDEN 1 hidden_weak
+# HID-DAG: HIDDEN 1 protected_global
+# HID-DAG: HIDDEN 1 protected_weak
+
+## Check that the visibility of all symbols is properly set to PROTECTED.
+# RUN: llvm-objcopy %t.o %t2.o --set-symbols-visibility=%t.symbols.regex=protected --regex
+# RUN: llvm-readelf -s %t2.o | FileCheck %s --check-prefix=PRO
+
+# PRO-DAG: PROTECTED 1 default_local
+# PRO-DAG: PROTECTED 1 internal_local
+# PRO-DAG: PROTECTED 1 hidden_local
+# PRO-DAG: PROTECTED 1 protected_local
+# PRO-DAG: PROTECTED 1 default_global
+# PRO-DAG: PROTECTED 1 default_weak
+# PRO-DAG: PROTECTED 1 internal_global
+# PRO-DAG: PROTECTED 1 internal_weak
+# PRO-DAG: PROTECTED 1 hidden_global
+# PRO-DAG: PROTECTED 1 hidden_weak
+# PRO-DAG: PROTECTED 1 protected_global
+# PRO-DAG: PROTECTED 1 protected_weak
+
+## Check that the visibility of all symbols is properly set to INTERNAL.
+# RUN: llvm-objcopy %t.o %t3.o --set-symbols-visibility=%t.symbols.regex=internal --regex
+# RUN: llvm-readelf -s %t3.o | FileCheck %s --check-prefix=INT
+
+# INT-DAG: INTERNAL 1 default_local
+# INT-DAG: INTERNAL 1 internal_local
+# INT-DAG: INTERNAL 1 hidden_local
+# INT-DAG: INTERNAL 1 protected_local
+# INT-DAG: INTERNAL 1 default_global
+# INT-DAG: INTERNAL 1 default_weak
+# INT-DAG: INTERNAL 1 internal_global
+# INT-DAG: INTERNAL 1 internal_weak
+# INT-DAG: INTERNAL 1 hidden_global
+# INT-DAG: INTERNAL 1 hidden_weak
+# INT-DAG: INTERNAL 1 protected_global
+# INT-DAG: INTERNAL 1 protected_weak
+
+## Check that setting the visibility of certain symbols that were read from
+## a file does not affect other symbols.
+# RUN: echo -e "default_local\ninternal_local" > %t.symbol.list
+# RUN: llvm-objcopy %t.o %t4.o --set-symbols-visibility=%t.symbol.list=hidden
+# RUN: llvm-readelf -s %t4.o | FileCheck %s --check-prefix=FILE
+
+# FILE-DAG: HIDDEN 1 default_local
+# FILE-DAG: HIDDEN 1 internal_local
+## Unaffected symbols:
+# FILE-DAG: HIDDEN 1 hidden_local
+# FILE-DAG: PROTECTED 1 protected_local
+# FILE-DAG: DEFAULT 1 default_global
+# FILE-DAG: DEFAULT 1 default_weak
+# FILE-DAG: INTERNAL 1 internal_global
+# FILE-DAG: INTERNAL 1 internal_weak
+# FILE-DAG: HIDDEN 1 hidden_global
+# FILE-DAG: HIDDEN 1 hidden_weak
+# FILE-DAG: PROTECTED 1 protected_global
+# FILE-DAG: PROTECTED 1 protected_weak
+
+## Check that the visibility of a single symbol is set correctly,
+## and that no other symbols are affected.
+# RUN: llvm-objcopy %t.o %t5.o --set-symbol-visibility=default_local=hidden \
+# RUN: --set-symbol-visibility=internal_local=protected \
+# RUN: --set-symbol-visibility=hidden_local=internal \
+# RUN: --set-symbol-visibility=protected_local=default
+# RUN: llvm-readelf -s %t5.o | FileCheck %s --check-prefix=SINGLE
+
+# SINGLE-DAG: HIDDEN 1 default_local
+# SINGLE-DAG: PROTECTED 1 internal_local
+# SINGLE-DAG: INTERNAL 1 hidden_local
+# SINGLE-DAG: DEFAULT 1 protected_local
+## Unaffected symbols:
+# SINGLE-DAG: DEFAULT 1 default_global
+# SINGLE-DAG: DEFAULT 1 default_weak
+# SINGLE-DAG: INTERNAL 1 internal_global
+# SINGLE-DAG: INTERNAL 1 internal_weak
+# SINGLE-DAG: HIDDEN 1 hidden_global
+# SINGLE-DAG: HIDDEN 1 hidden_weak
+# SINGLE-DAG: PROTECTED 1 protected_global
+# SINGLE-DAG: PROTECTED 1 protected_weak
+
+## Check that the visibility of symbols specified by a regex are set correctly,
+## and that no other symbols are affected.
+# RUN: llvm-objcopy %t.o %t6.o --set-symbol-visibility='.*'_local=hidden --regex
+# RUN: llvm-readelf -s %t6.o | FileCheck %s --check-prefix=REGEX
+
+# REGEX-DAG: HIDDEN 1 default_local
+# REGEX-DAG: HIDDEN 1 internal_local
+# REGEX-DAG: HIDDEN 1 hidden_local
+# REGEX-DAG: HIDDEN 1 protected_local
+## Unaffected symbols:
+# REGEX-DAG: DEFAULT 1 default_global
+# REGEX-DAG: DEFAULT 1 default_weak
+# REGEX-DAG: INTERNAL 1 internal_global
+# REGEX-DAG: INTERNAL 1 internal_weak
+# REGEX-DAG: HIDDEN 1 hidden_global
+# REGEX-DAG: HIDDEN 1 hidden_weak
+# REGEX-DAG: PROTECTED 1 protected_global
+# REGEX-DAG: PROTECTED 1 protected_weak
+
+## Check that the visibility of symbols specified by a wildcard are set correctly,
+## and that no other symbols are affected.
+# RUN: llvm-objcopy %t.o %t7.o --set-symbol-visibility='*_local'=hidden --wildcard
+# RUN: llvm-readelf -s %t7.o | FileCheck %s --check-prefix=WILDCARD
+
+# WILDCARD-DAG: HIDDEN 1 default_local
+# WILDCARD-DAG: HIDDEN 1 internal_local
+# WILDCARD-DAG: HIDDEN 1 hidden_local
+# WILDCARD-DAG: HIDDEN 1 protected_local
+## Unaffected symbols:
+# WILDCARD-DAG: DEFAULT 1 default_global
+# WILDCARD-DAG: DEFAULT 1 default_weak
+# WILDCARD-DAG: INTERNAL 1 internal_global
+# WILDCARD-DAG: INTERNAL 1 internal_weak
+# WILDCARD-DAG: HIDDEN 1 hidden_global
+# WILDCARD-DAG: HIDDEN 1 hidden_weak
+# WILDCARD-DAG: PROTECTED 1 protected_global
+# WILDCARD-DAG: PROTECTED 1 protected_weak
+
+## Check that the latest option that matches the same symbols as any of the previous
+## options overwrites the visibility of these symbols.
+# RUN: echo -e '*_weak\n*_local' > %t.symbols.pattern
+# RUN: llvm-objcopy %t.o %t8.o --set-symbol-visibility='default_*'=hidden \
+# RUN: --set-symbol-visibility='internal_*'=hidden \
+# RUN: --set-symbols-visibility=%t.symbols.pattern=protected \
+# RUN: --wildcard
+# RUN: llvm-readelf -s %t8.o | FileCheck %s --check-prefix=REWRITE
+
+# REWRITE-DAG: PROTECTED 1 default_local
+# REWRITE-DAG: HIDDEN 1 default_global
+# REWRITE-DAG: PROTECTED 1 default_weak
+# REWRITE-DAG: PROTECTED 1 internal_local
+# REWRITE-DAG: HIDDEN 1 internal_global
+# REWRITE-DAG: PROTECTED 1 internal_weak
+# REWRITE-DAG: PROTECTED 1 hidden_local
+# REWRITE-DAG: PROTECTED 1 hidden_weak
+# REWRITE-DAG: PROTECTED 1 protected_local
+# REWRITE-DAG: PROTECTED 1 protected_weak
+## Unaffected symbols:
+# REWRITE-DAG: HIDDEN 1 hidden_global
+# REWRITE-DAG: PROTECTED 1 protected_global
+
+## Check that a symbol name with a special character is treated as a plain name
+## when pattern matching options are not enabled.
+# RUN: yaml2obj --docnum=2 %s -o %t9.o
+# RUN: llvm-objcopy %t9.o --set-symbol-visibility='f*o'=hidden
+# RUN: llvm-readelf -s %t9.o | FileCheck %s --check-prefix=SPECIAL
+
+# SPECIAL-DAG: HIDDEN 1 f*o
+## Unaffected symbol:
+# SPECIAL-DAG: DEFAULT 1 foo
+
+# RUN: yaml2obj --docnum=3 %s -o %t10.o
+
+## Check that the visibility of undefined symbols can be changed as well.
+# RUN: llvm-objcopy %t10.o --set-symbol-visibility=foo=hidden
+# RUN: llvm-readelf -s %t10.o | FileCheck %s --check-prefix=UNDEF
+# UNDEF: HIDDEN UND foo
+
+## Check that passing an invalid visibility type generates an error message.
+# RUN: echo 'foo' > %t.symbols
+# RUN: not llvm-objcopy %t10.o --set-symbols-visibility=%t.symbols=invalid-type 2>&1 | \
+# RUN: FileCheck %s --check-prefix=TYPE
+# RUN: not llvm-objcopy %t10.o --set-symbol-visibility=foo=invalid-type 2>&1 | \
+# RUN: FileCheck %s --check-prefix=TYPE
+# TYPE: error: 'invalid-type' is not a valid symbol visibility
+
+## Check that omitting the '=' character generates an error.
+# RUN: not llvm-objcopy %t10.o --set-symbols-visibility=%t.symbols,hidden 2>&1 | \
+# RUN: FileCheck %s --check-prefix=FORMAT -DOPTION=--set-symbols-visibility
+# RUN: not llvm-objcopy %t10.o --set-symbol-visibility=foo default 2>&1 | \
+# RUN: FileCheck %s --check-prefix=FORMAT -DOPTION=--set-symbol-visibility
+# FORMAT: error: bad format for [[OPTION]]
+
+## Check that using an invalid symbol pattern generates an error.
+# RUN: echo '*.' > %t.symbols.regex
+# RUN: not llvm-objcopy %t10.o --set-symbols-visibility=%t.symbols.regex=hidden --regex 2>&1 | \
+# RUN: FileCheck %s --check-prefix=SYMBOL
+# RUN: not llvm-objcopy %t10.o --set-symbol-visibility='*.'=default --regex 2>&1 | \
+# RUN: FileCheck %s --check-prefix=SYMBOL
+# SYMBOL: error: cannot compile regular expression '*.': repetition-operator operand invalid
+
+## Check that passing an invalid filename generates an error.
+# RUN: not llvm-objcopy %t10.o --set-symbols-visibility=no_file=hidden 2>&1 | \
+# RUN: FileCheck %s --check-prefix=NO_FILE -DMSG=%errc_ENOENT
+# NO_FILE: error: 'no_file': [[MSG]]
+
+---
+!ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+Symbols:
+ - Name: default_local
+ Section: .text
+ Binding: STB_LOCAL
+ - Name: protected_local
+ Section: .text
+ Binding: STB_LOCAL
+ Other: [ STV_PROTECTED ]
+ - Name: internal_local
+ Section: .text
+ Binding: STB_LOCAL
+ Other: [ STV_INTERNAL ]
+ - Name: hidden_local
+ Section: .text
+ Binding: STB_LOCAL
+ Other: [ STV_HIDDEN ]
+ - Name: default_weak
+ Section: .text
+ Binding: STB_WEAK
+ - Name: internal_weak
+ Section: .text
+ Binding: STB_WEAK
+ Other: [ STV_INTERNAL ]
+ - Name: hidden_weak
+ Section: .text
+ Binding: STB_WEAK
+ Other: [ STV_HIDDEN ]
+ - Name: protected_weak
+ Section: .text
+ Binding: STB_WEAK
+ Other: [ STV_PROTECTED ]
+ - Name: default_global
+ Section: .text
+ Binding: STB_GLOBAL
+ - Name: internal_global
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_INTERNAL ]
+ - Name: hidden_global
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: protected_global
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_PROTECTED ]
+ - Name: ignored_name
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_INTERNAL ]
+...
+
+---
+!ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+Symbols:
+ - Name: f*o
+ Section: .text
+ Binding: STB_LOCAL
+ - Name: foo
+ Section: .text
+ Binding: STB_LOCAL
+...
+
+---
+!ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Symbols:
+ - Name: foo
+ Binding: STB_LOCAL
+...
diff --git a/llvm/test/tools/llvm-readobj/XCOFF/loader-section-relocation.test b/llvm/test/tools/llvm-readobj/XCOFF/loader-section-relocation.test
index 42eb897d0739..d03d07bd8b0c 100644
--- a/llvm/test/tools/llvm-readobj/XCOFF/loader-section-relocation.test
+++ b/llvm/test/tools/llvm-readobj/XCOFF/loader-section-relocation.test
@@ -2,18 +2,27 @@
# RUN: yaml2obj --docnum=1 %s -o %t_xcoff32.o
# RUN: yaml2obj --docnum=2 %s -o %t_xcoff64.o
-# RUN: llvm-readobj --loader-section-relocations --expand-relocs %t_xcoff32.o | FileCheck --check-prefixes=COMMON,EXPAND %s
-# RUN: llvm-readobj --loader-section-relocations --expand-relocs %t_xcoff64.o | FileCheck --check-prefixes=COMMON,EXPAND %s
-# RUN: llvm-readobj --loader-section-relocations %t_xcoff32.o | FileCheck --check-prefixes=COMMON,NOEXPAND32 %s
-# RUN: llvm-readobj --loader-section-relocations %t_xcoff64.o | FileCheck --check-prefixes=COMMON,NOEXPAND64 %s
+# RUN: llvm-readobj --loader-section-relocations --expand-relocs %t_xcoff32.o | FileCheck --check-prefixes=COMMON,EXPAND,NODEMANEXP %s
+# RUN: llvm-readobj --loader-section-relocations --expand-relocs %t_xcoff64.o | FileCheck --check-prefixes=COMMON,EXPAND,NODEMANEXP %s
+# RUN: llvm-readobj --loader-section-relocations %t_xcoff32.o | FileCheck --check-prefixes=COMMON,NOEXPAND32,NODEMAN32 %s
+# RUN: llvm-readobj --loader-section-relocations %t_xcoff64.o | FileCheck --check-prefixes=COMMON,NOEXPAND64,NODEMAN64 %s
+# RUN: llvm-readobj --loader-section-relocations --expand-relocs --no-demangle %t_xcoff32.o | FileCheck --check-prefixes=COMMON,EXPAND,NODEMANEXP %s
+# RUN: llvm-readobj --loader-section-relocations --expand-relocs --no-demangle %t_xcoff64.o | FileCheck --check-prefixes=COMMON,EXPAND,NODEMANEXP %s
+# RUN: llvm-readobj --loader-section-relocations --no-demangle %t_xcoff32.o | FileCheck --check-prefixes=COMMON,NOEXPAND32,NODEMAN32 %s
+# RUN: llvm-readobj --loader-section-relocations --no-demangle %t_xcoff64.o | FileCheck --check-prefixes=COMMON,NOEXPAND64,NODEMAN64 %s
+
+# RUN: llvm-readobj --loader-section-relocations --expand-relocs --demangle %t_xcoff32.o | FileCheck --check-prefixes=COMMON,EXPAND,DEMANEXP %s
+# RUN: llvm-readobj --loader-section-relocations --expand-relocs --demangle %t_xcoff64.o | FileCheck --check-prefixes=COMMON,EXPAND,DEMANEXP %s
+# RUN: llvm-readobj --loader-section-relocations --demangle %t_xcoff32.o | FileCheck --check-prefixes=COMMON,NOEXPAND32,DEMAN32 %s
+# RUN: llvm-readobj --loader-section-relocations --demangle %t_xcoff64.o | FileCheck --check-prefixes=COMMON,NOEXPAND64,DEMAN64 %s
--- !XCOFF
FileHeader:
MagicNumber: 0x1DF
Sections:
- Name: .loader
Flags: [ STYP_LOADER ]
- SectionData: "0000000100000001000000020000016D00000001000000A400000000000000506d79696e747661722000028000021105000000000000000020000294000000011f0000022000029c000000031f000002"
+ SectionData: "0000000100000001000000020000016D00000001000000A400000000000000505f5a3466756e63762000028000021105000000000000000020000294000000011f0000022000029c000000031f000002"
## ^------- -Version=1
## ^------- -NumberOfSymbolEntries=1
## ^------- -NumberOfRelocationEntries=2
@@ -22,7 +31,7 @@ Sections:
## ^------- -OffsetToImportFileIDs=0xA4
## ^------- -LengthOfStringTable=0
## ^------- -OffsetToStringTable=0
-## ^--------------- SymbolName=myintvar
+## ^--------------- SymbolName=_Z4funcv
## ^------- Value=0x20000280
## ^--- sectionNumber = 2
## ^- SymbolType=0x11
@@ -44,7 +53,7 @@ FileHeader:
Sections:
- Name: .loader
Flags: [ STYP_LOADER ]
- SectionData: "0000000200000001000000020000016D000000010000001200000000000000D000000000000000700000000000000038000000000000005000000001100003000000000200021105000000000000000000000000200002941f00000200000001000000002000029C1f0000020000000300096d79696e747661720000"
+ SectionData: "0000000200000001000000020000016D000000010000001200000000000000D000000000000000700000000000000038000000000000005000000001100003000000000200021105000000000000000000000000200002941f00000200000001000000002000029C1f0000020000000300095f5a3466756e63760000"
## ^------- -Version=2
## ^------- -NumberOfSymbolEntries=1
## ^------- -NumberOfRelocationEntries=2
@@ -76,10 +85,12 @@ Sections:
# COMMON-NEXT: Loader Section Relocations {
# NOEXPAND64-NEXT: Vaddr Type SecNum SymbolName (Index)
# NOEXPAND64-NEXT: 0x0000000020000294 0x1f00 (R_POS) 2 .data (1)
-# NOEXPAND64-NEXT: 0x000000002000029c 0x1f00 (R_POS) 2 myintvar (3)
+# NODEMAN64-NEXT: 0x000000002000029c 0x1f00 (R_POS) 2 _Z4funcv (3)
+# DEMAN64-NEXT: 0x000000002000029c 0x1f00 (R_POS) 2 func() (3)
# NOEXPAND32-NEXT: Vaddr Type SecNum SymbolName (Index)
# NOEXPAND32-NEXT: 0x20000294 0x1f00 (R_POS) 2 .data (1)
-# NOEXPAND32-NEXT: 0x2000029c 0x1f00 (R_POS) 2 myintvar (3)
+# NODEMAN32-NEXT: 0x2000029c 0x1f00 (R_POS) 2 _Z4funcv (3)
+# DEMAN32-NEXT: 0x2000029c 0x1f00 (R_POS) 2 func() (3)
# EXPAND-NEXT: Relocation {
# EXPAND-NEXT: Virtual Address: 0x20000294
# EXPAND-NEXT: Symbol: .data (1)
@@ -91,7 +102,8 @@ Sections:
# EXPAND-NEXT: }
# EXPAND-NEXT: Relocation {
# EXPAND-NEXT: Virtual Address: 0x2000029C
-# EXPAND-NEXT: Symbol: myintvar (3)
+# NODEMANEXP-NEXT: Symbol: _Z4funcv (3)
+# DEMANEXP-NEXT: Symbol: func() (3)
# EXPAND-NEXT: IsSigned: No
# EXPAND-NEXT: FixupBitValue: 0
# EXPAND-NEXT: Length: 32
diff --git a/llvm/test/tools/llvm-readobj/XCOFF/loader-section-symbol.test b/llvm/test/tools/llvm-readobj/XCOFF/loader-section-symbol.test
index 8f2b20ca8149..053c5b496f62 100644
--- a/llvm/test/tools/llvm-readobj/XCOFF/loader-section-symbol.test
+++ b/llvm/test/tools/llvm-readobj/XCOFF/loader-section-symbol.test
@@ -6,10 +6,18 @@
# RUN: llvm-readobj --loader-section-symbols %t_xcoff32.o |\
# RUN: FileCheck %s --check-prefixes=CHECK32
+# RUN: llvm-readobj --loader-section-symbols --no-demangle %t_xcoff32.o |\
+# RUN: FileCheck %s --check-prefixes=CHECK32
+# RUN: llvm-readobj --loader-section-symbols --demangle %t_xcoff32.o |\
+# RUN: FileCheck %s --check-prefixes=CHECK32
# RUN: llvm-readobj --loader-section-symbols %t_xcoff32_invalid.o 2>&1 |\
# RUN: FileCheck -DFILE=%t_xcoff32_invalid.o %s --check-prefixes=CHECK32,WARN
# RUN: llvm-readobj --loader-section-symbols %t_xcoff64.o |\
-# RUN: FileCheck %s --check-prefixes=CHECK64
+# RUN: FileCheck %s --check-prefixes=CHECK64,NODEMAN64
+# RUN: llvm-readobj --loader-section-symbols --no-demangle %t_xcoff64.o |\
+# RUN: FileCheck %s --check-prefixes=CHECK64,NODEMAN64
+# RUN: llvm-readobj --loader-section-symbols --demangle %t_xcoff64.o |\
+# RUN: FileCheck %s --check-prefixes=CHECK64,DEMAN64
--- !XCOFF
FileHeader:
@@ -112,7 +120,8 @@ Sections:
# CHECK64-NEXT: ParameterTypeCheck: 0
# CHECK64-NEXT: }
# CHECK64-NEXT: Symbol {
-# CHECK64-NEXT: Name: _Z5func0v
+# NODEMAN64-NEXT: Name: _Z5func0v
+# DEMAN64-NEXT: Name: func0()
# CHECK64-NEXT: Virtual Address: 0x110000308
# CHECK64-NEXT: SectionNum: 2
# CHECK64-NEXT: SymbolType: 0x11
diff --git a/llvm/test/tools/llvm-readobj/XCOFF/relocations.test b/llvm/test/tools/llvm-readobj/XCOFF/relocations.test
index 9e327c4fbbdc..917db8b05e48 100644
--- a/llvm/test/tools/llvm-readobj/XCOFF/relocations.test
+++ b/llvm/test/tools/llvm-readobj/XCOFF/relocations.test
@@ -2,9 +2,19 @@
# RUN: yaml2obj %s -o %t
# RUN: llvm-readobj --relocs --expand-relocs %t | \
-# RUN: FileCheck %s --strict-whitespace --match-full-lines --check-prefix=RELOCSEXP
+# RUN: FileCheck %s --strict-whitespace --check-prefixes=RELOCSEXP,NODEMANEXP
# RUN: llvm-readobj --relocs %t | \
-# RUN: FileCheck %s --strict-whitespace --match-full-lines --check-prefix=RELOCS
+# RUN: FileCheck %s --strict-whitespace --check-prefixes=RELOCS,NODEMAN
+
+# RUN: llvm-readobj --relocs --expand-relocs --no-demangle %t | \
+# RUN: FileCheck %s --strict-whitespace --check-prefixes=RELOCSEXP,NODEMANEXP
+# RUN: llvm-readobj --relocs --no-demangle %t | \
+# RUN: FileCheck %s --strict-whitespace --check-prefixes=RELOCS,NODEMAN
+
+# RUN: llvm-readobj --relocs --expand-relocs --demangle %t | \
+# RUN: FileCheck %s --strict-whitespace --check-prefixes=RELOCSEXP,DEMANEXP
+# RUN: llvm-readobj --relocs --demangle %t | \
+# RUN: FileCheck %s --strict-whitespace --check-prefixes=RELOCS,DEMAN
# RELOCSEXP:Relocations [
# RELOCSEXP-NEXT: Section (index: 1) .text {
@@ -28,7 +38,8 @@
# RELOCSEXP-NEXT: Section (index: 2) .data {
# RELOCSEXP-NEXT: Relocation {
# RELOCSEXP-NEXT: Virtual Address: 0x200
-# RELOCSEXP-NEXT: Symbol: bar (1)
+# NODEMANEXP-NEXT: Symbol: _Z3fwpv (1)
+# DEMANEXP-NEXT: Symbol: fwp() (1)
# RELOCSEXP-NEXT: IsSigned: No
# RELOCSEXP-NEXT: FixupBitValue: 0
# RELOCSEXP-NEXT: Length: 20
@@ -43,7 +54,8 @@
# RELOCS-NEXT: 0x100 R_REL foo(0) 0x14
# RELOCS-NEXT: }
# RELOCS-NEXT: Section (index: 2) .data {
-# RELOCS-NEXT: 0x200 R_TOC bar(1) 0x13
+# NODEMAN-NEXT: 0x200 R_TOC _Z3fwpv(1) 0x13
+# DEMAN-NEXT: 0x200 R_TOC fwp()(1) 0x13
# RELOCS-NEXT: }
# RELOCS-NEXT:]
@@ -73,6 +85,6 @@ Symbols:
- Name: foo
Value: 0x0
Section: .text
- - Name: bar
+ - Name: _Z3fwpv
Value: 0x80
Section: .data
diff --git a/llvm/test/tools/llvm-readobj/XCOFF/symbols.test b/llvm/test/tools/llvm-readobj/XCOFF/symbols.test
index 72ec8967cc95..89439a3d0f02 100644
--- a/llvm/test/tools/llvm-readobj/XCOFF/symbols.test
+++ b/llvm/test/tools/llvm-readobj/XCOFF/symbols.test
@@ -2,7 +2,9 @@
## 32-bit XCOFF object file.
# RUN: yaml2obj %s -o %t
-# RUN: llvm-readobj --symbols %t | FileCheck --check-prefix=SYMBOL32 %s
+# RUN: llvm-readobj --symbols %t | FileCheck --check-prefixes=SYMBOL32,NODEMANGLE %s
+# RUN: llvm-readobj --symbols --no-demangle %t | FileCheck --check-prefixes=SYMBOL32,NODEMANGLE %s
+# RUN: llvm-readobj --symbols --demangle %t | FileCheck --check-prefixes=SYMBOL32,DEMANGLE %s
--- !XCOFF
FileHeader:
@@ -56,7 +58,7 @@ Symbols:
StabInfoIndex: 5
StabSectNum: 6
## The C_EXT symbol with a CSECT auxiliary entry.
- - Name: .fun1
+ - Name: ._Z5func1i
Value: 0x0
Section: .text
Type: 0x20
@@ -224,7 +226,8 @@ Symbols:
# SYMBOL32-NEXT: }
# SYMBOL32-NEXT: Symbol {
# SYMBOL32-NEXT: Index: 8
-# SYMBOL32-NEXT: Name: .fun1
+# NODEMANGLE-NEXT: Name: ._Z5func1i
+# DEMANGLE-NEXT: Name: .func1(int)
# SYMBOL32-NEXT: Value (RelocatableAddress): 0x0
# SYMBOL32-NEXT: Section: .text
# SYMBOL32-NEXT: Type: 0x20
diff --git a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
index 2b438a8b1346..2bfc9705368e 100644
--- a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
+++ b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
@@ -286,6 +286,8 @@ static opt<bool> Verify("verify", desc("Verify the DWARF debug info."),
cat(DwarfDumpCategory));
static opt<ErrorDetailLevel> ErrorDetails(
"error-display", init(Unspecified),
+ desc("Set the level of detail and summary to display when verifying "
+ "(implies --verify)"),
values(clEnumValN(NoDetailsOrSummary, "quiet",
"Only display whether errors occurred."),
clEnumValN(NoDetailsOnlySummary, "summary",
@@ -295,6 +297,11 @@ static opt<ErrorDetailLevel> ErrorDetails(
clEnumValN(BothDetailsAndSummary, "full",
"Display each error as well as a summary. [default]")),
cat(DwarfDumpCategory));
+static opt<std::string> JsonErrSummaryFile(
+ "verify-json", init(""),
+ desc("Output JSON-formatted error summary to the specified file. "
+ "(Implies --verify)"),
+ value_desc("filename.json"), cat(DwarfDumpCategory));
static opt<bool> Quiet("quiet", desc("Use with -verify to not emit to STDOUT."),
cat(DwarfDumpCategory));
static opt<bool> DumpUUID("uuid", desc("Show the UUID for each architecture."),
@@ -349,6 +356,7 @@ static DIDumpOptions getDumpOpts(DWARFContext &C) {
ErrorDetails != NoDetailsOrSummary;
DumpOpts.ShowAggregateErrors = ErrorDetails != OnlyDetailsNoSummary &&
ErrorDetails != NoDetailsOnlySummary;
+ DumpOpts.JsonErrSummaryFile = JsonErrSummaryFile;
return DumpOpts.noImplicitRecursion();
}
return DumpOpts;
@@ -834,8 +842,10 @@ int main(int argc, char **argv) {
"-verbose is currently not supported";
return 1;
}
- if (!Verify && ErrorDetails != Unspecified)
- WithColor::warning() << "-error-detail has no affect without -verify";
+ // -error-detail and -json-summary-file both imply -verify
+ if (ErrorDetails != Unspecified || !JsonErrSummaryFile.empty()) {
+ Verify = true;
+ }
std::error_code EC;
ToolOutputFile OutputFile(OutputFilename, EC, sys::fs::OF_TextWithCRLF);
diff --git a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
index ec9dc0a2a814..6318578b1100 100644
--- a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
+++ b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
@@ -254,6 +254,21 @@ parseSetSectionFlagValue(StringRef FlagValue) {
return SFU;
}
+static Expected<uint8_t> parseVisibilityType(StringRef VisType) {
+ const uint8_t Invalid = 0xff;
+ uint8_t type = StringSwitch<uint8_t>(VisType)
+ .Case("default", ELF::STV_DEFAULT)
+ .Case("hidden", ELF::STV_HIDDEN)
+ .Case("internal", ELF::STV_INTERNAL)
+ .Case("protected", ELF::STV_PROTECTED)
+ .Default(Invalid);
+ if (type == Invalid)
+ return createStringError(errc::invalid_argument,
+ "'%s' is not a valid symbol visibility",
+ VisType.str().c_str());
+ return type;
+}
+
namespace {
struct TargetInfo {
FileFormat Format;
@@ -969,6 +984,33 @@ objcopy::parseObjcopyOptions(ArrayRef<const char *> RawArgsArr,
Config.SymbolsToAdd.push_back(*SymInfo);
}
+ for (auto *Arg : InputArgs.filtered(OBJCOPY_set_symbol_visibility)) {
+ if (!StringRef(Arg->getValue()).contains('='))
+ return createStringError(errc::invalid_argument,
+ "bad format for --set-symbol-visibility");
+ auto [Sym, Visibility] = StringRef(Arg->getValue()).split('=');
+ Expected<uint8_t> Type = parseVisibilityType(Visibility);
+ if (!Type)
+ return Type.takeError();
+ ELFConfig.SymbolsToSetVisibility.emplace_back(NameMatcher(), *Type);
+ if (Error E = ELFConfig.SymbolsToSetVisibility.back().first.addMatcher(
+ NameOrPattern::create(Sym, SymbolMatchStyle, ErrorCallback)))
+ return std::move(E);
+ }
+ for (auto *Arg : InputArgs.filtered(OBJCOPY_set_symbols_visibility)) {
+ if (!StringRef(Arg->getValue()).contains('='))
+ return createStringError(errc::invalid_argument,
+ "bad format for --set-symbols-visibility");
+ auto [File, Visibility] = StringRef(Arg->getValue()).split('=');
+ Expected<uint8_t> Type = parseVisibilityType(Visibility);
+ if (!Type)
+ return Type.takeError();
+ ELFConfig.SymbolsToSetVisibility.emplace_back(NameMatcher(), *Type);
+ if (Error E =
+ addSymbolsFromFile(ELFConfig.SymbolsToSetVisibility.back().first,
+ DC.Alloc, File, SymbolMatchStyle, ErrorCallback))
+ return std::move(E);
+ }
ELFConfig.AllowBrokenLinks = InputArgs.hasArg(OBJCOPY_allow_broken_links);
diff --git a/llvm/tools/llvm-objcopy/ObjcopyOpts.td b/llvm/tools/llvm-objcopy/ObjcopyOpts.td
index 86774c889ab8..3c0e5cd475a3 100644
--- a/llvm/tools/llvm-objcopy/ObjcopyOpts.td
+++ b/llvm/tools/llvm-objcopy/ObjcopyOpts.td
@@ -88,6 +88,17 @@ defm set_section_type
"Set the type of section <section> to the integer <type>">,
MetaVarName<"section=type">;
+defm set_symbol_visibility
+ : Eq<"set-symbol-visibility",
+ "Change the visibility of a symbol to the specified value">,
+ MetaVarName<"symbol=visibility">;
+defm set_symbols_visibility
+ : Eq<"set-symbols-visibility",
+ "Read a list of symbols from <filename> and change their "
+ "visibility to the specified value. Visibility values: default, "
+ "internal, hidden, protected">,
+ MetaVarName<"filename=visibility">;
+
def S : Flag<["-"], "S">,
Alias<strip_all>,
HelpText<"Alias for --strip-all">;
diff --git a/llvm/tools/llvm-profgen/PerfReader.cpp b/llvm/tools/llvm-profgen/PerfReader.cpp
index 313d40483a25..c6fcf7e1196e 100644
--- a/llvm/tools/llvm-profgen/PerfReader.cpp
+++ b/llvm/tools/llvm-profgen/PerfReader.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "PerfReader.h"
#include "ProfileGenerator.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Process.h"
@@ -361,8 +362,11 @@ PerfScriptReader::convertPerfDataToTrace(ProfiledBinary *Binary,
exitWithError("Perf not found.");
}
std::string PerfPath = *PerfExecutable;
- std::string PerfTraceFile = PerfData.str() + ".script.tmp";
- std::string ErrorFile = PerfData.str() + ".script.err.tmp";
+
+ SmallString<128> PerfTraceFile;
+ sys::fs::createUniquePath("perf-script-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%.tmp",
+ PerfTraceFile, /*MakeAbsolute=*/true);
+ std::string ErrorFile = std::string(PerfTraceFile) + ".err";
StringRef ScriptMMapArgs[] = {PerfPath, "script", "--show-mmap-events",
"-F", "comm,pid", "-i",
PerfData};
@@ -400,7 +404,8 @@ PerfScriptReader::convertPerfDataToTrace(ProfiledBinary *Binary,
PIDs, "-i", PerfData};
sys::ExecuteAndWait(PerfPath, ScriptSampleArgs, std::nullopt, Redirects);
- return {PerfTraceFile, PerfFormat::PerfScript, PerfContent::UnknownContent};
+ return {std::string(PerfTraceFile), PerfFormat::PerfScript,
+ PerfContent::UnknownContent};
}
void PerfScriptReader::updateBinaryAddress(const MMapEvent &Event) {
diff --git a/llvm/tools/llvm-readobj/XCOFFDumper.cpp b/llvm/tools/llvm-readobj/XCOFFDumper.cpp
index e7f50e8a1884..46b510cfb06a 100644
--- a/llvm/tools/llvm-readobj/XCOFFDumper.cpp
+++ b/llvm/tools/llvm-readobj/XCOFFDumper.cpp
@@ -12,6 +12,7 @@
#include "ObjDumper.h"
#include "llvm-readobj.h"
+#include "llvm/Demangle/Demangle.h"
#include "llvm/Object/XCOFFObjectFile.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -250,7 +251,8 @@ void XCOFFDumper::printLoaderSectionSymbolsHelper(uintptr_t LoaderSectionAddr) {
}
DictScope DS(W, "Symbol");
- W.printString("Name", SymbolNameOrErr.get());
+ StringRef SymbolName = SymbolNameOrErr.get();
+ W.printString("Name", opts::Demangle ? demangle(SymbolName) : SymbolName);
W.printHex("Virtual Address", LoadSecSymEntPtr->Value);
W.printNumber("SectionNum", LoadSecSymEntPtr->SectionNumber);
W.printHex("SymbolType", LoadSecSymEntPtr->SymbolType);
@@ -326,7 +328,8 @@ void XCOFFDumper::printLoaderSectionRelocationEntry(
uint8_t Info = Type >> 8;
W.printHex("Virtual Address", LoaderSecRelEntPtr->VirtualAddr);
- W.printNumber("Symbol", SymbolName, LoaderSecRelEntPtr->SymbolIndex);
+ W.printNumber("Symbol", opts::Demangle ? demangle(SymbolName) : SymbolName,
+ LoaderSecRelEntPtr->SymbolIndex);
W.printString("IsSigned", IsRelocationSigned(Info) ? "Yes" : "No");
W.printNumber("FixupBitValue", IsFixupIndicated(Info) ? 1 : 0);
W.printNumber("Length", GetRelocatedLength(Info));
@@ -340,8 +343,9 @@ void XCOFFDumper::printLoaderSectionRelocationEntry(
<< XCOFF::getRelocationTypeString(
static_cast<XCOFF::RelocationType>(Type))
<< ")" << format_decimal(LoaderSecRelEntPtr->SectionNum, 8)
- << " " << SymbolName << " ("
- << LoaderSecRelEntPtr->SymbolIndex << ")\n";
+ << " "
+ << (opts::Demangle ? demangle(SymbolName) : SymbolName)
+ << " (" << LoaderSecRelEntPtr->SymbolIndex << ")\n";
}
}
@@ -466,15 +470,17 @@ template <typename RelTy> void XCOFFDumper::printRelocation(RelTy Reloc) {
if (opts::ExpandRelocs) {
DictScope Group(W, "Relocation");
W.printHex("Virtual Address", Reloc.VirtualAddress);
- W.printNumber("Symbol", SymbolName, Reloc.SymbolIndex);
+ W.printNumber("Symbol", opts::Demangle ? demangle(SymbolName) : SymbolName,
+ Reloc.SymbolIndex);
W.printString("IsSigned", Reloc.isRelocationSigned() ? "Yes" : "No");
W.printNumber("FixupBitValue", Reloc.isFixupIndicated() ? 1 : 0);
W.printNumber("Length", Reloc.getRelocatedLength());
W.printEnum("Type", (uint8_t)Reloc.Type, ArrayRef(RelocationTypeNameclass));
} else {
raw_ostream &OS = W.startLine();
- OS << W.hex(Reloc.VirtualAddress) << " " << RelocName << " " << SymbolName
- << "(" << Reloc.SymbolIndex << ") " << W.hex(Reloc.Info) << "\n";
+ OS << W.hex(Reloc.VirtualAddress) << " " << RelocName << " "
+ << (opts::Demangle ? demangle(SymbolName) : SymbolName) << "("
+ << Reloc.SymbolIndex << ") " << W.hex(Reloc.Info) << "\n";
}
}
@@ -752,7 +758,7 @@ void XCOFFDumper::printSymbol(const SymbolRef &S) {
XCOFF::StorageClass SymbolClass = SymbolEntRef.getStorageClass();
W.printNumber("Index", SymbolIdx);
- W.printString("Name", SymbolName);
+ W.printString("Name", opts::Demangle ? demangle(SymbolName) : SymbolName);
W.printHex(GetSymbolValueName(SymbolClass), SymbolEntRef.getValue());
StringRef SectionName =
diff --git a/llvm/tools/obj2yaml/dxcontainer2yaml.cpp b/llvm/tools/obj2yaml/dxcontainer2yaml.cpp
index b58d7cd952af..69d9b9a2f784 100644
--- a/llvm/tools/obj2yaml/dxcontainer2yaml.cpp
+++ b/llvm/tools/obj2yaml/dxcontainer2yaml.cpp
@@ -71,10 +71,10 @@ dumpDXContainer(MemoryBufferRef Source) {
break;
}
case dxbc::PartType::SFI0: {
- std::optional<uint64_t> Flags = Container.getShaderFlags();
+ std::optional<uint64_t> Flags = Container.getShaderFeatureFlags();
// Omit the flags in the YAML if they are missing or zero.
if (Flags && *Flags > 0)
- NewPart.Flags = DXContainerYAML::ShaderFlags(*Flags);
+ NewPart.Flags = DXContainerYAML::ShaderFeatureFlags(*Flags);
break;
}
case dxbc::PartType::HASH: {
diff --git a/llvm/unittests/ADT/APFloatTest.cpp b/llvm/unittests/ADT/APFloatTest.cpp
index baf055e503b7..6e4dda8351a1 100644
--- a/llvm/unittests/ADT/APFloatTest.cpp
+++ b/llvm/unittests/ADT/APFloatTest.cpp
@@ -578,6 +578,11 @@ TEST(APFloatTest, MinNum) {
EXPECT_EQ(1.0, minnum(f2, f1).convertToDouble());
EXPECT_EQ(1.0, minnum(f1, nan).convertToDouble());
EXPECT_EQ(1.0, minnum(nan, f1).convertToDouble());
+
+ APFloat zp(0.0);
+ APFloat zn(-0.0);
+ EXPECT_EQ(-0.0, minnum(zp, zn).convertToDouble());
+ EXPECT_EQ(-0.0, minnum(zn, zp).convertToDouble());
}
TEST(APFloatTest, MaxNum) {
@@ -589,6 +594,11 @@ TEST(APFloatTest, MaxNum) {
EXPECT_EQ(2.0, maxnum(f2, f1).convertToDouble());
EXPECT_EQ(1.0, maxnum(f1, nan).convertToDouble());
EXPECT_EQ(1.0, maxnum(nan, f1).convertToDouble());
+
+ APFloat zp(0.0);
+ APFloat zn(-0.0);
+ EXPECT_EQ(0.0, maxnum(zp, zn).convertToDouble());
+ EXPECT_EQ(0.0, maxnum(zn, zp).convertToDouble());
}
TEST(APFloatTest, Minimum) {
diff --git a/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp b/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp
index bb8e76a2eeb8..e0772684e3a9 100644
--- a/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp
+++ b/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp
@@ -6,11 +6,13 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SourceMgr.h"
@@ -728,4 +730,70 @@ TEST_F(AArch64SelectionDAGTest, ReplaceAllUsesWith) {
EXPECT_EQ(DAG->getPCSections(New.getNode()), MD);
}
+TEST_F(AArch64SelectionDAGTest, computeKnownBits_extload_known01) {
+ SDLoc Loc;
+ auto Int8VT = EVT::getIntegerVT(Context, 8);
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto Int64VT = EVT::getIntegerVT(Context, 64);
+ auto Ptr = DAG->getConstant(0, Loc, Int64VT);
+ auto PtrInfo =
+ MachinePointerInfo::getFixedStack(DAG->getMachineFunction(), 0);
+ AAMDNodes AA;
+ MDBuilder MDHelper(*DAG->getContext());
+ MDNode *Range = MDHelper.createRange(APInt(8, 0), APInt(8, 2));
+ MachineMemOperand *MMO = DAG->getMachineFunction().getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, 8, Align(8), AA, Range);
+
+ auto ALoad = DAG->getExtLoad(ISD::EXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ KnownBits Known = DAG->computeKnownBits(ALoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xfe));
+ EXPECT_EQ(Known.One, APInt(32, 0));
+
+ auto ZLoad = DAG->getExtLoad(ISD::ZEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(ZLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xfffffffe));
+ EXPECT_EQ(Known.One, APInt(32, 0));
+
+ auto SLoad = DAG->getExtLoad(ISD::SEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(SLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xfffffffe));
+ EXPECT_EQ(Known.One, APInt(32, 0));
+}
+
+TEST_F(AArch64SelectionDAGTest, computeKnownBits_extload_knownnegative) {
+ SDLoc Loc;
+ auto Int8VT = EVT::getIntegerVT(Context, 8);
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto Int64VT = EVT::getIntegerVT(Context, 64);
+ auto Ptr = DAG->getConstant(0, Loc, Int64VT);
+ auto PtrInfo =
+ MachinePointerInfo::getFixedStack(DAG->getMachineFunction(), 0);
+ AAMDNodes AA;
+ MDBuilder MDHelper(*DAG->getContext());
+ MDNode *Range = MDHelper.createRange(APInt(8, 0xf0), APInt(8, 0xff));
+ MachineMemOperand *MMO = DAG->getMachineFunction().getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, 8, Align(8), AA, Range);
+
+ auto ALoad = DAG->getExtLoad(ISD::EXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ KnownBits Known = DAG->computeKnownBits(ALoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0));
+ EXPECT_EQ(Known.One, APInt(32, 0xf0));
+
+ auto ZLoad = DAG->getExtLoad(ISD::ZEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(ZLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xffffff00));
+ EXPECT_EQ(Known.One, APInt(32, 0x000000f0));
+
+ auto SLoad = DAG->getExtLoad(ISD::SEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(SLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0));
+ EXPECT_EQ(Known.One, APInt(32, 0xfffffff0));
+}
+
} // end namespace llvm
diff --git a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
index 7426884217a0..1f2b8c1754f6 100644
--- a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
+++ b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
@@ -110,12 +110,12 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObject) {
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
- std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
+ TypeSize NumBytes = cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize();
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store.getNode(), NumBytes, Store.getNode(), NumBytes, *DAG, IsAlias);
+ Store.getNode(), LocationSize::precise(NumBytes), Store.getNode(),
+ LocationSize::precise(NumBytes), *DAG, IsAlias);
EXPECT_TRUE(IsValid);
EXPECT_TRUE(IsAlias);
@@ -134,14 +134,10 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObjectUnknownSize) {
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
- // Maybe unlikely that BaseIndexOffset::computeAliasing is used with the
- // optional NumBytes being unset like in this test, but it would be confusing
- // if that function determined IsAlias=false here.
- std::optional<int64_t> NumBytes;
-
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store.getNode(), NumBytes, Store.getNode(), NumBytes, *DAG, IsAlias);
+ Store.getNode(), LocationSize::beforeOrAfterPointer(), Store.getNode(),
+ LocationSize::beforeOrAfterPointer(), *DAG, IsAlias);
EXPECT_FALSE(IsValid);
}
@@ -165,14 +161,13 @@ TEST_F(SelectionDAGAddressAnalysisTest, noAliasingFrameObjects) {
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
PtrInfo.getWithOffset(Offset1));
- std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
+ TypeSize NumBytes0 = cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize();
+ TypeSize NumBytes1 = cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize();
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store0.getNode(), NumBytes0, Store1.getNode(), NumBytes1, *DAG, IsAlias);
+ Store0.getNode(), LocationSize::precise(NumBytes0), Store1.getNode(),
+ LocationSize::precise(NumBytes1), *DAG, IsAlias);
EXPECT_TRUE(IsValid);
EXPECT_FALSE(IsAlias);
@@ -195,14 +190,13 @@ TEST_F(SelectionDAGAddressAnalysisTest, unknownSizeFrameObjects) {
DAG->getStore(DAG->getEntryNode(), Loc, Value, FIPtr, PtrInfo);
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
MachinePointerInfo(PtrInfo.getAddrSpace()));
- std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
+ TypeSize NumBytes0 = cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize();
+ TypeSize NumBytes1 = cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize();
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store0.getNode(), NumBytes0, Store1.getNode(), NumBytes1, *DAG, IsAlias);
+ Store0.getNode(), LocationSize::precise(NumBytes0), Store1.getNode(),
+ LocationSize::precise(NumBytes1), *DAG, IsAlias);
EXPECT_FALSE(IsValid);
}
@@ -220,20 +214,19 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithFrameObject) {
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
- std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
+ TypeSize NumBytes = cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize();
EVT GTy = DAG->getTargetLoweringInfo().getValueType(DAG->getDataLayout(),
G->getType());
SDValue GValue = DAG->getConstant(0, Loc, GTy);
SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
MachinePointerInfo(G, 0));
- std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());
+ TypeSize GNumBytes = cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize();
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store.getNode(), NumBytes, GStore.getNode(), GNumBytes, *DAG, IsAlias);
+ Store.getNode(), LocationSize::precise(NumBytes), GStore.getNode(),
+ LocationSize::precise(GNumBytes), *DAG, IsAlias);
EXPECT_TRUE(IsValid);
EXPECT_FALSE(IsAlias);
@@ -248,8 +241,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithAliasedGlobal) {
SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
MachinePointerInfo(G, 0));
- std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());
+ TypeSize GNumBytes = cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize();
SDValue AliasedGValue = DAG->getConstant(1, Loc, GTy);
SDValue AliasedGAddr = DAG->getGlobalAddress(AliasedG, Loc, GTy);
@@ -258,9 +250,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithAliasedGlobal) {
MachinePointerInfo(AliasedG, 0));
bool IsAlias;
- bool IsValid = BaseIndexOffset::computeAliasing(GStore.getNode(), GNumBytes,
- AliasedGStore.getNode(),
- GNumBytes, *DAG, IsAlias);
+ bool IsValid = BaseIndexOffset::computeAliasing(
+ GStore.getNode(), LocationSize::precise(GNumBytes),
+ AliasedGStore.getNode(), LocationSize::precise(GNumBytes), *DAG, IsAlias);
// With some deeper analysis we could detect if G and AliasedG is aliasing or
// not. But computeAliasing is currently defensive and assumes that a
@@ -290,19 +282,19 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsWithinDiff) {
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo.getWithOffset(Offset1));
- std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
+ TypeSize NumBytes0 = cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize();
+ TypeSize NumBytes1 = cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize();
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store0.getNode(), NumBytes0, Store1.getNode(), NumBytes1, *DAG, IsAlias);
+ Store0.getNode(), LocationSize::precise(NumBytes0), Store1.getNode(),
+ LocationSize::precise(NumBytes1), *DAG, IsAlias);
EXPECT_TRUE(IsValid);
EXPECT_FALSE(IsAlias);
IsValid = BaseIndexOffset::computeAliasing(
- Store1.getNode(), NumBytes1, Store0.getNode(), NumBytes0, *DAG, IsAlias);
+ Store1.getNode(), LocationSize::precise(NumBytes1), Store0.getNode(),
+ LocationSize::precise(NumBytes0), *DAG, IsAlias);
EXPECT_TRUE(IsValid);
EXPECT_FALSE(IsAlias);
}
@@ -331,14 +323,13 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsOutOfDiff) {
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo.getWithOffset(Offset1));
- std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
+ TypeSize NumBytes0 = cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize();
+ TypeSize NumBytes1 = cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize();
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store0.getNode(), NumBytes0, Store1.getNode(), NumBytes1, *DAG, IsAlias);
+ Store0.getNode(), LocationSize::precise(NumBytes0), Store1.getNode(),
+ LocationSize::precise(NumBytes1), *DAG, IsAlias);
EXPECT_TRUE(IsValid);
EXPECT_TRUE(IsAlias);
}
@@ -365,14 +356,13 @@ TEST_F(SelectionDAGAddressAnalysisTest, twoFixedStackObjects) {
PtrInfo0.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo1.getWithOffset(Offset0));
- std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
- std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
- cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
+ TypeSize NumBytes0 = cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize();
+ TypeSize NumBytes1 = cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize();
bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
- Store0.getNode(), NumBytes0, Store1.getNode(), NumBytes1, *DAG, IsAlias);
+ Store0.getNode(), LocationSize::precise(NumBytes0), Store1.getNode(),
+ LocationSize::precise(NumBytes1), *DAG, IsAlias);
EXPECT_TRUE(IsValid);
EXPECT_FALSE(IsAlias);
}
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index d923b25fda9f..fdbe8df783b1 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -3327,8 +3327,8 @@ TEST_F(OpenMPIRBuilderTest, SingleDirective) {
EXPECT_NE(IPBB->end(), IP.getPoint());
};
- Builder.restoreIP(OMPBuilder.createSingle(
- Builder, BodyGenCB, FiniCB, /*IsNowait*/ false, /*DidIt*/ nullptr));
+ Builder.restoreIP(
+ OMPBuilder.createSingle(Builder, BodyGenCB, FiniCB, /*IsNowait*/ false));
Value *EntryBBTI = EntryBB->getTerminator();
EXPECT_NE(EntryBBTI, nullptr);
EXPECT_TRUE(isa<BranchInst>(EntryBBTI));
@@ -3417,8 +3417,8 @@ TEST_F(OpenMPIRBuilderTest, SingleDirectiveNowait) {
EXPECT_NE(IPBB->end(), IP.getPoint());
};
- Builder.restoreIP(OMPBuilder.createSingle(
- Builder, BodyGenCB, FiniCB, /*IsNowait*/ true, /*DidIt*/ nullptr));
+ Builder.restoreIP(
+ OMPBuilder.createSingle(Builder, BodyGenCB, FiniCB, /*IsNowait*/ true));
Value *EntryBBTI = EntryBB->getTerminator();
EXPECT_NE(EntryBBTI, nullptr);
EXPECT_TRUE(isa<BranchInst>(EntryBBTI));
@@ -3464,6 +3464,151 @@ TEST_F(OpenMPIRBuilderTest, SingleDirectiveNowait) {
EXPECT_EQ(ExitBarrier, nullptr);
}
+// Helper class to check each instruction of a BB.
+class BBInstIter {
+ BasicBlock *BB;
+ BasicBlock::iterator BBI;
+
+public:
+ BBInstIter(BasicBlock *BB) : BB(BB), BBI(BB->begin()) {}
+
+ bool hasNext() const { return BBI != BB->end(); }
+
+ template <typename InstTy> InstTy *next() {
+ if (!hasNext())
+ return nullptr;
+ Instruction *Cur = &*BBI++;
+ if (!isa<InstTy>(Cur))
+ return nullptr;
+ return cast<InstTy>(Cur);
+ }
+};
+
+TEST_F(OpenMPIRBuilderTest, SingleDirectiveCopyPrivate) {
+ using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
+ OpenMPIRBuilder OMPBuilder(*M);
+ OMPBuilder.initialize();
+ F->setName("func");
+ IRBuilder<> Builder(BB);
+
+ OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL});
+
+ AllocaInst *PrivAI = nullptr;
+
+ BasicBlock *EntryBB = nullptr;
+ BasicBlock *ThenBB = nullptr;
+
+ Value *CPVar = Builder.CreateAlloca(F->arg_begin()->getType());
+ Builder.CreateStore(F->arg_begin(), CPVar);
+
+ FunctionType *CopyFuncTy = FunctionType::get(
+ Builder.getVoidTy(), {Builder.getPtrTy(), Builder.getPtrTy()}, false);
+ Function *CopyFunc =
+ Function::Create(CopyFuncTy, Function::PrivateLinkage, "copy_var", *M);
+
+ auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
+ if (AllocaIP.isSet())
+ Builder.restoreIP(AllocaIP);
+ else
+ Builder.SetInsertPoint(&*(F->getEntryBlock().getFirstInsertionPt()));
+ PrivAI = Builder.CreateAlloca(F->arg_begin()->getType());
+ Builder.CreateStore(F->arg_begin(), PrivAI);
+
+ llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
+ llvm::Instruction *CodeGenIPInst = &*CodeGenIP.getPoint();
+ EXPECT_EQ(CodeGenIPBB->getTerminator(), CodeGenIPInst);
+
+ Builder.restoreIP(CodeGenIP);
+
+ // collect some info for checks later
+ ThenBB = Builder.GetInsertBlock();
+ EntryBB = ThenBB->getUniquePredecessor();
+
+ // simple instructions for body
+ Value *PrivLoad =
+ Builder.CreateLoad(PrivAI->getAllocatedType(), PrivAI, "local.use");
+ Builder.CreateICmpNE(F->arg_begin(), PrivLoad);
+ };
+
+ auto FiniCB = [&](InsertPointTy IP) {
+ BasicBlock *IPBB = IP.getBlock();
+ // IP must be before the unconditional branch to ExitBB
+ EXPECT_NE(IPBB->end(), IP.getPoint());
+ };
+
+ Builder.restoreIP(OMPBuilder.createSingle(Builder, BodyGenCB, FiniCB,
+ /*IsNowait*/ false, {CPVar},
+ {CopyFunc}));
+ Value *EntryBBTI = EntryBB->getTerminator();
+ EXPECT_NE(EntryBBTI, nullptr);
+ EXPECT_TRUE(isa<BranchInst>(EntryBBTI));
+ BranchInst *EntryBr = cast<BranchInst>(EntryBB->getTerminator());
+ EXPECT_TRUE(EntryBr->isConditional());
+ EXPECT_EQ(EntryBr->getSuccessor(0), ThenBB);
+ BasicBlock *ExitBB = ThenBB->getUniqueSuccessor();
+ EXPECT_EQ(EntryBr->getSuccessor(1), ExitBB);
+
+ CmpInst *CondInst = cast<CmpInst>(EntryBr->getCondition());
+ EXPECT_TRUE(isa<CallInst>(CondInst->getOperand(0)));
+
+ CallInst *SingleEntryCI = cast<CallInst>(CondInst->getOperand(0));
+ EXPECT_EQ(SingleEntryCI->arg_size(), 2U);
+ EXPECT_EQ(SingleEntryCI->getCalledFunction()->getName(), "__kmpc_single");
+ EXPECT_TRUE(isa<GlobalVariable>(SingleEntryCI->getArgOperand(0)));
+
+ // check ThenBB
+ BBInstIter ThenBBI(ThenBB);
+ // load PrivAI
+ auto *PrivLI = ThenBBI.next<LoadInst>();
+ EXPECT_NE(PrivLI, nullptr);
+ EXPECT_EQ(PrivLI->getPointerOperand(), PrivAI);
+ // icmp
+ EXPECT_TRUE(ThenBBI.next<ICmpInst>());
+ // store 1, DidIt
+ auto *DidItSI = ThenBBI.next<StoreInst>();
+ EXPECT_NE(DidItSI, nullptr);
+ EXPECT_EQ(DidItSI->getValueOperand(),
+ ConstantInt::get(Type::getInt32Ty(Ctx), 1));
+ Value *DidIt = DidItSI->getPointerOperand();
+ // call __kmpc_end_single
+ auto *SingleEndCI = ThenBBI.next<CallInst>();
+ EXPECT_NE(SingleEndCI, nullptr);
+ EXPECT_EQ(SingleEndCI->getCalledFunction()->getName(), "__kmpc_end_single");
+ EXPECT_EQ(SingleEndCI->arg_size(), 2U);
+ EXPECT_TRUE(isa<GlobalVariable>(SingleEndCI->getArgOperand(0)));
+ EXPECT_EQ(SingleEndCI->getArgOperand(1), SingleEntryCI->getArgOperand(1));
+ // br ExitBB
+ auto *ExitBBBI = ThenBBI.next<BranchInst>();
+ EXPECT_NE(ExitBBBI, nullptr);
+ EXPECT_TRUE(ExitBBBI->isUnconditional());
+ EXPECT_EQ(ExitBBBI->getOperand(0), ExitBB);
+ EXPECT_FALSE(ThenBBI.hasNext());
+
+ // check ExitBB
+ BBInstIter ExitBBI(ExitBB);
+ // call __kmpc_global_thread_num
+ auto *ThreadNumCI = ExitBBI.next<CallInst>();
+ EXPECT_NE(ThreadNumCI, nullptr);
+ EXPECT_EQ(ThreadNumCI->getCalledFunction()->getName(),
+ "__kmpc_global_thread_num");
+ // load DidIt
+ auto *DidItLI = ExitBBI.next<LoadInst>();
+ EXPECT_NE(DidItLI, nullptr);
+ EXPECT_EQ(DidItLI->getPointerOperand(), DidIt);
+ // call __kmpc_copyprivate
+ auto *CopyPrivateCI = ExitBBI.next<CallInst>();
+ EXPECT_NE(CopyPrivateCI, nullptr);
+ EXPECT_EQ(CopyPrivateCI->arg_size(), 6U);
+ EXPECT_TRUE(isa<AllocaInst>(CopyPrivateCI->getArgOperand(3)));
+ EXPECT_EQ(CopyPrivateCI->getArgOperand(3), CPVar);
+ EXPECT_TRUE(isa<Function>(CopyPrivateCI->getArgOperand(4)));
+ EXPECT_EQ(CopyPrivateCI->getArgOperand(4), CopyFunc);
+ EXPECT_TRUE(isa<LoadInst>(CopyPrivateCI->getArgOperand(5)));
+ DidItLI = cast<LoadInst>(CopyPrivateCI->getArgOperand(5));
+ EXPECT_EQ(DidItLI->getOperand(0), DidIt);
+ EXPECT_FALSE(ExitBBI.hasNext());
+}
+
TEST_F(OpenMPIRBuilderTest, OMPAtomicReadFlt) {
OpenMPIRBuilder OMPBuilder(*M);
OMPBuilder.initialize();
diff --git a/llvm/unittests/IR/MetadataTest.cpp b/llvm/unittests/IR/MetadataTest.cpp
index 767dd1a59d2b..20d4090135dd 100644
--- a/llvm/unittests/IR/MetadataTest.cpp
+++ b/llvm/unittests/IR/MetadataTest.cpp
@@ -106,7 +106,7 @@ protected:
DIType *getDerivedType() {
return DIDerivedType::getDistinct(
Context, dwarf::DW_TAG_pointer_type, "", nullptr, 0, nullptr,
- getBasicType("basictype"), 1, 2, 0, std::nullopt, DINode::FlagZero);
+ getBasicType("basictype"), 1, 2, 0, std::nullopt, {}, DINode::FlagZero);
}
Constant *getConstant() {
return ConstantInt::get(Type::getInt32Ty(Context), Counter++);
@@ -461,7 +461,7 @@ TEST_F(MDNodeTest, PrintTree) {
auto *StructTy = cast<DICompositeType>(getCompositeType());
DIType *PointerTy = DIDerivedType::getDistinct(
Context, dwarf::DW_TAG_pointer_type, "", nullptr, 0, nullptr, StructTy,
- 1, 2, 0, std::nullopt, DINode::FlagZero);
+ 1, 2, 0, std::nullopt, {}, DINode::FlagZero);
StructTy->replaceElements(MDTuple::get(Context, PointerTy));
auto *Var = DILocalVariable::get(Context, Scope, "foo", File,
@@ -1864,13 +1864,17 @@ TEST_F(DIDerivedTypeTest, get) {
DIType *BaseType = getBasicType("basic");
MDTuple *ExtraData = getTuple();
unsigned DWARFAddressSpace = 8;
+ DIDerivedType::PtrAuthData PtrAuthData(1, false, 1234, true, true);
+ DIDerivedType::PtrAuthData PtrAuthData2(1, false, 1234, true, false);
DINode::DIFlags Flags5 = static_cast<DINode::DIFlags>(5);
DINode::DIFlags Flags4 = static_cast<DINode::DIFlags>(4);
- auto *N =
- DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something", File,
- 1, Scope, BaseType, 2, 3, 4, DWARFAddressSpace, Flags5,
- ExtraData);
+ auto *N = DIDerivedType::get(
+ Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope,
+ BaseType, 2, 3, 4, DWARFAddressSpace, std::nullopt, Flags5, ExtraData);
+ auto *N1 = DIDerivedType::get(Context, dwarf::DW_TAG_LLVM_ptrauth_type, "",
+ File, 1, Scope, N, 2, 3, 4, DWARFAddressSpace,
+ PtrAuthData, Flags5, ExtraData);
EXPECT_EQ(dwarf::DW_TAG_pointer_type, N->getTag());
EXPECT_EQ("something", N->getName());
EXPECT_EQ(File, N->getFile());
@@ -1881,53 +1885,73 @@ TEST_F(DIDerivedTypeTest, get) {
EXPECT_EQ(3u, N->getAlignInBits());
EXPECT_EQ(4u, N->getOffsetInBits());
EXPECT_EQ(DWARFAddressSpace, *N->getDWARFAddressSpace());
+ EXPECT_EQ(std::nullopt, N->getPtrAuthData());
+ EXPECT_EQ(PtrAuthData, N1->getPtrAuthData());
+ EXPECT_NE(PtrAuthData2, N1->getPtrAuthData());
EXPECT_EQ(5u, N->getFlags());
EXPECT_EQ(ExtraData, N->getExtraData());
EXPECT_EQ(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, Scope, BaseType, 2, 3,
- 4, DWARFAddressSpace, Flags5, ExtraData));
+ 4, DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_reference_type,
"something", File, 1, Scope, BaseType, 2, 3,
- 4, DWARFAddressSpace, Flags5, ExtraData));
+ 4, DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "else",
- File, 1, Scope, BaseType, 2, 3,
- 4, DWARFAddressSpace, Flags5, ExtraData));
+ File, 1, Scope, BaseType, 2, 3, 4,
+ DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", getFile(), 1, Scope, BaseType, 2,
- 3, 4, DWARFAddressSpace, Flags5, ExtraData));
+ 3, 4, DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 2, Scope, BaseType, 2, 3,
- 4, DWARFAddressSpace, Flags5, ExtraData));
+ 4, DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, getSubprogram(),
- BaseType, 2, 3, 4, DWARFAddressSpace, Flags5,
- ExtraData));
+ BaseType, 2, 3, 4, DWARFAddressSpace,
+ std::nullopt, Flags5, ExtraData));
EXPECT_NE(N, DIDerivedType::get(
Context, dwarf::DW_TAG_pointer_type, "something", File, 1,
Scope, getBasicType("basic2"), 2, 3, 4, DWARFAddressSpace,
- Flags5, ExtraData));
+ std::nullopt, Flags5, ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, Scope, BaseType, 3, 3,
- 4, DWARFAddressSpace, Flags5, ExtraData));
+ 4, DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, Scope, BaseType, 2, 2,
- 4, DWARFAddressSpace, Flags5, ExtraData));
+ 4, DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, Scope, BaseType, 2, 3,
- 5, DWARFAddressSpace, Flags5, ExtraData));
+ 5, DWARFAddressSpace, std::nullopt, Flags5,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, Scope, BaseType, 2, 3,
- 4, DWARFAddressSpace + 1, Flags5, ExtraData));
+ 4, DWARFAddressSpace + 1, std::nullopt,
+ Flags5, ExtraData));
+ EXPECT_NE(N1,
+ DIDerivedType::get(Context, dwarf::DW_TAG_LLVM_ptrauth_type, "",
+ File, 1, Scope, N, 2, 3, 4, DWARFAddressSpace,
+ std::nullopt, Flags5, ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, Scope, BaseType, 2, 3,
- 4, DWARFAddressSpace, Flags4, ExtraData));
+ 4, DWARFAddressSpace, std::nullopt, Flags4,
+ ExtraData));
EXPECT_NE(N, DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type,
"something", File, 1, Scope, BaseType, 2, 3,
- 4, DWARFAddressSpace, Flags5, getTuple()));
+ 4, DWARFAddressSpace, std::nullopt, Flags5,
+ getTuple()));
TempDIDerivedType Temp = N->clone();
EXPECT_EQ(N, MDNode::replaceWithUniqued(std::move(Temp)));
+ TempDIDerivedType Temp1 = N1->clone();
+ EXPECT_EQ(N1, MDNode::replaceWithUniqued(std::move(Temp1)));
}
TEST_F(DIDerivedTypeTest, getWithLargeValues) {
@@ -1937,14 +1961,23 @@ TEST_F(DIDerivedTypeTest, getWithLargeValues) {
MDTuple *ExtraData = getTuple();
DINode::DIFlags Flags = static_cast<DINode::DIFlags>(5);
- auto *N = DIDerivedType::get(
- Context, dwarf::DW_TAG_pointer_type, "something", File, 1, Scope,
- BaseType, UINT64_MAX, UINT32_MAX - 1, UINT64_MAX - 2, UINT32_MAX - 3,
- Flags, ExtraData);
+ auto *N = DIDerivedType::get(Context, dwarf::DW_TAG_pointer_type, "something",
+ File, 1, Scope, BaseType, UINT64_MAX,
+ UINT32_MAX - 1, UINT64_MAX - 2, UINT32_MAX - 3,
+ std::nullopt, Flags, ExtraData);
EXPECT_EQ(UINT64_MAX, N->getSizeInBits());
EXPECT_EQ(UINT32_MAX - 1, N->getAlignInBits());
EXPECT_EQ(UINT64_MAX - 2, N->getOffsetInBits());
EXPECT_EQ(UINT32_MAX - 3, *N->getDWARFAddressSpace());
+
+ auto *N1 = DIDerivedType::get(
+ Context, dwarf::DW_TAG_LLVM_ptrauth_type, "", File, 1, Scope, N,
+ UINT64_MAX, UINT32_MAX - 1, UINT64_MAX - 2, UINT32_MAX - 3,
+ DIDerivedType::PtrAuthData(7, true, 0xffff, true, false), Flags,
+ ExtraData);
+ EXPECT_EQ(7U, *N1->getPtrAuthKey());
+ EXPECT_EQ(true, *N1->isPtrAuthAddressDiscriminated());
+ EXPECT_EQ(0xffffU, *N1->getPtrAuthExtraDiscriminator());
}
typedef MetadataTest DICompositeTypeTest;
@@ -4268,7 +4301,7 @@ TEST_F(MDTupleAllocationTest, Tracking2) {
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG) && !defined(GTEST_HAS_SEH)
typedef MetadataTest MDTupleAllocationDeathTest;
TEST_F(MDTupleAllocationDeathTest, ResizeRejected) {
- MDTuple *A = MDTuple::get(Context, None);
+ MDTuple *A = MDTuple::get(Context, std::nullopt);
auto *Value1 = getConstantAsMetadata();
EXPECT_DEATH(A->push_back(Value1),
"Resizing is not supported for uniqued nodes");
diff --git a/llvm/unittests/IR/VerifierTest.cpp b/llvm/unittests/IR/VerifierTest.cpp
index 31e3b9dfab4b..b2cd71e6a385 100644
--- a/llvm/unittests/IR/VerifierTest.cpp
+++ b/llvm/unittests/IR/VerifierTest.cpp
@@ -339,5 +339,33 @@ TEST(VerifierTest, SwitchInst) {
EXPECT_TRUE(verifyFunction(*F));
}
+TEST(VerifierTest, CrossFunctionRef) {
+ LLVMContext C;
+ Module M("M", C);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg=*/false);
+ Function *F1 = Function::Create(FTy, Function::ExternalLinkage, "foo1", M);
+ Function *F2 = Function::Create(FTy, Function::ExternalLinkage, "foo2", M);
+ BasicBlock *Entry1 = BasicBlock::Create(C, "entry", F1);
+ BasicBlock *Entry2 = BasicBlock::Create(C, "entry", F2);
+ Type *I32 = Type::getInt32Ty(C);
+
+ Value *Alloca = new AllocaInst(I32, 0, "alloca", Entry1);
+ ReturnInst::Create(C, Entry1);
+
+ Instruction *Store = new StoreInst(ConstantInt::get(I32, 0), Alloca, Entry2);
+ ReturnInst::Create(C, Entry2);
+
+ std::string Error;
+ raw_string_ostream ErrorOS(Error);
+ EXPECT_TRUE(verifyModule(M, &ErrorOS));
+ EXPECT_TRUE(
+ StringRef(ErrorOS.str())
+ .starts_with("Referring to an instruction in another function!"));
+
+ // Explicitly erase the store to avoid a use-after-free when the module is
+ // destroyed.
+ Store->eraseFromParent();
+}
+
} // end anonymous namespace
} // end namespace llvm
diff --git a/llvm/unittests/Support/ErrorTest.cpp b/llvm/unittests/Support/ErrorTest.cpp
index 11f93203597b..1229282cf0de 100644
--- a/llvm/unittests/Support/ErrorTest.cpp
+++ b/llvm/unittests/Support/ErrorTest.cpp
@@ -1132,4 +1132,30 @@ TEST(Error, moveInto) {
}
}
+TEST(Error, FatalBadAllocErrorHandlersInteraction) {
+ auto ErrorHandler = [](void *Data, const char *, bool) {};
+ install_fatal_error_handler(ErrorHandler, nullptr);
+ // The following call should not crash; previously, a bug in
+ // install_bad_alloc_error_handler asserted that no fatal-error handler is
+ // installed already.
+ install_bad_alloc_error_handler(ErrorHandler, nullptr);
+
+ // Don't interfere with other tests.
+ remove_fatal_error_handler();
+ remove_bad_alloc_error_handler();
+}
+
+TEST(Error, BadAllocFatalErrorHandlersInteraction) {
+ auto ErrorHandler = [](void *Data, const char *, bool) {};
+ install_bad_alloc_error_handler(ErrorHandler, nullptr);
+ // The following call should not crash; related to
+ // FatalBadAllocErrorHandlersInteraction: Ensure that the error does not occur
+ // in the other direction.
+ install_fatal_error_handler(ErrorHandler, nullptr);
+
+ // Don't interfere with other tests.
+ remove_fatal_error_handler();
+ remove_bad_alloc_error_handler();
+}
+
} // namespace
diff --git a/llvm/unittests/Support/KnownBitsTest.cpp b/llvm/unittests/Support/KnownBitsTest.cpp
index c0377d45c303..fb9210bffcb4 100644
--- a/llvm/unittests/Support/KnownBitsTest.cpp
+++ b/llvm/unittests/Support/KnownBitsTest.cpp
@@ -244,6 +244,32 @@ TEST(KnownBitsTest, SubBorrowExhaustive) {
});
}
+TEST(KnownBitsTest, AbsDiffSpecialCase) {
+ // There are 2 implementation of absdiff - both are currently needed to cover
+ // extra cases.
+ KnownBits LHS, RHS, Res;
+
+ // absdiff(LHS,RHS) = sub(umax(LHS,RHS), umin(LHS,RHS)).
+ // Actual: false (Inputs = 1011, 101?, Computed = 000?, Exact = 000?)
+ LHS.One = APInt(4, 0b1011);
+ RHS.One = APInt(4, 0b1010);
+ LHS.Zero = APInt(4, 0b0100);
+ RHS.Zero = APInt(4, 0b0100);
+ Res = KnownBits::absdiff(LHS, RHS);
+ EXPECT_EQ(0b0000ul, Res.One.getZExtValue());
+ EXPECT_EQ(0b1110ul, Res.Zero.getZExtValue());
+
+ // find the common bits between sub(LHS,RHS) and sub(RHS,LHS).
+ // Actual: false (Inputs = ???1, 1000, Computed = ???1, Exact = 0??1)
+ LHS.One = APInt(4, 0b0001);
+ RHS.One = APInt(4, 0b1000);
+ LHS.Zero = APInt(4, 0b0000);
+ RHS.Zero = APInt(4, 0b0111);
+ Res = KnownBits::absdiff(LHS, RHS);
+ EXPECT_EQ(0b0001ul, Res.One.getZExtValue());
+ EXPECT_EQ(0b0000ul, Res.Zero.getZExtValue());
+}
+
TEST(KnownBitsTest, BinaryExhaustive) {
testBinaryOpExhaustive(
[](const KnownBits &Known1, const KnownBits &Known2) {
@@ -281,7 +307,14 @@ TEST(KnownBitsTest, BinaryExhaustive) {
return KnownBits::smin(Known1, Known2);
},
[](const APInt &N1, const APInt &N2) { return APIntOps::smin(N1, N2); });
-
+ testBinaryOpExhaustive(
+ [](const KnownBits &Known1, const KnownBits &Known2) {
+ return KnownBits::absdiff(Known1, Known2);
+ },
+ [](const APInt &N1, const APInt &N2) {
+ return APIntOps::absdiff(N1, N2);
+ },
+ checkCorrectnessOnlyBinary);
testBinaryOpExhaustive(
[](const KnownBits &Known1, const KnownBits &Known2) {
return KnownBits::udiv(Known1, Known2);
diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp b/llvm/unittests/Support/RISCVISAInfoTest.cpp
index df4c7f7de8a3..82cce23638d5 100644
--- a/llvm/unittests/Support/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -752,6 +752,7 @@ R"(All available -march extensions for RISC-V
zmmul 1.0
za128rs 1.0
za64rs 1.0
+ zacas 1.0
zawrs 1.0
zfa 1.0
zfh 1.0
@@ -873,7 +874,6 @@ Experimental extensions
zimop 0.1
zaamo 0.2
zabha 1.0
- zacas 1.0
zalasr 0.1
zalrsc 0.2
zfbfmin 1.0
diff --git a/llvm/unittests/Support/TypeSizeTest.cpp b/llvm/unittests/Support/TypeSizeTest.cpp
index 34fe376989e7..b02b7e600953 100644
--- a/llvm/unittests/Support/TypeSizeTest.cpp
+++ b/llvm/unittests/Support/TypeSizeTest.cpp
@@ -81,7 +81,6 @@ static_assert(INT64_C(2) * TSFixed32 == TypeSize::getFixed(64));
static_assert(UINT64_C(2) * TSFixed32 == TypeSize::getFixed(64));
static_assert(alignTo(TypeSize::getFixed(7), 8) == TypeSize::getFixed(8));
-static_assert(TypeSize() == TypeSize::getFixed(0));
static_assert(TypeSize::getZero() == TypeSize::getFixed(0));
static_assert(TypeSize::getZero() != TypeSize::getScalable(0));
static_assert(TypeSize::getFixed(0) != TypeSize::getScalable(0));
diff --git a/llvm/unittests/TargetParser/TargetParserTest.cpp b/llvm/unittests/TargetParser/TargetParserTest.cpp
index e89fc687451c..297100441113 100644
--- a/llvm/unittests/TargetParser/TargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/TargetParserTest.cpp
@@ -439,7 +439,7 @@ INSTANTIATE_TEST_SUITE_P(
ARM::AEK_HWDIVARM | ARM::AEK_MP | ARM::AEK_SEC |
ARM::AEK_VIRT | ARM::AEK_DSP | ARM::AEK_BF16 |
ARM::AEK_DOTPROD | ARM::AEK_RAS | ARM::AEK_I8MM |
- ARM::AEK_SB,
+ ARM::AEK_FP16FML | ARM::AEK_SB,
"9-A"),
ARMCPUTestParams<uint64_t>("neoverse-v1", "armv8.4-a", "crypto-neon-fp-armv8",
ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT |
@@ -1575,8 +1575,9 @@ INSTANTIATE_TEST_SUITE_P(
AArch64::AEK_SB, AArch64::AEK_SVE2,
AArch64::AEK_SVE2BITPERM, AArch64::AEK_BF16,
AArch64::AEK_I8MM, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA, AArch64::AEK_PAUTH})),
- "8.5-A"),
+ AArch64::AEK_FCMA, AArch64::AEK_PAUTH,
+ AArch64::AEK_FP16FML})),
+ "9-A"),
ARMCPUTestParams<AArch64::ExtensionBitset>(
"ampere1", "armv8.6-a", "crypto-neon-fp-armv8",
(AArch64::ExtensionBitset(
diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index b5bd9bfd21be..febd96086df2 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -1976,7 +1976,8 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
<< "convertToMCInst(unsigned Kind, MCInst &Inst, "
<< "unsigned Opcode,\n"
<< " const OperandVector &Operands,\n"
- << " const SmallBitVector &OptionalOperandsMask) {\n";
+ << " const SmallBitVector &OptionalOperandsMask,\n"
+ << " ArrayRef<unsigned> DefaultsOffset) {\n";
} else {
CvtOS << "void " << Target.getName() << ClassName << "::\n"
<< "convertToMCInst(unsigned Kind, MCInst &Inst, "
@@ -1985,25 +1986,13 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
}
CvtOS << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n";
CvtOS << " const uint8_t *Converter = ConversionTable[Kind];\n";
- if (HasOptionalOperands) {
- size_t MaxNumOperands = 0;
- for (const auto &MI : Infos) {
- MaxNumOperands = std::max(MaxNumOperands, MI->AsmOperands.size());
- }
- CvtOS << " unsigned DefaultsOffset[" << (MaxNumOperands + 1)
- << "] = { 0 };\n";
- CvtOS << " assert(OptionalOperandsMask.size() == " << (MaxNumOperands)
- << ");\n";
- CvtOS << " for (unsigned i = 0, NumDefaults = 0; i < " << (MaxNumOperands)
- << "; ++i) {\n";
- CvtOS << " DefaultsOffset[i + 1] = NumDefaults;\n";
- CvtOS << " NumDefaults += (OptionalOperandsMask[i] ? 1 : 0);\n";
- CvtOS << " }\n";
- }
CvtOS << " unsigned OpIdx;\n";
CvtOS << " Inst.setOpcode(Opcode);\n";
CvtOS << " for (const uint8_t *p = Converter; *p; p += 2) {\n";
if (HasOptionalOperands) {
+ // When optional operands are involved, formal and actual operand indices
+ // may differ. Map the former to the latter by subtracting the number of
+ // absent optional operands.
CvtOS << " OpIdx = *(p + 1) - DefaultsOffset[*(p + 1)];\n";
} else {
CvtOS << " OpIdx = *(p + 1);\n";
@@ -3031,15 +3020,17 @@ emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
}
static void emitAsmTiedOperandConstraints(CodeGenTarget &Target,
- AsmMatcherInfo &Info,
- raw_ostream &OS) {
+ AsmMatcherInfo &Info, raw_ostream &OS,
+ bool HasOptionalOperands) {
std::string AsmParserName =
std::string(Info.AsmParser->getValueAsString("AsmParserClassName"));
OS << "static bool ";
OS << "checkAsmTiedOperandConstraints(const " << Target.getName()
<< AsmParserName << "&AsmParser,\n";
- OS << " unsigned Kind,\n";
- OS << " const OperandVector &Operands,\n";
+ OS << " unsigned Kind, const OperandVector "
+ "&Operands,\n";
+ if (HasOptionalOperands)
+ OS << " ArrayRef<unsigned> DefaultsOffset,\n";
OS << " uint64_t &ErrorInfo) {\n";
OS << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n";
OS << " const uint8_t *Converter = ConversionTable[Kind];\n";
@@ -3052,6 +3043,13 @@ static void emitAsmTiedOperandConstraints(CodeGenTarget &Target,
OS << " \"Tied operand not found\");\n";
OS << " unsigned OpndNum1 = TiedAsmOperandTable[OpIdx][1];\n";
OS << " unsigned OpndNum2 = TiedAsmOperandTable[OpIdx][2];\n";
+ if (HasOptionalOperands) {
+ // When optional operands are involved, formal and actual operand indices
+ // may differ. Map the former to the latter by subtracting the number of
+ // absent optional operands.
+ OS << " OpndNum1 = OpndNum1 - DefaultsOffset[OpndNum1];\n";
+ OS << " OpndNum2 = OpndNum2 - DefaultsOffset[OpndNum2];\n";
+ }
OS << " if (OpndNum1 != OpndNum2) {\n";
OS << " auto &SrcOp1 = Operands[OpndNum1];\n";
OS << " auto &SrcOp2 = Operands[OpndNum2];\n";
@@ -3291,7 +3289,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
<< "unsigned Opcode,\n"
<< " const OperandVector &Operands,\n"
<< " const SmallBitVector "
- "&OptionalOperandsMask);\n";
+ "&OptionalOperandsMask,\n"
+ << " ArrayRef<unsigned> DefaultsOffset);\n";
} else {
OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
<< "unsigned Opcode,\n"
@@ -3405,7 +3404,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
Info.SubtargetFeatures, OS);
if (!ReportMultipleNearMisses)
- emitAsmTiedOperandConstraints(Target, Info, OS);
+ emitAsmTiedOperandConstraints(Target, Info, OS, HasOptionalOperands);
StringToOffsetTable StringTable;
@@ -3928,11 +3927,39 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " }\n\n";
}
+ // When converting parsed operands to MCInst we need to know whether optional
+ // operands were parsed or not so that we can choose the correct converter
+ // function. We also need to know this when checking tied operand constraints.
+ // DefaultsOffset is an array of deltas between the formal (MCInst) and the
+ // actual (parsed operand array) operand indices. When all optional operands
+ // are present, all elements of the array are zeros. If some of the optional
+ // operands are absent, the array might look like '0, 0, 1, 1, 1, 2, 2, 3',
+ // where each increment in value reflects the absence of an optional operand.
+ if (HasOptionalOperands) {
+ OS << " unsigned DefaultsOffset[" << (MaxNumOperands + 1)
+ << "] = { 0 };\n";
+ OS << " assert(OptionalOperandsMask.size() == " << (MaxNumOperands)
+ << ");\n";
+ OS << " for (unsigned i = 0, NumDefaults = 0; i < " << (MaxNumOperands)
+ << "; ++i) {\n";
+ OS << " DefaultsOffset[i + 1] = NumDefaults;\n";
+ OS << " NumDefaults += (OptionalOperandsMask[i] ? 1 : 0);\n";
+ OS << " }\n\n";
+ }
+
OS << " if (matchingInlineAsm) {\n";
OS << " convertToMapAndConstraints(it->ConvertFn, Operands);\n";
if (!ReportMultipleNearMisses) {
- OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
- "Operands, ErrorInfo))\n";
+ if (HasOptionalOperands) {
+ OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
+ "Operands,\n";
+ OS << " DefaultsOffset, "
+ "ErrorInfo))\n";
+ } else {
+ OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
+ "Operands,\n";
+ OS << " ErrorInfo))\n";
+ }
OS << " return Match_InvalidTiedOperand;\n";
OS << "\n";
}
@@ -3942,7 +3969,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
<< " // operands into the appropriate MCInst.\n";
if (HasOptionalOperands) {
OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands,\n"
- << " OptionalOperandsMask);\n";
+ << " OptionalOperandsMask, DefaultsOffset);\n";
} else {
OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
}
@@ -4022,8 +4049,16 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
}
if (!ReportMultipleNearMisses) {
- OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
- "Operands, ErrorInfo))\n";
+ if (HasOptionalOperands) {
+ OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
+ "Operands,\n";
+ OS << " DefaultsOffset, "
+ "ErrorInfo))\n";
+ } else {
+ OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
+ "Operands,\n";
+ OS << " ErrorInfo))\n";
+ }
OS << " return Match_InvalidTiedOperand;\n";
OS << "\n";
}
diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp
index d80761d5fe35..1e80eb6b1ad5 100644
--- a/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -365,8 +365,8 @@ void CodeEmitterGen::emitInstructionBaseValues(
if (HwMode == -1)
o << " static const uint64_t InstBits[] = {\n";
else
- o << " static const uint64_t InstBits_" << HWM.getMode(HwMode).Name
- << "[] = {\n";
+ o << " static const uint64_t InstBits_"
+ << HWM.getModeName(HwMode, /*IncludeDefault=*/true) << "[] = {\n";
for (const CodeGenInstruction *CGI : NumberedInstructions) {
Record *R = CGI->TheDef;
@@ -495,8 +495,8 @@ void CodeEmitterGen::run(raw_ostream &o) {
o << " switch (HwMode) {\n";
o << " default: llvm_unreachable(\"Unknown hardware mode!\"); break;\n";
for (unsigned I : HwModes) {
- o << " case " << I << ": InstBits = InstBits_" << HWM.getMode(I).Name
- << "; break;\n";
+ o << " case " << I << ": InstBits = InstBits_"
+ << HWM.getModeName(I, /*IncludeDefault=*/true) << "; break;\n";
}
o << " };\n";
}
diff --git a/llvm/utils/TableGen/CodeGenHwModes.h b/llvm/utils/TableGen/CodeGenHwModes.h
index 56639f741ede..23723b7bd4af 100644
--- a/llvm/utils/TableGen/CodeGenHwModes.h
+++ b/llvm/utils/TableGen/CodeGenHwModes.h
@@ -52,6 +52,11 @@ struct CodeGenHwModes {
assert(Id != 0 && "Mode id of 0 is reserved for the default mode");
return Modes[Id - 1];
}
+ StringRef getModeName(unsigned Id, bool IncludeDefault = false) const {
+ if (IncludeDefault && Id == CodeGenHwModes::DefaultMode)
+ return DefaultModeName;
+ return getMode(Id).Name;
+ }
const HwModeSelect &getHwModeSelect(Record *R) const;
const std::map<Record *, HwModeSelect> &getHwModeSelects() const {
return ModeSelects;
diff --git a/llvm/utils/TableGen/CodeGenTarget.cpp b/llvm/utils/TableGen/CodeGenTarget.cpp
index f26815c2f184..980c9bdb6367 100644
--- a/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -91,6 +91,7 @@ StringRef llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::isVoid: return "MVT::isVoid";
case MVT::v1i1: return "MVT::v1i1";
case MVT::v2i1: return "MVT::v2i1";
+ case MVT::v3i1: return "MVT::v3i1";
case MVT::v4i1: return "MVT::v4i1";
case MVT::v8i1: return "MVT::v8i1";
case MVT::v16i1: return "MVT::v16i1";
@@ -107,6 +108,7 @@ StringRef llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::v128i4: return "MVT::v128i4";
case MVT::v1i8: return "MVT::v1i8";
case MVT::v2i8: return "MVT::v2i8";
+ case MVT::v3i8: return "MVT::v3i8";
case MVT::v4i8: return "MVT::v4i8";
case MVT::v8i8: return "MVT::v8i8";
case MVT::v16i8: return "MVT::v16i8";
diff --git a/llvm/utils/TableGen/DXILEmitter.cpp b/llvm/utils/TableGen/DXILEmitter.cpp
index d47df597d53a..fc958f532873 100644
--- a/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/llvm/utils/TableGen/DXILEmitter.cpp
@@ -11,11 +11,14 @@
//
//===----------------------------------------------------------------------===//
+#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Support/DXILABI.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
@@ -30,28 +33,15 @@ struct DXILShaderModel {
int Minor = 0;
};
-struct DXILParameter {
- int Pos; // position in parameter list
- ParameterKind Kind;
- StringRef Name; // short, unique name
- StringRef Doc; // the documentation description of this parameter
- bool IsConst; // whether this argument requires a constant value in the IR
- StringRef EnumName; // the name of the enum type if applicable
- int MaxValue; // the maximum value for this parameter if applicable
- DXILParameter(const Record *R);
-};
-
struct DXILOperationDesc {
- StringRef OpName; // name of DXIL operation
+ std::string OpName; // name of DXIL operation
int OpCode; // ID of DXIL operation
StringRef OpClass; // name of the opcode class
- StringRef Category; // classification for this instruction
StringRef Doc; // the documentation description of this instruction
-
- SmallVector<DXILParameter> Params; // the operands that this instruction takes
- SmallVector<ParameterKind> OverloadTypes; // overload types if applicable
- StringRef Attr; // operation attribute; reference to string representation
- // of llvm::Attribute::AttrKind
+ SmallVector<MVT::SimpleValueType> OpTypes; // Vector of operand types -
+ // return type is at index 0
+ SmallVector<std::string>
+ OpAttributes; // operation attribute represented as strings
StringRef Intrinsic; // The llvm intrinsic map to OpName. Default is "" which
// means no map exists
bool IsDeriv = false; // whether this is some kind of derivative
@@ -74,81 +64,99 @@ struct DXILOperationDesc {
};
} // end anonymous namespace
-/*!
- Convert DXIL type name string to dxil::ParameterKind
-
- @param typeNameStr Type name string
- @return ParameterKind As defined in llvm/Support/DXILABI.h
-*/
-static ParameterKind lookupParameterKind(StringRef typeNameStr) {
- auto paramKind = StringSwitch<ParameterKind>(typeNameStr)
- .Case("llvm_void_ty", ParameterKind::VOID)
- .Case("llvm_half_ty", ParameterKind::HALF)
- .Case("llvm_float_ty", ParameterKind::FLOAT)
- .Case("llvm_double_ty", ParameterKind::DOUBLE)
- .Case("llvm_i1_ty", ParameterKind::I1)
- .Case("llvm_i8_ty", ParameterKind::I8)
- .Case("llvm_i16_ty", ParameterKind::I16)
- .Case("llvm_i32_ty", ParameterKind::I32)
- .Case("llvm_i64_ty", ParameterKind::I64)
- .Case("llvm_anyfloat_ty", ParameterKind::OVERLOAD)
- .Case("llvm_anyint_ty", ParameterKind::OVERLOAD)
- .Case("dxil_handle_ty", ParameterKind::DXIL_HANDLE)
- .Case("dxil_cbuffer_ty", ParameterKind::CBUFFER_RET)
- .Case("dxil_resource_ty", ParameterKind::RESOURCE_RET)
- .Default(ParameterKind::INVALID);
- assert(paramKind != ParameterKind::INVALID &&
- "Unsupported DXIL Type specified");
- return paramKind;
+/// Convert an MVT::SimpleValueType to dxil::ParameterKind
+///
+/// \param VT Simple Value Type
+/// \return ParameterKind As defined in llvm/Support/DXILABI.h
+
+static ParameterKind getParameterKind(MVT::SimpleValueType VT) {
+ switch (VT) {
+ case MVT::isVoid:
+ return ParameterKind::VOID;
+ case MVT::f16:
+ return ParameterKind::HALF;
+ case MVT::f32:
+ return ParameterKind::FLOAT;
+ case MVT::f64:
+ return ParameterKind::DOUBLE;
+ case MVT::i1:
+ return ParameterKind::I1;
+ case MVT::i8:
+ return ParameterKind::I8;
+ case MVT::i16:
+ return ParameterKind::I16;
+ case MVT::i32:
+ return ParameterKind::I32;
+ case MVT::fAny:
+ case MVT::iAny:
+ return ParameterKind::OVERLOAD;
+ default:
+ llvm_unreachable("Support for specified DXIL Type not yet implemented");
+ }
}
+/// Construct an object using the DXIL Operation records specified
+/// in DXIL.td. This serves as the single source of reference of
+/// the information extracted from the specified Record R, for
+/// C++ code generated by this TableGen backend.
+/// \param R Object representing TableGen record of a DXIL Operation
DXILOperationDesc::DXILOperationDesc(const Record *R) {
- OpName = R->getValueAsString("OpName");
+ OpName = R->getNameInitAsString();
OpCode = R->getValueAsInt("OpCode");
- OpClass = R->getValueAsDef("OpClass")->getValueAsString("Name");
- Category = R->getValueAsDef("OpCategory")->getValueAsString("Name");
- if (R->getValue("llvm_intrinsic")) {
- auto *IntrinsicDef = R->getValueAsDef("llvm_intrinsic");
+ Doc = R->getValueAsString("Doc");
+
+ if (R->getValue("LLVMIntrinsic")) {
+ auto *IntrinsicDef = R->getValueAsDef("LLVMIntrinsic");
auto DefName = IntrinsicDef->getName();
assert(DefName.starts_with("int_") && "invalid intrinsic name");
// Remove the int_ from intrinsic name.
Intrinsic = DefName.substr(4);
+ // TODO: It is expected that return type and parameter types of
+ // DXIL Operation are the same as that of the intrinsic. Deviations
+ // are expected to be encoded in TableGen record specification and
+ // handled accordingly here. Support to be added later, as needed.
+ // Get parameter type list of the intrinsic. Types attribute contains
+ // the list as [returnType, param1Type, param2Type, ...]
+
+ OverloadParamIndex = -1;
+ auto TypeRecs = IntrinsicDef->getValueAsListOfDefs("Types");
+ unsigned TypeRecsSize = TypeRecs.size();
+ // Populate return type and parameter type names
+ for (unsigned i = 0; i < TypeRecsSize; i++) {
+ auto TR = TypeRecs[i];
+ OpTypes.emplace_back(getValueType(TR->getValueAsDef("VT")));
+ // Get the overload parameter index.
+ // TODO: Seems hacky. Is it possible that more than one parameter can
+ // be of overload kind?
+ // TODO: Check for any additional constraints specified for DXIL operation
+ // restricting return type.
+ if (i > 0) {
+ auto &CurParam = OpTypes.back();
+ if (getParameterKind(CurParam) >= ParameterKind::OVERLOAD) {
+ OverloadParamIndex = i;
+ }
+ }
+ }
+ // Get the operation class
+ OpClass = R->getValueAsDef("OpClass")->getName();
+
+ // NOTE: For now, assume that attributes of DXIL Operation are the same as
+ // that of the intrinsic. Deviations are expected to be encoded in TableGen
+ // record specification and handled accordingly here. Support to be added
+ // later.
+ auto IntrPropList = IntrinsicDef->getValueAsListInit("IntrProperties");
+ auto IntrPropListSize = IntrPropList->size();
+ for (unsigned i = 0; i < IntrPropListSize; i++) {
+ OpAttributes.emplace_back(IntrPropList->getElement(i)->getAsString());
+ }
}
-
- Doc = R->getValueAsString("Doc");
-
- ListInit *ParamList = R->getValueAsListInit("Params");
- OverloadParamIndex = -1;
- for (unsigned I = 0; I < ParamList->size(); ++I) {
- Record *Param = ParamList->getElementAsRecord(I);
- Params.emplace_back(DXILParameter(Param));
- auto &CurParam = Params.back();
- if (CurParam.Kind >= ParameterKind::OVERLOAD)
- OverloadParamIndex = I;
- }
- ListInit *OverloadTypeList = R->getValueAsListInit("OverloadTypes");
-
- for (unsigned I = 0; I < OverloadTypeList->size(); ++I) {
- Record *R = OverloadTypeList->getElementAsRecord(I);
- OverloadTypes.emplace_back(lookupParameterKind(R->getNameInitAsString()));
- }
- Attr = StringRef(R->getValue("Attribute")->getNameInitAsString());
}
-DXILParameter::DXILParameter(const Record *R) {
- Name = R->getValueAsString("Name");
- Pos = R->getValueAsInt("Pos");
- Kind =
- lookupParameterKind(R->getValue("ParamType")->getValue()->getAsString());
- if (R->getValue("Doc"))
- Doc = R->getValueAsString("Doc");
- IsConst = R->getValueAsBit("IsConstant");
- EnumName = R->getValueAsString("EnumName");
- MaxValue = R->getValueAsInt("MaxValue");
-}
-
-static std::string parameterKindToString(ParameterKind Kind) {
+/// Return a string representation of ParameterKind enum
+/// \param Kind Parameter Kind enum value
+/// \return std::string string representation of input Kind
+static std::string getParameterKindStr(ParameterKind Kind) {
switch (Kind) {
case ParameterKind::INVALID:
return "INVALID";
@@ -182,92 +190,77 @@ static std::string parameterKindToString(ParameterKind Kind) {
llvm_unreachable("Unknown llvm::dxil::ParameterKind enum");
}
-static void emitDXILOpEnum(DXILOperationDesc &Op, raw_ostream &OS) {
- // Name = ID, // Doc
- OS << Op.OpName << " = " << Op.OpCode << ", // " << Op.Doc << "\n";
-}
+/// Return a string representation of OverloadKind enum that maps to
+/// input Simple Value Type enum
+/// \param VT Simple Value Type enum
+/// \return std::string string representation of OverloadKind
-static std::string buildCategoryStr(StringSet<> &Cetegorys) {
- std::string Str;
- raw_string_ostream OS(Str);
- for (auto &It : Cetegorys) {
- OS << " " << It.getKey();
+static std::string getOverloadKindStr(MVT::SimpleValueType VT) {
+ switch (VT) {
+ case MVT::isVoid:
+ return "OverloadKind::VOID";
+ case MVT::f16:
+ return "OverloadKind::HALF";
+ case MVT::f32:
+ return "OverloadKind::FLOAT";
+ case MVT::f64:
+ return "OverloadKind::DOUBLE";
+ case MVT::i1:
+ return "OverloadKind::I1";
+ case MVT::i8:
+ return "OverloadKind::I8";
+ case MVT::i16:
+ return "OverloadKind::I16";
+ case MVT::i32:
+ return "OverloadKind::I32";
+ case MVT::i64:
+ return "OverloadKind::I64";
+ case MVT::iAny:
+ return "OverloadKind::I16 | OverloadKind::I32 | OverloadKind::I64";
+ case MVT::fAny:
+ return "OverloadKind::HALF | OverloadKind::FLOAT | OverloadKind::DOUBLE";
+ default:
+ llvm_unreachable(
+ "Support for specified parameter OverloadKind not yet implemented");
}
- return OS.str();
}
-// Emit enum declaration for DXIL.
+/// Emit Enums of DXIL Ops
+/// \param Ops Vector of DXIL Ops
+/// \param OS Output stream
static void emitDXILEnums(std::vector<DXILOperationDesc> &Ops,
raw_ostream &OS) {
- // Sort by Category + OpName.
+ // Sort by OpCode
llvm::sort(Ops, [](DXILOperationDesc &A, DXILOperationDesc &B) {
- // Group by Category first.
- if (A.Category == B.Category)
- // Inside same Category, order by OpName.
- return A.OpName < B.OpName;
- else
- return A.Category < B.Category;
+ return A.OpCode < B.OpCode;
});
OS << "// Enumeration for operations specified by DXIL\n";
OS << "enum class OpCode : unsigned {\n";
- StringMap<StringSet<>> ClassMap;
- StringRef PrevCategory = "";
for (auto &Op : Ops) {
- StringRef Category = Op.Category;
- if (Category != PrevCategory) {
- OS << "\n// " << Category << "\n";
- PrevCategory = Category;
- }
- emitDXILOpEnum(Op, OS);
- auto It = ClassMap.find(Op.OpClass);
- if (It != ClassMap.end()) {
- It->second.insert(Op.Category);
- } else {
- ClassMap[Op.OpClass].insert(Op.Category);
- }
+ // Name = ID, // Doc
+ OS << Op.OpName << " = " << Op.OpCode << ", // " << Op.Doc << "\n";
}
OS << "\n};\n\n";
- std::vector<std::pair<std::string, std::string>> ClassVec;
- for (auto &It : ClassMap) {
- ClassVec.emplace_back(
- std::pair(It.getKey().str(), buildCategoryStr(It.second)));
- }
- // Sort by Category + ClassName.
- llvm::sort(ClassVec, [](std::pair<std::string, std::string> &A,
- std::pair<std::string, std::string> &B) {
- StringRef ClassA = A.first;
- StringRef CategoryA = A.second;
- StringRef ClassB = B.first;
- StringRef CategoryB = B.second;
- // Group by Category first.
- if (CategoryA == CategoryB)
- // Inside same Category, order by ClassName.
- return ClassA < ClassB;
- else
- return CategoryA < CategoryB;
- });
-
OS << "// Groups for DXIL operations with equivalent function templates\n";
OS << "enum class OpCodeClass : unsigned {\n";
- PrevCategory = "";
- for (auto &It : ClassVec) {
-
- StringRef Category = It.second;
- if (Category != PrevCategory) {
- OS << "\n// " << Category << "\n";
- PrevCategory = Category;
- }
- StringRef Name = It.first;
- OS << Name << ",\n";
+ // Build an OpClass set to print
+ SmallSet<StringRef, 2> OpClassSet;
+ for (auto &Op : Ops) {
+ OpClassSet.insert(Op.OpClass);
+ }
+ for (auto &C : OpClassSet) {
+ OS << C << ",\n";
}
OS << "\n};\n\n";
}
-// Emit map from llvm intrinsic to DXIL operation.
+/// Emit map of DXIL operation to LLVM or DirectX intrinsic
+/// \param Ops Vector of DXIL Ops
+/// \param OS Output stream
static void emitDXILIntrinsicMap(std::vector<DXILOperationDesc> &Ops,
raw_ostream &OS) {
OS << "\n";
@@ -285,75 +278,27 @@ static void emitDXILIntrinsicMap(std::vector<DXILOperationDesc> &Ops,
OS << "\n";
}
-/*!
- Convert operation attribute string to Attribute enum
-
- @param Attr string reference
- @return std::string Attribute enum string
- */
-static std::string emitDXILOperationAttr(StringRef Attr) {
- return StringSwitch<std::string>(Attr)
- .Case("ReadNone", "Attribute::ReadNone")
- .Case("ReadOnly", "Attribute::ReadOnly")
- .Default("Attribute::None");
-}
-
-static std::string overloadKindStr(ParameterKind Overload) {
- switch (Overload) {
- case ParameterKind::HALF:
- return "OverloadKind::HALF";
- case ParameterKind::FLOAT:
- return "OverloadKind::FLOAT";
- case ParameterKind::DOUBLE:
- return "OverloadKind::DOUBLE";
- case ParameterKind::I1:
- return "OverloadKind::I1";
- case ParameterKind::I8:
- return "OverloadKind::I8";
- case ParameterKind::I16:
- return "OverloadKind::I16";
- case ParameterKind::I32:
- return "OverloadKind::I32";
- case ParameterKind::I64:
- return "OverloadKind::I64";
- case ParameterKind::VOID:
- return "OverloadKind::VOID";
- default:
- return "OverloadKind::UNKNOWN";
- }
-}
-
-static std::string
-getDXILOperationOverloads(SmallVector<ParameterKind> Overloads) {
- // Format is: OverloadKind::FLOAT | OverloadKind::HALF
- auto It = Overloads.begin();
- std::string Result;
- raw_string_ostream OS(Result);
- OS << overloadKindStr(*It);
- for (++It; It != Overloads.end(); ++It) {
- OS << " | " << overloadKindStr(*It);
+/// Convert a list of operation attribute strings to a single Attribute enum
+/// string
+///
+/// \param Attrs Vector of operation attribute strings
+/// \return std::string Attribute enum string
+
+static std::string emitDXILOperationAttr(SmallVector<std::string> Attrs) {
+ for (auto Attr : Attrs) {
+ // TODO: For now just recognize IntrNoMem and IntrReadMem as valid and
+ // ignore others.
+ if (Attr == "IntrNoMem") {
+ return "Attribute::ReadNone";
+ } else if (Attr == "IntrReadMem") {
+ return "Attribute::ReadOnly";
+ }
}
- return OS.str();
-}
-
-static std::string lowerFirstLetter(StringRef Name) {
- if (Name.empty())
- return "";
-
- std::string LowerName = Name.str();
- LowerName[0] = llvm::toLower(Name[0]);
- return LowerName;
-}
-
-static std::string getDXILOpClassName(StringRef OpClass) {
- // Lower first letter expect for special case.
- return StringSwitch<std::string>(OpClass)
- .Case("CBufferLoad", "cbufferLoad")
- .Case("CBufferLoadLegacy", "cbufferLoadLegacy")
- .Case("GSInstanceID", "gsInstanceID")
- .Default(lowerFirstLetter(OpClass));
+ return "Attribute::None";
}
+/// Emit DXIL operation table
+/// \param Ops Vector of DXIL Ops
+/// \param OS Output stream
static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
raw_ostream &OS) {
// Sort by OpCode.
@@ -369,15 +314,16 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
StringMap<SmallVector<ParameterKind>> ParameterMap;
StringSet<> ClassSet;
for (auto &Op : Ops) {
- OpStrings.add(Op.OpName.str());
+ OpStrings.add(Op.OpName);
if (ClassSet.contains(Op.OpClass))
continue;
ClassSet.insert(Op.OpClass);
- OpClassStrings.add(getDXILOpClassName(Op.OpClass));
+ OpClassStrings.add(Op.OpClass.data());
SmallVector<ParameterKind> ParamKindVec;
- for (auto &Param : Op.Params) {
- ParamKindVec.emplace_back(Param.Kind);
+ // ParamKindVec is a vector of parameters. Skip return type at index 0
+ for (unsigned i = 1; i < Op.OpTypes.size(); i++) {
+ ParamKindVec.emplace_back(getParameterKind(Op.OpTypes[i]));
}
ParameterMap[Op.OpClass] = ParamKindVec;
Parameters.add(ParamKindVec);
@@ -389,7 +335,7 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
Parameters.layout();
// Emit the DXIL operation table.
- //{dxil::OpCode::Sin, OpCodeNameIndex, OpCodeClass::Unary,
+ //{dxil::OpCode::Sin, OpCodeNameIndex, OpCodeClass::unary,
// OpCodeClassNameIndex,
// OverloadKind::FLOAT | OverloadKind::HALF, Attribute::AttrKind::ReadNone, 0,
// 3, ParameterTableOffset},
@@ -398,12 +344,12 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
OS << " static const OpCodeProperty OpCodeProps[] = {\n";
for (auto &Op : Ops) {
- OS << " { dxil::OpCode::" << Op.OpName << ", "
- << OpStrings.get(Op.OpName.str()) << ", OpCodeClass::" << Op.OpClass
- << ", " << OpClassStrings.get(getDXILOpClassName(Op.OpClass)) << ", "
- << getDXILOperationOverloads(Op.OverloadTypes) << ", "
- << emitDXILOperationAttr(Op.Attr) << ", " << Op.OverloadParamIndex
- << ", " << Op.Params.size() << ", "
+ OS << " { dxil::OpCode::" << Op.OpName << ", " << OpStrings.get(Op.OpName)
+ << ", OpCodeClass::" << Op.OpClass << ", "
+ << OpClassStrings.get(Op.OpClass.data()) << ", "
+ << getOverloadKindStr(Op.OpTypes[0]) << ", "
+ << emitDXILOperationAttr(Op.OpAttributes) << ", "
+ << Op.OverloadParamIndex << ", " << Op.OpTypes.size() - 1 << ", "
<< Parameters.get(ParameterMap[Op.OpClass]) << " },\n";
}
OS << " };\n";
@@ -418,7 +364,7 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
"OpCodeProperty &B) {\n";
OS << " return A.OpCode < B.OpCode;\n";
OS << " });\n";
- OS << " assert(Prop && \"fail to find OpCodeProperty\");\n";
+ OS << " assert(Prop && \"failed to find OpCodeProperty\");\n";
OS << " return Prop;\n";
OS << "}\n\n";
@@ -450,7 +396,7 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
Parameters.emit(
OS,
[](raw_ostream &ParamOS, ParameterKind Kind) {
- ParamOS << "ParameterKind::" << parameterKindToString(Kind);
+ ParamOS << "ParameterKind::" << getParameterKindStr(Kind);
},
"ParameterKind::INVALID");
OS << " };\n\n";
@@ -459,30 +405,28 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
OS << "}\n ";
}
+/// Entry function call that invokes the functionality of this TableGen backend
+/// \param Records TableGen records of DXIL Operations defined in DXIL.td
+/// \param OS output stream
static void EmitDXILOperation(RecordKeeper &Records, raw_ostream &OS) {
- std::vector<Record *> Ops = Records.getAllDerivedDefinitions("DXILOperation");
OS << "// Generated code, do not edit.\n";
OS << "\n";
-
+ // Get all DXIL Ops to intrinsic mapping records
+ std::vector<Record *> OpIntrMaps =
+ Records.getAllDerivedDefinitions("DXILOpMapping");
std::vector<DXILOperationDesc> DXILOps;
- DXILOps.reserve(Ops.size());
- for (auto *Record : Ops) {
+ for (auto *Record : OpIntrMaps) {
DXILOps.emplace_back(DXILOperationDesc(Record));
}
-
OS << "#ifdef DXIL_OP_ENUM\n";
emitDXILEnums(DXILOps, OS);
OS << "#endif\n\n";
-
OS << "#ifdef DXIL_OP_INTRINSIC_MAP\n";
emitDXILIntrinsicMap(DXILOps, OS);
OS << "#endif\n\n";
-
OS << "#ifdef DXIL_OP_OPERATION_TABLE\n";
emitDXILOperationTable(DXILOps, OS);
OS << "#endif\n\n";
-
- OS << "\n";
}
static TableGen::Emitter::Opt X("gen-dxil-operation", EmitDXILOperation,
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index 4ce5a73d7756..27ff84bce405 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -2461,8 +2461,9 @@ collectHwModesReferencedForEncodings(const CodeGenHwModes &HWM,
BV.set(P.first);
}
}
- transform(BV.set_bits(), std::back_inserter(Names),
- [&HWM](const int &M) { return HWM.getMode(M).Name; });
+ transform(BV.set_bits(), std::back_inserter(Names), [&HWM](const int &M) {
+ return HWM.getModeName(M, /*IncludeDefault=*/true);
+ });
}
// Emits disassembler code for instruction decoding.
@@ -2503,8 +2504,9 @@ void DecoderEmitter::run(raw_ostream &o) {
if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
EncodingInfoByHwMode EBM(DI->getDef(), HWM);
for (auto &KV : EBM)
- NumberedEncodings.emplace_back(KV.second, NumberedInstruction,
- HWM.getMode(KV.first).Name);
+ NumberedEncodings.emplace_back(
+ KV.second, NumberedInstruction,
+ HWM.getModeName(KV.first, /*IncludeDefault=*/true));
continue;
}
}
diff --git a/llvm/utils/TableGen/SearchableTableEmitter.cpp b/llvm/utils/TableGen/SearchableTableEmitter.cpp
index 5bab4ff188e8..51f18f360ed3 100644
--- a/llvm/utils/TableGen/SearchableTableEmitter.cpp
+++ b/llvm/utils/TableGen/SearchableTableEmitter.cpp
@@ -215,12 +215,15 @@ int64_t SearchableTableEmitter::getNumericKey(const SearchIndex &Index,
Record *Rec) {
assert(Index.Fields.size() == 1);
+ // To be consistent with compareBy and primaryRepresentation elsewhere,
+ // we check for IsInstruction before Enum -- these fields are not exclusive.
+ if (Index.Fields[0].IsInstruction) {
+ Record *TheDef = Rec->getValueAsDef(Index.Fields[0].Name);
+ return Target->getInstrIntValue(TheDef);
+ }
if (Index.Fields[0].Enum) {
Record *EnumEntry = Rec->getValueAsDef(Index.Fields[0].Name);
return Index.Fields[0].Enum->EntryMap[EnumEntry]->second;
- } else if (Index.Fields[0].IsInstruction) {
- Record *TheDef = Rec->getValueAsDef(Index.Fields[0].Name);
- return Target->getInstrIntValue(TheDef);
}
return getInt(Rec, Index.Fields[0].Name);
diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp
index a48b9cfe42e3..f4d282f54ac0 100644
--- a/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -567,7 +567,9 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_EVEX_L2_W_OPSIZE_KZ_B:
return false;
case IC_EVEX_NF:
+ return WIG && inheritsFrom(child, IC_EVEX_W_NF);
case IC_EVEX_B_NF:
+ return WIG && inheritsFrom(child, IC_EVEX_W_B_NF);
case IC_EVEX_OPSIZE_NF:
case IC_EVEX_OPSIZE_B_NF:
case IC_EVEX_W_NF:
diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
index 4a02a92f824e..53777523ec2a 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -388,7 +388,12 @@ def itertests(
def should_add_line_to_output(
- input_line, prefix_set, skip_global_checks=False, comment_marker=";"
+ input_line,
+ prefix_set,
+ *,
+ skip_global_checks=False,
+ skip_same_checks=False,
+ comment_marker=";",
):
# Skip any blank comment lines in the IR.
if not skip_global_checks and input_line.strip() == comment_marker:
@@ -402,9 +407,14 @@ def should_add_line_to_output(
# And skip any CHECK lines. We're building our own.
m = CHECK_RE.match(input_line)
if m and m.group(1) in prefix_set:
+ if skip_same_checks and CHECK_SAME_RE.match(input_line):
+ # The previous CHECK line was removed, so don't leave this dangling
+ return False
if skip_global_checks:
+ # Skip checks only if they are of global value definitions
global_ir_value_re = re.compile(r"(\[\[|@)", flags=(re.M))
- return not global_ir_value_re.search(input_line)
+ is_global = global_ir_value_re.search(input_line)
+ return not is_global
return False
return True
@@ -483,6 +493,7 @@ PREFIX_RE = re.compile("^[a-zA-Z0-9_-]+$")
CHECK_RE = re.compile(
r"^\s*(?://|[;#])\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL|-SAME|-EMPTY)?:"
)
+CHECK_SAME_RE = re.compile(r"^\s*(?://|[;#])\s*([^:]+?)(?:-SAME)?:")
UTC_ARGS_KEY = "UTC_ARGS:"
UTC_ARGS_CMD = re.compile(r".*" + UTC_ARGS_KEY + r"\s*(?P<cmd>.*)\s*$")
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/modernize/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/modernize/BUILD.gn
index 551c790e3c4d..c9e081383fa0 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/modernize/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/modernize/BUILD.gn
@@ -40,6 +40,7 @@ static_library("modernize") {
"UseBoolLiteralsCheck.cpp",
"UseConstraintsCheck.cpp",
"UseDefaultMemberInitCheck.cpp",
+ "UseDesignatedInitializersCheck.cpp",
"UseEmplaceCheck.cpp",
"UseEqualsDefaultCheck.cpp",
"UseEqualsDeleteCheck.cpp",
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/utils/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/utils/BUILD.gn
index c5bb21ba9669..da3a37d46153 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/utils/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/utils/BUILD.gn
@@ -14,6 +14,7 @@ static_library("utils") {
"ASTUtils.cpp",
"Aliasing.cpp",
"DeclRefExprUtils.cpp",
+ "DesignatedInitializers.cpp",
"ExceptionAnalyzer.cpp",
"ExceptionSpecAnalyzer.cpp",
"ExprSequence.cpp",
diff --git a/llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn
index fbff113613d2..5e533bf23ec4 100644
--- a/llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/InstallAPI/BUILD.gn
@@ -8,6 +8,8 @@ static_library("InstallAPI") {
]
sources = [
"FileList.cpp",
+ "Frontend.cpp",
"HeaderFile.cpp",
+ "Visitor.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/lldb/test/BUILD.gn b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
index 06ef7383ad3b..414ea4933c51 100644
--- a/llvm/utils/gn/secondary/lldb/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
@@ -60,7 +60,8 @@ write_lit_cfg("lit_api_site_cfg") {
"LLDB_TEST_COMMON_ARGS=",
"LLDB_TEST_USER_ARGS=",
"LLDB_ENABLE_PYTHON=0",
- "LLDB_HAS_LIBCXX=0", # FIXME: support this (?)
+ "LLDB_HAS_LIBCXX=False", # FIXME: support this (?)
+ "LLDB_TEST_USE_VENDOR_PACKAGES=False",
"LLDB_LIBS_DIR=", # FIXME: for shared builds only (?)
"LLDB_TEST_ARCH=$current_cpu",
"LLDB_TEST_COMPILER=" + rebase_path("$root_build_dir/bin/clang"),
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn
index b966b7484267..cae491a34331 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn
@@ -67,6 +67,7 @@ static_library("LLVMHexagonCodeGen") {
"HexagonISelLowering.cpp",
"HexagonISelLoweringHVX.cpp",
"HexagonInstrInfo.cpp",
+ "HexagonLoopAlign.cpp",
"HexagonLoopIdiomRecognition.cpp",
"HexagonMCInstLower.cpp",
"HexagonMachineFunctionInfo.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn
index 949b3b214740..a8d6290f1b99 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/BUILD.gn
@@ -61,6 +61,7 @@ static_library("LLVMWebAssemblyCodeGen") {
"WebAssemblyOptimizeLiveIntervals.cpp",
"WebAssemblyOptimizeReturned.cpp",
"WebAssemblyPeephole.cpp",
+ "WebAssemblyRefTypeMem2Local.cpp",
"WebAssemblyRegColoring.cpp",
"WebAssemblyRegNumbering.cpp",
"WebAssemblyRegStackify.cpp",
diff --git a/llvm/utils/update_test_checks.py b/llvm/utils/update_test_checks.py
index 06c247c8010a..b5077d793513 100755
--- a/llvm/utils/update_test_checks.py
+++ b/llvm/utils/update_test_checks.py
@@ -235,6 +235,7 @@ def main():
)
else:
# "Normal" mode.
+ dropped_previous_line = False
for input_line_info in ti.iterlines(output_lines):
input_line = input_line_info.line
args = input_line_info.args
@@ -282,7 +283,10 @@ def main():
has_checked_pre_function_globals = True
if common.should_add_line_to_output(
- input_line, prefix_set, not is_in_function
+ input_line,
+ prefix_set,
+ skip_global_checks=not is_in_function,
+ skip_same_checks=dropped_previous_line,
):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
@@ -290,9 +294,13 @@ def main():
r" ", input_line
)
output_lines.append(input_line)
+ dropped_previous_line = False
if input_line.strip() == "}":
is_in_function = False
continue
+ else:
+ # If we are removing a check line, and the next line is CHECK-SAME, it MUST also be removed
+ dropped_previous_line = True
if is_in_function:
continue
diff --git a/llvm/utils/vim/syntax/mir.vim b/llvm/utils/vim/syntax/mir.vim
index 51ac4982b7c9..024a795a23c5 100644
--- a/llvm/utils/vim/syntax/mir.vim
+++ b/llvm/utils/vim/syntax/mir.vim
@@ -43,6 +43,8 @@ if version >= 508 || !exists("did_c_syn_inits")
endif
HiLink mirSpecialComment SpecialComment
+
+ delcommand HiLink
endif
let b:current_syntax = "mir"
diff --git a/mlir/CMakeLists.txt b/mlir/CMakeLists.txt
index 16c898bdeb6e..070609c94a3b 100644
--- a/mlir/CMakeLists.txt
+++ b/mlir/CMakeLists.txt
@@ -111,8 +111,6 @@ if ("NVPTX" IN_LIST LLVM_TARGETS_TO_BUILD)
else()
set(MLIR_ENABLE_CUDA_CONVERSIONS 0)
endif()
-# TODO: we should use a config.h file like LLVM does
-add_definitions(-DMLIR_CUDA_CONVERSIONS_ENABLED=${MLIR_ENABLE_CUDA_CONVERSIONS})
# Build the ROCm conversions and run according tests if the AMDGPU backend
# is available.
diff --git a/mlir/docs/Dialects/GPU.md b/mlir/docs/Dialects/GPU.md
index 85255fdc5e64..8a3acc33600a 100644
--- a/mlir/docs/Dialects/GPU.md
+++ b/mlir/docs/Dialects/GPU.md
@@ -50,6 +50,7 @@ An example of how the compilation workflow look is:
```
mlir-opt example.mlir \
--pass-pipeline="builtin.module( \
+ gpu-kernel-outlining, \ # Outline gpu.launch body to a kernel.
nvvm-attach-target{chip=sm_90 O=3}, \ # Attach an NVVM target to a gpu.module op.
gpu.module(convert-gpu-to-nvvm), \ # Convert GPU to NVVM.
gpu-to-llvm, \ # Convert GPU to LLVM.
diff --git a/mlir/docs/PassManagement.md b/mlir/docs/PassManagement.md
index ff86bbfef7b0..c9d705f0506a 100644
--- a/mlir/docs/PassManagement.md
+++ b/mlir/docs/PassManagement.md
@@ -56,8 +56,7 @@ By default, an operation pass is `op-agnostic`, meaning that it operates on the
operation type of the pass manager that it is added to. This means a pass may operate
on many different types of operations. Agnostic passes should be written such that
they do not make assumptions on the operation they run on. Examples of this type of pass are
-[Canonicalization](Pass.md/-canonicalize-canonicalize-operations)
-[Common Sub-Expression Elimination](Passes.md/#-cse-eliminate-common-sub-expressions).
+[Canonicalization](Passes.md/#-canonicalize) and [Common Sub-Expression Elimination](Passes.md/#-cse).
To create an agnostic operation pass, a derived class must adhere to the following:
diff --git a/mlir/docs/PatternRewriter.md b/mlir/docs/PatternRewriter.md
index 011cd1417563..0ba76199874c 100644
--- a/mlir/docs/PatternRewriter.md
+++ b/mlir/docs/PatternRewriter.md
@@ -366,7 +366,7 @@ Note: This driver listens for IR changes via the callbacks provided by
rewriter and do not bypass the rewriter API by modifying ops directly.
Note: This driver is the one used by the [canonicalization](Canonicalization.md)
-[pass](Passes.md/#-canonicalize-canonicalize-operations) in MLIR.
+[pass](Passes.md/#-canonicalize) in MLIR.
### Debugging
diff --git a/mlir/examples/CMakeLists.txt b/mlir/examples/CMakeLists.txt
index d256bf1a5cbb..2a1cac34d8c2 100644
--- a/mlir/examples/CMakeLists.txt
+++ b/mlir/examples/CMakeLists.txt
@@ -1,3 +1,4 @@
add_subdirectory(toy)
add_subdirectory(transform)
+add_subdirectory(transform-opt)
add_subdirectory(minimal-opt)
diff --git a/mlir/examples/transform-opt/CMakeLists.txt b/mlir/examples/transform-opt/CMakeLists.txt
new file mode 100644
index 000000000000..8e23555d0b5d
--- /dev/null
+++ b/mlir/examples/transform-opt/CMakeLists.txt
@@ -0,0 +1,26 @@
+get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
+get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
+get_property(extension_libs GLOBAL PROPERTY MLIR_EXTENSION_LIBS)
+
+set(LIBS
+ MLIRAnalysis
+ MLIRIR
+ MLIRParser
+ MLIRSupport
+ MLIRTransformDialect
+ MLIRTransformDialectTransforms
+ MLIRTransforms
+ ${dialect_libs}
+ ${conversion_libs}
+ ${extension_libs}
+)
+
+add_mlir_tool(mlir-transform-opt
+ mlir-transform-opt.cpp
+
+ DEPENDS
+ ${LIBS}
+)
+target_link_libraries(mlir-transform-opt PRIVATE ${LIBS})
+llvm_update_compile_flags(mlir-transform-opt)
+mlir_check_all_link_libraries(mlir-transform-opt)
diff --git a/mlir/examples/transform-opt/README.md b/mlir/examples/transform-opt/README.md
new file mode 100644
index 000000000000..e9c8cc0173c7
--- /dev/null
+++ b/mlir/examples/transform-opt/README.md
@@ -0,0 +1,40 @@
+# Standalone Transform Dialect Interpreter
+
+This is an example of using the Transform dialect interpreter functionality standalone, that is, outside of the regular pass pipeline. The example is a
+binary capable of processing MLIR source files similar to `mlir-opt` and other
+optimizer drivers, with the entire transformation process driven by a Transform
+dialect script. This script can be embedded into the source file or provided in
+a separate MLIR source file.
+
+Either the input module or the transform module must contain a top-level symbol
+named `__transform_main`, which is used as the entry point to the transformation
+script.
+
+```sh
+mlir-transform-opt payload_with_embedded_transform.mlir
+mlir-transform-opt payload.mlir -transform=transform.mlir
+```
+
+The name of the entry point can be overridden using command-line options.
+
+```sh
+mlir-transform-opt payload.mlir -transform-entry-point=another_entry_point
+```
+
+Transform scripts can reference symbols defined in other source files, called
+libraries, which can be supplied to the binary through command-line options.
+Libraries will be embedded into the main transformation module by the tool and
+the interpreter will process everything as a single module. A debug option is
+available to see the contents of the transform module before it goes into the interpreter.
+
+```sh
+mlir-transform-opt payload.mlir -transform=transform.mlir \
+ -transform-library=external_definitions_1.mlir \
+ -transform-library=external_definitions_2.mlir \
+ -dump-library-module
+```
+
+Check out the [Transform dialect
+tutorial](https://mlir.llvm.org/docs/Tutorials/transform/) as well as
+[documentation](https://mlir.llvm.org/docs/Dialects/Transform/) to learn more
+about the dialect.
diff --git a/mlir/examples/transform-opt/mlir-transform-opt.cpp b/mlir/examples/transform-opt/mlir-transform-opt.cpp
new file mode 100644
index 000000000000..41a17f18726b
--- /dev/null
+++ b/mlir/examples/transform-opt/mlir-transform-opt.cpp
@@ -0,0 +1,389 @@
+//===- mlir-transform-opt.cpp -----------------------------------*- C++ -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Transform/IR/TransformDialect.h"
+#include "mlir/Dialect/Transform/IR/Utils.h"
+#include "mlir/Dialect/Transform/Transforms/TransformInterpreterUtils.h"
+#include "mlir/IR/AsmState.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/Diagnostics.h"
+#include "mlir/IR/DialectRegistry.h"
+#include "mlir/IR/MLIRContext.h"
+#include "mlir/InitAllDialects.h"
+#include "mlir/InitAllExtensions.h"
+#include "mlir/InitAllPasses.h"
+#include "mlir/Parser/Parser.h"
+#include "mlir/Support/FileUtilities.h"
+#include "mlir/Tools/mlir-opt/MlirOptMain.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/InitLLVM.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include <cstdlib>
+
+namespace {
+
+using namespace llvm;
+
+/// Structure containing command line options for the tool, these will get
+/// initialized when an instance is created.
+struct MlirTransformOptCLOptions {
+ cl::opt<bool> allowUnregisteredDialects{
+ "allow-unregistered-dialect",
+ cl::desc("Allow operations coming from an unregistered dialect"),
+ cl::init(false)};
+
+ cl::opt<bool> verifyDiagnostics{
+ "verify-diagnostics",
+ cl::desc("Check that emitted diagnostics match expected-* lines "
+ "on the corresponding line"),
+ cl::init(false)};
+
+ cl::opt<std::string> payloadFilename{cl::Positional, cl::desc("<input file>"),
+ cl::init("-")};
+
+ cl::opt<std::string> outputFilename{"o", cl::desc("Output filename"),
+ cl::value_desc("filename"),
+ cl::init("-")};
+
+ cl::opt<std::string> transformMainFilename{
+ "transform",
+ cl::desc("File containing entry point of the transform script, if "
+ "different from the input file"),
+ cl::value_desc("filename"), cl::init("")};
+
+ cl::list<std::string> transformLibraryFilenames{
+ "transform-library", cl::desc("File(s) containing definitions of "
+ "additional transform script symbols")};
+
+ cl::opt<std::string> transformEntryPoint{
+ "transform-entry-point",
+ cl::desc("Name of the entry point transform symbol"),
+ cl::init(mlir::transform::TransformDialect::kTransformEntryPointSymbolName
+ .str())};
+
+ cl::opt<bool> disableExpensiveChecks{
+ "disable-expensive-checks",
+ cl::desc("Disables potentially expensive checks in the transform "
+ "interpreter, providing more speed at the expense of "
+ "potential memory problems and silent corruptions"),
+ cl::init(false)};
+
+ cl::opt<bool> dumpLibraryModule{
+ "dump-library-module",
+ cl::desc("Prints the combined library module before the output"),
+ cl::init(false)};
+};
+} // namespace
+
+/// "Managed" static instance of the command-line options structure. This makes
+/// them locally-scoped and explicitly initialized/deinitialized. While this is
+/// not strictly necessary in the tool source file that is not being used as a
+/// library (where the options would pollute the global list of options), it is
+/// good practice to follow this.
+static llvm::ManagedStatic<MlirTransformOptCLOptions> clOptions;
+
+/// Explicitly registers command-line options.
+static void registerCLOptions() { *clOptions; }
+
+namespace {
+/// A wrapper class for source managers diagnostic. This provides both unique
+/// ownership and virtual function-like overload for a pair of
+/// inheritance-related classes that do not use virtual functions.
+class DiagnosticHandlerWrapper {
+public:
+ /// Kind of the diagnostic handler to use.
+ enum class Kind { EmitDiagnostics, VerifyDiagnostics };
+
+ /// Constructs the diagnostic handler of the specified kind of the given
+ /// source manager and context.
+ DiagnosticHandlerWrapper(Kind kind, llvm::SourceMgr &mgr,
+ mlir::MLIRContext *context) {
+ if (kind == Kind::EmitDiagnostics)
+ handler = new mlir::SourceMgrDiagnosticHandler(mgr, context);
+ else
+ handler = new mlir::SourceMgrDiagnosticVerifierHandler(mgr, context);
+ }
+
+ /// This object is non-copyable but movable.
+ DiagnosticHandlerWrapper(const DiagnosticHandlerWrapper &) = delete;
+ DiagnosticHandlerWrapper(DiagnosticHandlerWrapper &&other) = default;
+ DiagnosticHandlerWrapper &
+ operator=(const DiagnosticHandlerWrapper &) = delete;
+ DiagnosticHandlerWrapper &operator=(DiagnosticHandlerWrapper &&) = default;
+
+ /// Verifies the captured "expected-*" diagnostics if required.
+ mlir::LogicalResult verify() const {
+ if (auto *ptr =
+ handler.dyn_cast<mlir::SourceMgrDiagnosticVerifierHandler *>()) {
+ return ptr->verify();
+ }
+ return mlir::success();
+ }
+
+ /// Destructs the object of the same type as allocated.
+ ~DiagnosticHandlerWrapper() {
+ if (auto *ptr = handler.dyn_cast<mlir::SourceMgrDiagnosticHandler *>()) {
+ delete ptr;
+ } else {
+ delete handler.get<mlir::SourceMgrDiagnosticVerifierHandler *>();
+ }
+ }
+
+private:
+ /// Internal storage is a type-safe union.
+ llvm::PointerUnion<mlir::SourceMgrDiagnosticHandler *,
+ mlir::SourceMgrDiagnosticVerifierHandler *>
+ handler;
+};
+
+/// MLIR has deeply rooted expectations that the LLVM source manager contains
+/// exactly one buffer, until at least the lexer level. This class wraps
+/// multiple LLVM source managers each managing a buffer to match MLIR's
+/// expectations while still providing a centralized handling mechanism.
+class TransformSourceMgr {
+public:
+ /// Constructs the source manager indicating whether diagnostic messages will
+ /// be verified later on.
+ explicit TransformSourceMgr(bool verifyDiagnostics)
+ : verifyDiagnostics(verifyDiagnostics) {}
+
+  /// Deconstructs the source manager. Note that `checkResult` must have been
+ /// called on this instance before deconstructing it.
+ ~TransformSourceMgr() {
+ assert(resultChecked && "must check the result of diagnostic handlers by "
+ "running TransformSourceMgr::checkResult");
+ }
+
+ /// Parses the given buffer and creates the top-level operation of the kind
+ /// specified as template argument in the given context. Additional parsing
+ /// options may be provided.
+ template <typename OpTy = mlir::Operation *>
+ mlir::OwningOpRef<OpTy> parseBuffer(std::unique_ptr<MemoryBuffer> buffer,
+ mlir::MLIRContext &context,
+ const mlir::ParserConfig &config) {
+ // Create a single-buffer LLVM source manager. Note that `unique_ptr` allows
+ // the code below to capture a reference to the source manager in such a way
+ // that it is not invalidated when the vector contents is eventually
+ // reallocated.
+ llvm::SourceMgr &mgr =
+ *sourceMgrs.emplace_back(std::make_unique<llvm::SourceMgr>());
+ mgr.AddNewSourceBuffer(std::move(buffer), llvm::SMLoc());
+
+ // Choose the type of diagnostic handler depending on whether diagnostic
+ // verification needs to happen and store it.
+ if (verifyDiagnostics) {
+ diagHandlers.emplace_back(
+ DiagnosticHandlerWrapper::Kind::VerifyDiagnostics, mgr, &context);
+ } else {
+ diagHandlers.emplace_back(DiagnosticHandlerWrapper::Kind::EmitDiagnostics,
+ mgr, &context);
+ }
+
+ // Defer to MLIR's parser.
+ return mlir::parseSourceFile<OpTy>(mgr, config);
+ }
+
+ /// If diagnostic message verification has been requested upon construction of
+ /// this source manager, performs the verification, reports errors and returns
+ /// the result of the verification. Otherwise passes through the given value.
+ mlir::LogicalResult checkResult(mlir::LogicalResult result) {
+ resultChecked = true;
+ if (!verifyDiagnostics)
+ return result;
+
+ return mlir::failure(llvm::any_of(diagHandlers, [](const auto &handler) {
+ return mlir::failed(handler.verify());
+ }));
+ }
+
+private:
+ /// Indicates whether diagnostic message verification is requested.
+ const bool verifyDiagnostics;
+
+ /// Indicates that diagnostic message verification has taken place, and the
+ /// deconstruction is therefore safe.
+ bool resultChecked = false;
+
+ /// Storage for per-buffer source managers and diagnostic handlers. These are
+ /// wrapped into unique pointers in order to make it safe to capture
+ /// references to these objects: if the vector is reallocated, the unique
+  /// pointer objects are moved but the pointer addresses won't change. Also,
+ /// handlers, this allows to store the pointer to the base class.
+ SmallVector<std::unique_ptr<llvm::SourceMgr>> sourceMgrs;
+ SmallVector<DiagnosticHandlerWrapper> diagHandlers;
+};
+} // namespace
+
+/// Trivial wrapper around `applyTransforms` that doesn't support extra mapping
+/// and doesn't enforce the entry point transform ops being top-level.
+static mlir::LogicalResult
+applyTransforms(mlir::Operation *payloadRoot,
+ mlir::transform::TransformOpInterface transformRoot,
+ const mlir::transform::TransformOptions &options) {
+ return applyTransforms(payloadRoot, transformRoot, {}, options,
+ /*enforceToplevelTransformOp=*/false);
+}
+
+/// Applies transforms indicated in the transform dialect script to the input
+/// buffer. The transform script may be embedded in the input buffer or as a
+/// separate buffer. The transform script may have external symbols, the
+/// definitions of which must be provided in transform library buffers. If the
+/// application is successful, prints the transformed input buffer into the
+/// given output stream. Additional configuration options are derived from
+/// command-line options.
+static mlir::LogicalResult processPayloadBuffer(
+ raw_ostream &os, std::unique_ptr<MemoryBuffer> inputBuffer,
+ std::unique_ptr<llvm::MemoryBuffer> transformBuffer,
+ MutableArrayRef<std::unique_ptr<MemoryBuffer>> transformLibraries,
+ mlir::DialectRegistry &registry) {
+
+ // Initialize the MLIR context, and various configurations.
+ mlir::MLIRContext context(registry, mlir::MLIRContext::Threading::DISABLED);
+ context.allowUnregisteredDialects(clOptions->allowUnregisteredDialects);
+ mlir::ParserConfig config(&context);
+ TransformSourceMgr sourceMgr(
+ /*verifyDiagnostics=*/clOptions->verifyDiagnostics);
+
+ // Parse the input buffer that will be used as transform payload.
+ mlir::OwningOpRef<mlir::Operation *> payloadRoot =
+ sourceMgr.parseBuffer(std::move(inputBuffer), context, config);
+ if (!payloadRoot)
+ return sourceMgr.checkResult(mlir::failure());
+
+ // Identify the module containing the transform script entry point. This may
+ // be the same module as the input or a separate module. In the former case,
+ // make a copy of the module so it can be modified freely. Modification may
+ // happen in the script itself (at which point it could be rewriting itself
+ // during interpretation, leading to tricky memory errors) or by embedding
+ // library modules in the script.
+ mlir::OwningOpRef<mlir::ModuleOp> transformRoot;
+ if (transformBuffer) {
+ transformRoot = sourceMgr.parseBuffer<mlir::ModuleOp>(
+ std::move(transformBuffer), context, config);
+ if (!transformRoot)
+ return sourceMgr.checkResult(mlir::failure());
+ } else {
+ transformRoot = cast<mlir::ModuleOp>(payloadRoot->clone());
+ }
+
+ // Parse and merge the libraries into the main transform module.
+ for (auto &&transformLibrary : transformLibraries) {
+ mlir::OwningOpRef<mlir::ModuleOp> libraryModule =
+ sourceMgr.parseBuffer<mlir::ModuleOp>(std::move(transformLibrary),
+ context, config);
+
+ if (!libraryModule ||
+ mlir::failed(mlir::transform::detail::mergeSymbolsInto(
+ *transformRoot, std::move(libraryModule))))
+ return sourceMgr.checkResult(mlir::failure());
+ }
+
+ // If requested, dump the combined transform module.
+ if (clOptions->dumpLibraryModule)
+ transformRoot->dump();
+
+ // Find the entry point symbol. Even if it had originally been in the payload
+ // module, it was cloned into the transform module so only look there.
+ mlir::transform::TransformOpInterface entryPoint =
+ mlir::transform::detail::findTransformEntryPoint(
+ *transformRoot, mlir::ModuleOp(), clOptions->transformEntryPoint);
+ if (!entryPoint)
+ return sourceMgr.checkResult(mlir::failure());
+
+ // Apply the requested transformations.
+ mlir::transform::TransformOptions transformOptions;
+ transformOptions.enableExpensiveChecks(!clOptions->disableExpensiveChecks);
+ if (mlir::failed(applyTransforms(*payloadRoot, entryPoint, transformOptions)))
+ return sourceMgr.checkResult(mlir::failure());
+
+ // Print the transformed result and check the captured diagnostics if
+ // requested.
+ payloadRoot->print(os);
+ return sourceMgr.checkResult(mlir::success());
+}
+
+/// Tool entry point.
+static mlir::LogicalResult runMain(int argc, char **argv) {
+ // Register all upstream dialects and extensions. Specific uses are advised
+ // not to register all dialects indiscriminately but rather hand-pick what is
+ // necessary for their use case.
+ mlir::DialectRegistry registry;
+ mlir::registerAllDialects(registry);
+ mlir::registerAllExtensions(registry);
+ mlir::registerAllPasses();
+
+ // Explicitly register the transform dialect. This is not strictly necessary
+ // since it has been already registered as part of the upstream dialect list,
+ // but useful for example purposes for cases when dialects to register are
+ // hand-picked. The transform dialect must be registered.
+ registry.insert<mlir::transform::TransformDialect>();
+
+ // Register various command-line options. Note that the LLVM initializer
+ // object is a RAII that ensures correct deconstruction of command-line option
+ // objects inside ManagedStatic.
+ llvm::InitLLVM y(argc, argv);
+ mlir::registerAsmPrinterCLOptions();
+ mlir::registerMLIRContextCLOptions();
+ registerCLOptions();
+ llvm::cl::ParseCommandLineOptions(argc, argv,
+ "Minimal Transform dialect driver\n");
+
+ // Try opening the main input file.
+ std::string errorMessage;
+ std::unique_ptr<llvm::MemoryBuffer> payloadFile =
+ mlir::openInputFile(clOptions->payloadFilename, &errorMessage);
+ if (!payloadFile) {
+ llvm::errs() << errorMessage << "\n";
+ return mlir::failure();
+ }
+
+ // Try opening the output file.
+ std::unique_ptr<llvm::ToolOutputFile> outputFile =
+ mlir::openOutputFile(clOptions->outputFilename, &errorMessage);
+ if (!outputFile) {
+ llvm::errs() << errorMessage << "\n";
+ return mlir::failure();
+ }
+
+ // Try opening the main transform file if provided.
+ std::unique_ptr<llvm::MemoryBuffer> transformRootFile;
+ if (!clOptions->transformMainFilename.empty()) {
+ if (clOptions->transformMainFilename == clOptions->payloadFilename) {
+ llvm::errs() << "warning: " << clOptions->payloadFilename
+ << " is provided as both payload and transform file\n";
+ } else {
+ transformRootFile =
+ mlir::openInputFile(clOptions->transformMainFilename, &errorMessage);
+ if (!transformRootFile) {
+ llvm::errs() << errorMessage << "\n";
+ return mlir::failure();
+ }
+ }
+ }
+
+ // Try opening transform library files if provided.
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>> transformLibraries;
+ transformLibraries.reserve(clOptions->transformLibraryFilenames.size());
+ for (llvm::StringRef filename : clOptions->transformLibraryFilenames) {
+ transformLibraries.emplace_back(
+ mlir::openInputFile(filename, &errorMessage));
+ if (!transformLibraries.back()) {
+ llvm::errs() << errorMessage << "\n";
+ return mlir::failure();
+ }
+ }
+
+ return processPayloadBuffer(outputFile->os(), std::move(payloadFile),
+ std::move(transformRootFile), transformLibraries,
+ registry);
+}
+
+int main(int argc, char **argv) {
+ return mlir::asMainReturnCode(runMain(argc, argv));
+}
diff --git a/mlir/include/mlir/Config/mlir-config.h.cmake b/mlir/include/mlir/Config/mlir-config.h.cmake
index e152a36c0ce0..4a7d75e22668 100644
--- a/mlir/include/mlir/Config/mlir-config.h.cmake
+++ b/mlir/include/mlir/Config/mlir-config.h.cmake
@@ -29,4 +29,8 @@
/* If set, enables PDL usage. */
#cmakedefine01 MLIR_ENABLE_PDL_IN_PATTERNMATCH
+/* If set, enables CUDA-related features in CUDA-related transforms, pipelines,
+ and targets. */
+#cmakedefine01 MLIR_ENABLE_CUDA_CONVERSIONS
+
#endif
diff --git a/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td
index 23873d86b495..0eb670506086 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td
+++ b/mlir/include/mlir/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.td
@@ -13,8 +13,8 @@ include "mlir/Dialect/Transform/IR/TransformAttrs.td"
include "mlir/Dialect/Transform/IR/TransformDialect.td"
include "mlir/Dialect/Transform/IR/TransformInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformTypes.td"
-include "mlir/Interfaces/SideEffectInterfaces.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
//===----------------------------------------------------------------------===//
// ApplyOptimizeSharedMemoryReadsAndWritesOp
//===----------------------------------------------------------------------===//
@@ -28,7 +28,9 @@ def ApplyOptimizeSharedMemoryReadsAndWritesOp :
reads/writes with the goal of avoiding bank conflicts.
}];
- let arguments = (ins TransformHandleTypeInterface:$target);
+ let arguments = (ins TransformHandleTypeInterface:$target,
+ DefaultValuedOptionalAttr<I64Attr, "128">:$sharedMemoryLineSizeBytes,
+ DefaultValuedOptionalAttr<I64Attr, "128">:$defaultVectorSizeBits);
let results = (outs);
let assemblyFormat = "$target attr-dict `:` functional-type(operands, results)";
diff --git a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td
index c8059e6d316e..67f951fd19d1 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td
@@ -37,10 +37,17 @@ def OptimizeSharedMemory : Pass<"amdgpu-optimize-shared-memory"> {
attempts to optimize reads/writes from a memref representing GPU shared
memory in order to avoid bank conflicts.
}];
-
let dependentDialects = [
"memref::MemRefDialect", "vector::VectorDialect"
];
+ let options = [
+ Option<"sharedMemoryLineSizeBytes", "shared-memory-line-size-bytes", "int64_t",
+ /*default=*/"128",
+ "Shared memory line size in bytes">,
+ Option<"defaultVectorSizeBits", "default-vector-size-bits", "int64_t",
+ /*default=*/"128",
+ "Default vector size in bits">,
+ ];
}
#endif // MLIR_DIALECT_AMDGPU_TRANSFORMS_PASSES_TD_
diff --git a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h
index 79f9ab71a2b4..843cea2c503b 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Transforms.h
@@ -45,11 +45,15 @@ namespace amdgpu {
/// function that depends on the row Index. The permutation function is chosen
/// to ensure that sequential distributed+vectorized reads/writes down a single
/// dimension of the memref have minimal conflicts.
-LogicalResult optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
- Value memrefValue);
+LogicalResult
+optimizeSharedMemoryReadsAndWrites(Operation *parentOp, Value memrefValue,
+ int64_t sharedMemoryLineSizeBytes,
+ int64_t defaultVectorSizeBits);
std::optional<LogicalResult>
-optimizeSharedMemoryReadsAndWritesOp(func::FuncOp funcOp);
+optimizeSharedMemoryReadsAndWritesOp(func::FuncOp funcOp,
+ int64_t sharedMemoryLineSizeBytes,
+ int64_t defaultVectorSizeBits);
} // namespace amdgpu
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
index 92f3d5a2c492..1f64b57cac57 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
@@ -60,6 +60,26 @@ uint64_t getLargestDivisorOfTripCount(AffineForOp forOp);
DenseSet<Value, DenseMapInfo<Value>>
getInvariantAccesses(Value iv, ArrayRef<Value> indices);
+/// Given:
+/// 1. an induction variable `iv` of type AffineForOp;
+/// 2. a `memoryOp` of type const LoadOp& or const StoreOp&;
+/// determines whether `memoryOp` has a contiguous access along `iv`. Contiguous
+/// is defined as either invariant or varying only along a unique MemRef dim.
+/// Upon success, the unique MemRef dim is written in `memRefDim` (or -1 to
+/// convey the memRef access is invariant along `iv`).
+///
+/// Prerequisites:
+///   1. `memRefDim` != nullptr;
+/// 2. `iv` of the proper type;
+/// 3. the MemRef accessed by `memoryOp` has no layout map or at most an
+/// identity layout map.
+///
+/// Currently only supports no layout map or identity layout map in the memref.
+/// Returns false if the memref has a non-identity layoutMap. This behavior is
+/// conservative.
+template <typename LoadOrStoreOp>
+bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp, int *memRefDim);
+
using VectorizableLoopFun = std::function<bool(AffineForOp)>;
/// Checks whether the loop is structurally vectorizable; i.e.:
diff --git a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
index c50fdf397a0f..5679742bfa16 100644
--- a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
+++ b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
@@ -31,6 +31,14 @@ include "mlir/IR/RegionKindInterface.td"
class EmitC_Op<string mnemonic, list<Trait> traits = []>
: Op<EmitC_Dialect, mnemonic, traits>;
+// Base class for unary operations.
+class EmitC_UnaryOp<string mnemonic, list<Trait> traits = []> :
+ EmitC_Op<mnemonic, traits> {
+ let arguments = (ins AnyType);
+ let results = (outs AnyType);
+ let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)";
+}
+
// Base class for binary operations.
class EmitC_BinaryOp<string mnemonic, list<Trait> traits = []> :
EmitC_Op<mnemonic, traits> {
@@ -95,6 +103,114 @@ def EmitC_ApplyOp : EmitC_Op<"apply", []> {
let hasVerifier = 1;
}
+def EmitC_BitwiseAndOp : EmitC_BinaryOp<"bitwise_and", []> {
+ let summary = "Bitwise and operation";
+ let description = [{
+ With the `bitwise_and` operation the bitwise operator & (and) can
+ be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.bitwise_and %arg0, %arg1 : (i32, i32) -> i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ int32_t v3 = v1 & v2;
+ ```
+ }];
+}
+
+def EmitC_BitwiseLeftShiftOp : EmitC_BinaryOp<"bitwise_left_shift", []> {
+ let summary = "Bitwise left shift operation";
+ let description = [{
+ With the `bitwise_left_shift` operation the bitwise operator <<
+ (left shift) can be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.bitwise_left_shift %arg0, %arg1 : (i32, i32) -> i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ int32_t v3 = v1 << v2;
+ ```
+ }];
+}
+
+def EmitC_BitwiseNotOp : EmitC_UnaryOp<"bitwise_not", []> {
+ let summary = "Bitwise not operation";
+ let description = [{
+ With the `bitwise_not` operation the bitwise operator ~ (not) can
+ be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.bitwise_not %arg0 : (i32) -> i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ int32_t v2 = ~v1;
+ ```
+ }];
+}
+
+def EmitC_BitwiseOrOp : EmitC_BinaryOp<"bitwise_or", []> {
+ let summary = "Bitwise or operation";
+ let description = [{
+ With the `bitwise_or` operation the bitwise operator | (or)
+ can be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.bitwise_or %arg0, %arg1 : (i32, i32) -> i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ int32_t v3 = v1 | v2;
+ ```
+ }];
+}
+
+def EmitC_BitwiseRightShiftOp : EmitC_BinaryOp<"bitwise_right_shift", []> {
+ let summary = "Bitwise right shift operation";
+ let description = [{
+ With the `bitwise_right_shift` operation the bitwise operator >>
+ (right shift) can be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.bitwise_right_shift %arg0, %arg1 : (i32, i32) -> i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ int32_t v3 = v1 >> v2;
+ ```
+ }];
+}
+
+def EmitC_BitwiseXorOp : EmitC_BinaryOp<"bitwise_xor", []> {
+ let summary = "Bitwise xor operation";
+ let description = [{
+ With the `bitwise_xor` operation the bitwise operator ^ (xor)
+ can be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.bitwise_xor %arg0, %arg1 : (i32, i32) -> i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ int32_t v3 = v1 ^ v2;
+ ```
+ }];
+}
+
def EmitC_CallOpaqueOp : EmitC_Op<"call_opaque", []> {
let summary = "Opaque call operation";
let description = [{
@@ -658,6 +774,69 @@ def EmitC_LiteralOp : EmitC_Op<"literal", [Pure]> {
let assemblyFormat = "$value attr-dict `:` type($result)";
}
+def EmitC_LogicalAndOp : EmitC_BinaryOp<"logical_and", []> {
+ let summary = "Logical and operation";
+ let description = [{
+ With the `logical_and` operation the logical operator && (and) can
+ be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.logical_and %arg0, %arg1 : i32, i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ bool v3 = v1 && v2;
+ ```
+ }];
+
+ let results = (outs I1);
+ let assemblyFormat = "operands attr-dict `:` type(operands)";
+}
+
+def EmitC_LogicalNotOp : EmitC_UnaryOp<"logical_not", []> {
+ let summary = "Logical not operation";
+ let description = [{
+ With the `logical_not` operation the logical operator ! (negation) can
+ be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.logical_not %arg0 : i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ bool v2 = !v1;
+ ```
+ }];
+
+ let results = (outs I1);
+ let assemblyFormat = "operands attr-dict `:` type(operands)";
+}
+
+def EmitC_LogicalOrOp : EmitC_BinaryOp<"logical_or", []> {
+ let summary = "Logical or operation";
+ let description = [{
+ With the `logical_or` operation the logical operator || (inclusive or)
+ can be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.logical_or %arg0, %arg1 : i32, i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ bool v3 = v1 || v2;
+ ```
+ }];
+
+ let results = (outs I1);
+ let assemblyFormat = "operands attr-dict `:` type(operands)";
+}
+
def EmitC_MulOp : EmitC_BinaryOp<"mul", []> {
let summary = "Multiplication operation";
let description = [{
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index 955dd1e20d24..bb373afa40ad 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -24,6 +24,7 @@ include "mlir/IR/EnumAttr.td"
include "mlir/IR/SymbolInterfaces.td"
include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/DataLayoutInterfaces.td"
+include "mlir/IR/OpAsmInterface.td"
include "mlir/Interfaces/FunctionInterfaces.td"
include "mlir/Interfaces/InferIntRangeInterface.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
@@ -50,9 +51,21 @@ def GPU_DimensionAttr : EnumAttr<GPU_Dialect, GPU_Dimension, "dim">;
class GPU_IndexOp<string mnemonic, list<Trait> traits = []> :
GPU_Op<mnemonic, !listconcat(traits, [
- Pure, DeclareOpInterfaceMethods<InferIntRangeInterface>])>,
+ Pure,
+ DeclareOpInterfaceMethods<InferIntRangeInterface>,
+ DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>])>,
Arguments<(ins GPU_DimensionAttr:$dimension)>, Results<(outs Index)> {
let assemblyFormat = "$dimension attr-dict";
+ let extraClassDefinition = [{
+ void $cppClass::getAsmResultNames(
+ llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
+ auto dimStr = stringifyDimension(getDimensionAttr().getValue());
+ auto opName = getOperationName();
+ opName.consume_front("gpu.");
+ SmallString<8> resultName({opName, "_", dimStr});
+ setNameFn(getResult(),resultName);
+ }
+ }];
}
def GPU_ClusterDimOp : GPU_IndexOp<"cluster_dim"> {
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 309573a56287..53ed31877c6f 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -2296,6 +2296,49 @@ def ConvertConv2DToImg2ColOp : Op<Transform_Dialect,
}
//===----------------------------------------------------------------------===//
+// FlattenElementwiseLinalgOp
+//===----------------------------------------------------------------------===//
+
+def FlattenElementwiseLinalgOp : Op<Transform_Dialect,
+ "structured.flatten_elementwise",
+ [FunctionalStyleTransformOpTrait,
+ MemoryEffectsOpInterface,
+ TransformOpInterface,
+ TransformEachOpTrait,
+ ReportTrackingListenerFailuresOpTrait]> {
+ let description = [{
+ Flattens the iteration space and (applicable) operands of elementwise
+ linalg ops to a single dimension.
+
+ Returns one handle:
+ - Flattened linalg operation.
+
+ #### Return modes:
+
+ Returns a definite failure if target is not isolated from above.
+ Returns a silenceable failure if the pattern application failed.
+ }];
+
+ let arguments = (ins TransformHandleTypeInterface:$target);
+ let results = (outs TransformHandleTypeInterface:$transformed);
+
+ let assemblyFormat =
+ "$target attr-dict `:` functional-type($target, results)";
+
+ let builders = [
+ OpBuilder<(ins "Value":$target)>
+ ];
+
+ let extraClassDeclaration = [{
+ ::mlir::DiagnosedSilenceableFailure applyToOne(
+ ::mlir::transform::TransformRewriter &rewriter,
+ ::mlir::linalg::LinalgOp target,
+ ::mlir::transform::ApplyToEachResultList &results,
+ ::mlir::transform::TransformState &state);
+ }];
+}
+
+//===----------------------------------------------------------------------===//
// Transpose Conv2D
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index a848d12fbbb5..65cf19e7a4fc 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1074,6 +1074,11 @@ bool isDimSequencePreserved(AffineMap map, ReassociationIndicesRef dimSequence);
bool areDimSequencesPreserved(ArrayRef<AffineMap> maps,
ArrayRef<ReassociationIndices> dimSequences);
+struct CollapseResult {
+ SmallVector<Value> results;
+ LinalgOp collapsedOp;
+};
+
/// Collapses dimensions of linalg.generic/linalg.copy operation. A precondition
/// to calling this method is that for each list in `foldedIterationDim`, the
/// sequence of dimensions is contiguous in domains of all `indexing_maps` of
@@ -1081,9 +1086,8 @@ bool areDimSequencesPreserved(ArrayRef<AffineMap> maps,
/// When valid, the method also collapses the operands of the op. Returns
/// replacement values of the results of the original `linalgOp` by inserting
/// reshapes to get back values of compatible types.
-template <typename LinalgType>
-FailureOr<SmallVector<Value>>
-collapseOpIterationDims(LinalgType op,
+FailureOr<CollapseResult>
+collapseOpIterationDims(LinalgOp op,
ArrayRef<ReassociationIndices> foldedIterationDims,
RewriterBase &rewriter);
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 3127cf1b1bcf..3a5447d29f86 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -257,9 +257,10 @@ def SparseTensor_ReinterpretMapOp : SparseTensor_Op<"reinterpret_map", [NoMemory
let hasVerifier = 1;
}
-def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions", [Pure]>,
+def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts the `level`-th positions array of the `tensor`";
let description = [{
Returns the positions array of the tensor's storage at the given
@@ -283,9 +284,10 @@ def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions", [Pure]>,
let hasVerifier = 1;
}
-def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates", [Pure]>,
+def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts the `level`-th coordinates array of the `tensor`";
let description = [{
Returns the coordinates array of the tensor's storage at the given
@@ -309,9 +311,10 @@ def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates", [Pure]>,
let hasVerifier = 1;
}
-def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer", [Pure]>,
+def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts the linear coordinates array from a tensor";
let description = [{
Returns the linear coordinates array for a sparse tensor with
@@ -340,9 +343,10 @@ def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer", [
let hasVerifier = 1;
}
-def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [Pure]>,
+def SparseTensor_ToValuesOp : SparseTensor_Op<"values",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts numerical values array from a tensor";
let description = [{
Returns the values array of the sparse storage format for the given
@@ -1453,4 +1457,26 @@ def SparseTensor_ForeachOp : SparseTensor_Op<"foreach",
let hasVerifier = 1;
}
+//===----------------------------------------------------------------------===//
+// Sparse Tensor Debugging Operations.
+//===----------------------------------------------------------------------===//
+
+def SparseTensor_PrintOp : SparseTensor_Op<"print">,
+ Arguments<(ins AnySparseTensor:$tensor)> {
+ string summary = "Prints a sparse tensor (for testing and debugging)";
+ string description = [{
+ Prints the individual components of a sparse tensors (the positions,
+ coordinates, and values components) to stdout for testing and debugging
+ purposes. This operation lowers to just a few primitives in a light-weight
+ runtime support to simplify supporting this operation on new platforms.
+
+ Example:
+
+ ```mlir
+ sparse_tensor.print %tensor : tensor<1024x1024xf64, #CSR>
+ ```
+ }];
+ let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
+}
+
#endif // SPARSETENSOR_OPS
diff --git a/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td b/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td
index 1d6eb24156e3..86a2b3c21faf 100644
--- a/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Transform/Transforms/Passes.td
@@ -66,7 +66,25 @@ def InterpreterPass : Pass<"transform-interpreter"> {
let description = [{
This pass runs the transform dialect interpreter and applies the named
sequence transformation specified by the provided name (defaults to
- `TransformDialect::kTransformEntryPointSymbolName` (i.e. `__transform_main`)).
+ `TransformDialect::kTransformEntryPointSymbolName`,
+ i.e. `__transform_main`).
+
+ Additional options can be used to narrow down the pass applicability for
+ debugging purposes:
+ * `debugPayloadRootTag` makes the transform script apply to the payload
+ operation that has a `transform.target_tag` string attribute with the
+ given value, rather than to the anchor operation of the pass.
+ * `debugBindTrailingArgs` allows one to bind values to trailing arguments
+ of the transform entry point as follows:
+ * arguments of `TransformHandleTypeInterface` type can be bound to all
+ payload operations with the name provided as a simple string;
+ * arguments of `TransformValueHandleTypeInterface` type can be bound to
+ a flattened list of results of all operations with the name provided
+ as a string prefixed with `^`;
+ * arguments of `TransformParamTypeInterface` type can be bound to
+ integer constants provided as `;`-separated list prefixed with `#`.
+ * `entryPoint` specifies the name of the transform symbol to serve as the
+ entry point.
}];
let dependentDialects = ["::mlir::transform::TransformDialect"];
let options = [
@@ -83,7 +101,9 @@ def InterpreterPass : Pass<"transform-interpreter"> {
"false",
"Disable expensive checks in the interpreter for a faster run.">,
Option<"entryPoint", "entry-point", "std::string",
- /*default=*/[{TransformDialect::kTransformEntryPointSymbolName.str()}],
+ /*default=*/[{
+ TransformDialect::kTransformEntryPointSymbolName.str()
+ }],
"Entry point of the pass pipeline.">,
];
}
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 6d50b0654bc5..06360bd10e52 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -2809,7 +2809,8 @@ def Vector_SplatOp : Vector_Op<"splat", [
// call to the function. For that, it might be useful to have a
// 'vector.scale.global' and a 'vector.scale.local' operation.
def VectorScaleOp : Vector_Op<"vscale",
- [Pure]> {
+ [Pure, DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>]
+> {
let summary = "Load vector scale size";
let description = [{
The `vscale` op returns the scale of the scalable vectors, a positive
@@ -2825,6 +2826,13 @@ def VectorScaleOp : Vector_Op<"vscale",
}];
let results = (outs Index:$res);
let assemblyFormat = "attr-dict";
+
+ let extraClassDefinition = [{
+ void $cppClass::getAsmResultNames(
+ ::llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
+ setNameFn(getResult(), "vscale");
+ }
+ }];
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index 2ce3bc3fc2e7..f8d22cfb22af 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -579,7 +579,7 @@ public:
/// Split the operations starting at "before" (inclusive) out of the given
/// block into a new block, and return it.
- virtual Block *splitBlock(Block *block, Block::iterator before);
+ Block *splitBlock(Block *block, Block::iterator before);
/// Unlink this operation from its current block and insert it right before
/// `existingOp` which may be in the same or another block in the same
diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h
index e28921619fe5..5d90c197a6cc 100644
--- a/mlir/include/mlir/InitAllPasses.h
+++ b/mlir/include/mlir/InitAllPasses.h
@@ -14,6 +14,7 @@
#ifndef MLIR_INITALLPASSES_H_
#define MLIR_INITALLPASSES_H_
+#include "mlir/Config/mlir-config.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/AMDGPU/Transforms/Passes.h"
#include "mlir/Dialect/Affine/Passes.h"
@@ -96,7 +97,7 @@ inline void registerAllPasses() {
bufferization::registerBufferizationPipelines();
sparse_tensor::registerSparseTensorPipelines();
tosa::registerTosaToLinalgPipelines();
-#if MLIR_CUDA_CONVERSIONS_ENABLED
+#if MLIR_ENABLE_CUDA_CONVERSIONS
gpu::registerGPUToNVVMPipeline();
#endif
}
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index 7e8e67a9d178..84396529eb7c 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -741,9 +741,6 @@ public:
/// implemented for dialect conversion.
void eraseBlock(Block *block) override;
- /// PatternRewriter hook for splitting a block into two parts.
- Block *splitBlock(Block *block, Block::iterator before) override;
-
/// PatternRewriter hook for inlining the ops of a block into another block.
void inlineBlockBefore(Block *source, Block *dest, Block::iterator before,
ValueRange argValues = std::nullopt) override;
diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
index e69f9c837ca1..10ccd5c97783 100644
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -14,6 +14,7 @@
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Affine/Transforms/Transforms.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
@@ -558,6 +559,7 @@ class LowerAffinePass
RewritePatternSet patterns(&getContext());
populateAffineToStdConversionPatterns(patterns);
populateAffineToVectorConversionPatterns(patterns);
+ populateAffineExpandIndexOpsPatterns(patterns);
ConversionTarget target(getContext());
target.addLegalDialect<arith::ArithDialect, memref::MemRefDialect,
scf::SCFDialect, VectorDialect>();
diff --git a/mlir/lib/Conversion/AffineToStandard/CMakeLists.txt b/mlir/lib/Conversion/AffineToStandard/CMakeLists.txt
index 2ba0f30b1190..f41e3ca27ee4 100644
--- a/mlir/lib/Conversion/AffineToStandard/CMakeLists.txt
+++ b/mlir/lib/Conversion/AffineToStandard/CMakeLists.txt
@@ -12,12 +12,13 @@ add_mlir_conversion_library(MLIRAffineToStandard
LINK_LIBS PUBLIC
MLIRAffineDialect
+ MLIRAffineTransforms
MLIRAffineUtils
MLIRArithDialect
MLIRIR
MLIRMemRefDialect
- MLIRSCFDialect
MLIRPass
+ MLIRSCFDialect
MLIRTransforms
MLIRVectorDialect
)
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 19cc914efae0..337f8bb6ab99 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -1532,7 +1532,8 @@ public:
auto punct = printOp.getPunctuation();
if (auto stringLiteral = printOp.getStringLiteral()) {
LLVM::createPrintStrCall(rewriter, loc, parent, "vector_print_str",
- *stringLiteral, *getTypeConverter());
+ *stringLiteral, *getTypeConverter(),
+ /*addNewline=*/false);
} else if (punct != PrintPunctuation::NoPunctuation) {
emitCall(rewriter, printOp->getLoc(), [&] {
switch (punct) {
diff --git a/mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp b/mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp
index ff29f9f69385..b7e17a928973 100644
--- a/mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp
+++ b/mlir/lib/Dialect/AMDGPU/TransformOps/AMDGPUTransformOps.cpp
@@ -27,7 +27,8 @@ DiagnosedSilenceableFailure
ApplyOptimizeSharedMemoryReadsAndWritesOp::applyToOne(
TransformRewriter &rewriter, FuncOp funcOp, ApplyToEachResultList &results,
TransformState &state) {
- optimizeSharedMemoryReadsAndWritesOp(funcOp);
+ optimizeSharedMemoryReadsAndWritesOp(funcOp, getSharedMemoryLineSizeBytes(),
+ getDefaultVectorSizeBits());
return DiagnosedSilenceableFailure::success();
}
diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp b/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp
index 6bd03ed83389..32fab265e03c 100644
--- a/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp
+++ b/mlir/lib/Dialect/AMDGPU/Transforms/OptimizeSharedMemory.cpp
@@ -35,13 +35,6 @@ namespace amdgpu {
using namespace mlir;
using namespace mlir::amdgpu;
-/// The size of a shared memory line according to AMD documentation.
-/// https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/instruction-set-architectures/instinct-mi200-cdna2-instruction-set-architecture.pdf
-constexpr int64_t kSharedMemoryLineSizeBytes = 64;
-/// We optimize for 64bit accesses, but this can be made an argument in the
-/// future.
-constexpr int64_t kDefaultVectorSizeBits = 64;
-
/// Uses `srcIndexValue` to permute `tgtIndexValue` via
/// `result = xor(floordiv(srcIdxVal,permuteEveryN),
/// floordiv(tgtIdxVal,vectorSize)))
@@ -49,7 +42,9 @@ constexpr int64_t kDefaultVectorSizeBits = 64;
/// This is done using an optimized sequence of `arith` operations.
static Value permuteVectorOffset(OpBuilder &b, Location loc,
ArrayRef<Value> indices, MemRefType memrefTy,
- int64_t srcDim, int64_t tgtDim) {
+ int64_t srcDim, int64_t tgtDim,
+ int64_t sharedMemoryLineSizeBytes,
+ int64_t defaultVectorSizeBits) {
// Adjust the src index to change how often the permutation changes
// if necessary.
Value src = indices[srcDim];
@@ -57,9 +52,9 @@ static Value permuteVectorOffset(OpBuilder &b, Location loc,
// We only want to permute every N iterations of the target dim where N is
// ceil(sharedMemoryLineSizeBytes / dimSizeBytes(tgtDim)).
const int64_t permuteEveryN = std::max<int64_t>(
- 1, kSharedMemoryLineSizeBytes / ((memrefTy.getDimSize(tgtDim) *
- memrefTy.getElementTypeBitWidth()) /
- 8));
+ 1, sharedMemoryLineSizeBytes / ((memrefTy.getDimSize(tgtDim) *
+ memrefTy.getElementTypeBitWidth()) /
+ 8));
// clang-format off
// Index bit representation (b0 = least significant bit) for dim(1)
@@ -71,7 +66,7 @@ static Value permuteVectorOffset(OpBuilder &b, Location loc,
// bits[N:M] = vector index
// clang-format on
int64_t n =
- llvm::Log2_64(kDefaultVectorSizeBits / memrefTy.getElementTypeBitWidth());
+ llvm::Log2_64(defaultVectorSizeBits / memrefTy.getElementTypeBitWidth());
int64_t m = llvm::Log2_64(memrefTy.getDimSize(tgtDim));
// Capture bits[0:(M-N)] of src by first creating a (M-N) mask.
@@ -105,9 +100,11 @@ static Value permuteVectorOffset(OpBuilder &b, Location loc,
static void transformIndices(OpBuilder &builder, Location loc,
SmallVector<Value, 4> &indices,
MemRefType memrefTy, int64_t srcDim,
- int64_t tgtDim) {
+ int64_t tgtDim, int64_t sharedMemoryLineSizeBytes,
+ int64_t defaultVectorSizeBits) {
indices[tgtDim] =
- permuteVectorOffset(builder, loc, indices, memrefTy, srcDim, tgtDim);
+ permuteVectorOffset(builder, loc, indices, memrefTy, srcDim, tgtDim,
+ sharedMemoryLineSizeBytes, defaultVectorSizeBits);
}
// Return all operations within `parentOp` that read from or write to
@@ -149,8 +146,9 @@ getShmReadAndWriteOps(Operation *parentOp, Value shmMemRef,
return success();
}
-LogicalResult amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
- Value memrefValue) {
+LogicalResult amdgpu::optimizeSharedMemoryReadsAndWrites(
+ Operation *parentOp, Value memrefValue, int64_t sharedMemoryLineSizeBytes,
+ int64_t defaultVectorSizeBits) {
auto memRefType = dyn_cast<MemRefType>(memrefValue.getType());
if (!memRefType ||
!amdgpu::AMDGPUDialect::hasSharedMemoryAddressSpace(memRefType))
@@ -167,10 +165,10 @@ LogicalResult amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
// If dim[rank-1] is small enough to fit 8 rows in a 128B line.
const int64_t rowSize = memRefType.getDimSize(memRefType.getRank() - 1);
const int64_t rowsPerLine =
- (8 * kSharedMemoryLineSizeBytes / memRefType.getElementTypeBitWidth()) /
+ (8 * sharedMemoryLineSizeBytes / memRefType.getElementTypeBitWidth()) /
rowSize;
const int64_t threadGroupSize =
- 1LL << (7 - llvm::Log2_64(kDefaultVectorSizeBits / 8));
+ 1LL << (7 - llvm::Log2_64(defaultVectorSizeBits / 8));
if (rowsPerLine >= threadGroupSize)
return failure();
@@ -198,7 +196,8 @@ LogicalResult amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
auto indices = amdgpu::getIndices(shmWriteOp);
SmallVector<Value, 4> transformedIndices(indices->begin(), indices->end());
transformIndices(builder, shmWriteOp->getLoc(), transformedIndices,
- memRefType, srcDim, tgtDim);
+ memRefType, srcDim, tgtDim, sharedMemoryLineSizeBytes,
+ defaultVectorSizeBits);
amdgpu::setIndices(shmWriteOp, transformedIndices);
}
@@ -210,7 +209,8 @@ LogicalResult amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
auto indices = amdgpu::getIndices(shmReadOp);
SmallVector<Value, 4> transformedIndices(indices->begin(), indices->end());
transformIndices(builder, shmReadOp->getLoc(), transformedIndices,
- memRefType, srcDim, tgtDim);
+ memRefType, srcDim, tgtDim, sharedMemoryLineSizeBytes,
+ defaultVectorSizeBits);
amdgpu::setIndices(shmReadOp, transformedIndices);
}
@@ -218,7 +218,9 @@ LogicalResult amdgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
}
std::optional<LogicalResult>
-amdgpu::optimizeSharedMemoryReadsAndWritesOp(func::FuncOp funcOp) {
+amdgpu::optimizeSharedMemoryReadsAndWritesOp(func::FuncOp funcOp,
+ int64_t sharedMemoryLineSizeBytes,
+ int64_t defaultVectorSizeBits) {
SmallVector<memref::AllocOp> shmAllocOps;
funcOp.walk([&](memref::AllocOp allocOp) {
if (!amdgpu::AMDGPUDialect::hasSharedMemoryAddressSpace(allocOp.getType()))
@@ -226,8 +228,9 @@ amdgpu::optimizeSharedMemoryReadsAndWritesOp(func::FuncOp funcOp) {
shmAllocOps.push_back(allocOp);
});
for (auto allocOp : shmAllocOps) {
- if (failed(amdgpu::optimizeSharedMemoryReadsAndWrites(funcOp,
- allocOp.getMemref())))
+ if (failed(amdgpu::optimizeSharedMemoryReadsAndWrites(
+ funcOp, allocOp.getMemref(), sharedMemoryLineSizeBytes,
+ defaultVectorSizeBits)))
return failure();
}
return success();
@@ -237,7 +240,8 @@ struct OptimizeSharedMemoryPass
: public amdgpu::impl::OptimizeSharedMemoryBase<OptimizeSharedMemoryPass> {
public:
OptimizeSharedMemoryPass() = default;
-
+ OptimizeSharedMemoryPass(const OptimizeSharedMemoryOptions &options)
+ : OptimizeSharedMemoryBase(options) {}
void runOnOperation() override {
Operation *op = getOperation();
SmallVector<memref::AllocOp> shmAllocOps;
@@ -248,8 +252,9 @@ public:
shmAllocOps.push_back(allocOp);
});
for (auto allocOp : shmAllocOps) {
- if (failed(optimizeSharedMemoryReadsAndWrites(getOperation(),
- allocOp.getMemref())))
+ if (failed(optimizeSharedMemoryReadsAndWrites(op, allocOp.getMemref(),
+ sharedMemoryLineSizeBytes,
+ defaultVectorSizeBits)))
return;
}
}
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index e645afe7cd3e..fc0515ba95f4 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -195,43 +195,25 @@ DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
return res;
}
-/// Given:
-/// 1. an induction variable `iv` of type AffineForOp;
-/// 2. a `memoryOp` of type const LoadOp& or const StoreOp&;
-/// determines whether `memoryOp` has a contiguous access along `iv`. Contiguous
-/// is defined as either invariant or varying only along a unique MemRef dim.
-/// Upon success, the unique MemRef dim is written in `memRefDim` (or -1 to
-/// convey the memRef access is invariant along `iv`).
-///
-/// Prerequisites:
-/// 1. `memRefDim` ~= nullptr;
-/// 2. `iv` of the proper type;
-/// 3. the MemRef accessed by `memoryOp` has no layout map or at most an
-/// identity layout map.
-///
-/// Currently only supports no layoutMap or identity layoutMap in the MemRef.
-/// Returns false if the MemRef has a non-identity layoutMap or more than 1
-/// layoutMap. This is conservative.
-///
-// TODO: check strides.
+// TODO: check access stride.
template <typename LoadOrStoreOp>
-static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
- int *memRefDim) {
- static_assert(
- llvm::is_one_of<LoadOrStoreOp, AffineLoadOp, AffineStoreOp>::value,
- "Must be called on either LoadOp or StoreOp");
+bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
+ int *memRefDim) {
+ static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
+ AffineWriteOpInterface>::value,
+ "Must be called on either an affine read or write op");
assert(memRefDim && "memRefDim == nullptr");
auto memRefType = memoryOp.getMemRefType();
if (!memRefType.getLayout().isIdentity())
- return memoryOp.emitError("NYI: non-trivial layoutMap"), false;
+ return memoryOp.emitError("NYI: non-trivial layout map"), false;
int uniqueVaryingIndexAlongIv = -1;
auto accessMap = memoryOp.getAffineMap();
SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
unsigned numDims = accessMap.getNumDims();
for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
- // Gather map operands used result expr 'i' in 'exprOperands'.
+ // Gather map operands used in result expr 'i' in 'exprOperands'.
SmallVector<Value, 4> exprOperands;
auto resultExpr = accessMap.getResult(i);
resultExpr.walk([&](AffineExpr expr) {
@@ -241,7 +223,7 @@ static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
});
// Check access invariance of each operand in 'exprOperands'.
- for (auto exprOperand : exprOperands) {
+ for (Value exprOperand : exprOperands) {
if (!isAccessIndexInvariant(iv, exprOperand)) {
if (uniqueVaryingIndexAlongIv != -1) {
// 2+ varying indices -> do not vectorize along iv.
@@ -259,6 +241,13 @@ static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
return true;
}
+template bool mlir::affine::isContiguousAccess(Value iv,
+ AffineReadOpInterface loadOp,
+ int *memRefDim);
+template bool mlir::affine::isContiguousAccess(Value iv,
+ AffineWriteOpInterface loadOp,
+ int *memRefDim);
+
template <typename LoadOrStoreOp>
static bool isVectorElement(LoadOrStoreOp memoryOp) {
auto memRefType = memoryOp.getMemRefType();
@@ -344,10 +333,13 @@ bool mlir::affine::isVectorizableLoopBody(
auto load = dyn_cast<AffineLoadOp>(op);
auto store = dyn_cast<AffineStoreOp>(op);
int thisOpMemRefDim = -1;
- bool isContiguous = load ? isContiguousAccess(loop.getInductionVar(), load,
- &thisOpMemRefDim)
- : isContiguousAccess(loop.getInductionVar(), store,
- &thisOpMemRefDim);
+ bool isContiguous =
+ load ? isContiguousAccess(loop.getInductionVar(),
+ cast<AffineReadOpInterface>(*load),
+ &thisOpMemRefDim)
+ : isContiguousAccess(loop.getInductionVar(),
+ cast<AffineWriteOpInterface>(*store),
+ &thisOpMemRefDim);
if (thisOpMemRefDim != -1) {
// If memory accesses vary across different dimensions then the loop is
// not vectorizable.
diff --git a/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
index 8deb8f028ba4..7f246daf99ff 100644
--- a/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
@@ -261,68 +261,62 @@ struct BFloat16TruncFOpConverter : public OpRewritePattern<arith::TruncFOp> {
return rewriter.notifyMatchFailure(op, "not a trunc of f32 to bf16.");
}
- Type i1Ty = b.getI1Type();
Type i16Ty = b.getI16Type();
Type i32Ty = b.getI32Type();
Type f32Ty = b.getF32Type();
if (auto shapedTy = dyn_cast<ShapedType>(operandTy)) {
- i1Ty = shapedTy.clone(i1Ty);
i16Ty = shapedTy.clone(i16Ty);
i32Ty = shapedTy.clone(i32Ty);
f32Ty = shapedTy.clone(f32Ty);
}
- Value bitcast = b.create<arith::BitcastOp>(i32Ty, operand);
-
- Value c23 = createConst(op.getLoc(), i32Ty, 23, rewriter);
- Value c31 = createConst(op.getLoc(), i32Ty, 31, rewriter);
- Value c23Mask = createConst(op.getLoc(), i32Ty, (1 << 23) - 1, rewriter);
- Value expMask =
- createConst(op.getLoc(), i32Ty, ((1 << 8) - 1) << 23, rewriter);
- Value expMax =
- createConst(op.getLoc(), i32Ty, ((1 << 8) - 2) << 23, rewriter);
-
- // Grab the sign bit.
- Value sign = b.create<arith::ShRUIOp>(bitcast, c31);
-
- // Our mantissa rounding value depends on the sign bit and the last
- // truncated bit.
- Value cManRound = createConst(op.getLoc(), i32Ty, (1 << 15), rewriter);
- cManRound = b.create<arith::SubIOp>(cManRound, sign);
-
- // Grab out the mantissa and directly apply rounding.
- Value man = b.create<arith::AndIOp>(bitcast, c23Mask);
- Value manRound = b.create<arith::AddIOp>(man, cManRound);
-
- // Grab the overflow bit and shift right if we overflow.
- Value roundBit = b.create<arith::ShRUIOp>(manRound, c23);
- Value manNew = b.create<arith::ShRUIOp>(manRound, roundBit);
-
- // Grab the exponent and round using the mantissa's carry bit.
- Value exp = b.create<arith::AndIOp>(bitcast, expMask);
- Value expCarry = b.create<arith::AddIOp>(exp, manRound);
- expCarry = b.create<arith::AndIOp>(expCarry, expMask);
-
- // If the exponent is saturated, we keep the max value.
- Value expCmp =
- b.create<arith::CmpIOp>(arith::CmpIPredicate::uge, exp, expMax);
- exp = b.create<arith::SelectOp>(expCmp, exp, expCarry);
-
- // If the exponent is max and we rolled over, keep the old mantissa.
- Value roundBitBool = b.create<arith::TruncIOp>(i1Ty, roundBit);
- Value keepOldMan = b.create<arith::AndIOp>(expCmp, roundBitBool);
- man = b.create<arith::SelectOp>(keepOldMan, man, manNew);
-
- // Assemble the now rounded f32 value (as an i32).
- Value rounded = b.create<arith::ShLIOp>(sign, c31);
- rounded = b.create<arith::OrIOp>(rounded, exp);
- rounded = b.create<arith::OrIOp>(rounded, man);
-
+ // Algorithm borrowed from this excellent code:
+ // https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/c10/util/BFloat16.h#L60-L79
+ // There is a magic idea there, to let the addition of the rounding_bias to
+ // the mantissa simply overflow into the exponent bits. It's a bit of an
+ // aggressive, obfuscating optimization, but it is well-tested code, and it
+ // results in more concise and efficient IR.
+ // The case of NaN is handled separately (see isNaN and the final select).
+ // The case of infinities is NOT handled separately, which deserves an
+ // explanation. As the encoding of infinities has zero mantissa, the
+ // rounding-bias addition never carries into the exponent so that just gets
+ // truncated away, and as bfloat16 and float32 have the same number of
+ // exponent bits, that simple truncation is the desired outcome for
+ // infinities.
+ Value isNan =
+ b.create<arith::CmpFOp>(arith::CmpFPredicate::UNE, operand, operand);
+ // Constant used to make the rounding bias.
+ Value c7FFF = createConst(op.getLoc(), i32Ty, 0x7fff, rewriter);
+ // Constant used to generate a quiet NaN.
+ Value c7FC0_i16 = createConst(op.getLoc(), i16Ty, 0x7fc0, rewriter);
+ // Small constants used to address bits.
Value c16 = createConst(op.getLoc(), i32Ty, 16, rewriter);
- Value shr = b.create<arith::ShRUIOp>(rounded, c16);
- Value trunc = b.create<arith::TruncIOp>(i16Ty, shr);
- Value result = b.create<arith::BitcastOp>(resultTy, trunc);
-
+ Value c1 = createConst(op.getLoc(), i32Ty, 1, rewriter);
+ // Reinterpret the input f32 value as bits.
+ Value bitcast = b.create<arith::BitcastOp>(i32Ty, operand);
+ // Read bit 16 as a value in {0,1}.
+ Value bit16 =
+ b.create<arith::AndIOp>(b.create<arith::ShRUIOp>(bitcast, c16), c1);
+ // Determine the rounding bias to add as either 0x7fff or 0x8000 depending
+ // on bit 16, implementing the tie-breaking "to nearest even".
+ Value roundingBias = b.create<arith::AddIOp>(bit16, c7FFF);
+ // Add the rounding bias. Generally we want this to be added to the
+ // mantissa, but nothing prevents this to from carrying into the exponent
+ // bits, which would feel like a bug, but this is the magic trick here:
+ // when that happens, the mantissa gets reset to zero and the exponent
+ // gets incremented by the carry... which is actually exactly what we
+ // want.
+ Value biased = b.create<arith::AddIOp>(bitcast, roundingBias);
+ // Now that the rounding-bias has been added, truncating the low bits
+ // yields the correctly rounded result.
+ Value biasedAndShifted = b.create<arith::ShRUIOp>(biased, c16);
+ Value normalCaseResult_i16 =
+ b.create<arith::TruncIOp>(i16Ty, biasedAndShifted);
+ // Select either the above-computed result, or a quiet NaN constant
+ // if the input was NaN.
+ Value select =
+ b.create<arith::SelectOp>(isNan, c7FC0_i16, normalCaseResult_i16);
+ Value result = b.create<arith::BitcastOp>(resultTy, select);
rewriter.replaceOp(op, result);
return success();
}
diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
index 935f0deaf9c8..db1974ddb377 100644
--- a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
+++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Config/mlir-config.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h"
@@ -38,7 +39,7 @@
using namespace mlir;
-#if MLIR_CUDA_CONVERSIONS_ENABLED
+#if MLIR_ENABLE_CUDA_CONVERSIONS
namespace {
//===----------------------------------------------------------------------===//
@@ -127,4 +128,4 @@ void mlir::gpu::registerGPUToNVVMPipeline() {
buildLowerToNVVMPassPipeline);
}
-#endif // MLIR_CUDA_CONVERSIONS_ENABLED
+#endif // MLIR_ENABLE_CUDA_CONVERSIONS
diff --git a/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp b/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
index 0527073da85b..f379ea819392 100644
--- a/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
@@ -13,6 +13,7 @@
#include "mlir/Dialect/GPU/Transforms/Passes.h"
+#include "mlir/Config/mlir-config.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
@@ -48,7 +49,7 @@ void GpuModuleToBinaryPass::getDependentDialects(
// Register all GPU related translations.
registry.insert<gpu::GPUDialect>();
registry.insert<LLVM::LLVMDialect>();
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#if MLIR_ENABLE_CUDA_CONVERSIONS
registry.insert<NVVM::NVVMDialect>();
#endif
#if MLIR_ROCM_CONVERSIONS_ENABLED == 1
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 299965bcfc3a..ef9cd5561665 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3245,6 +3245,31 @@ DiagnosedSilenceableFailure transform::ConvertConv2DToImg2ColOp::applyToOne(
}
//===----------------------------------------------------------------------===//
+// FlattenElementwiseLinalgOp.
+//===----------------------------------------------------------------------===//
+
+DiagnosedSilenceableFailure transform::FlattenElementwiseLinalgOp::applyToOne(
+ transform::TransformRewriter &rewriter, linalg::LinalgOp target,
+ transform::ApplyToEachResultList &results,
+ transform::TransformState &state) {
+ rewriter.setInsertionPoint(target);
+ if (target.getNumLoops() <= 1)
+ return DiagnosedSilenceableFailure::success();
+ ReassociationIndices reassociation(target.getNumLoops());
+ std::iota(reassociation.begin(), reassociation.end(), 0);
+ auto maybeFlattened =
+ (isElementwise(target))
+ ? collapseOpIterationDims(target, reassociation, rewriter)
+ : FailureOr<CollapseResult>(rewriter.notifyMatchFailure(
+ target, "only elementwise flattening is supported"));
+ if (failed(maybeFlattened))
+ return emitDefaultSilenceableFailure(target);
+ results.push_back(maybeFlattened->collapsedOp);
+ rewriter.replaceOp(target, maybeFlattened->results);
+ return DiagnosedSilenceableFailure::success();
+}
+
+//===----------------------------------------------------------------------===//
// TransposeConv2DOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 4977940cfbd7..4797bfb2267d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -1446,24 +1446,20 @@ void generateCollapsedIndexingRegion(Location loc, Block *block,
}
}
-template <typename LinalgType>
-Operation *createCollapsedOp(LinalgType op,
- const CollapsingInfo &collapsingInfo,
- RewriterBase &rewriter) {
- static_assert(llvm::is_one_of<LinalgType, GenericOp, CopyOp>::value,
- "unsupported linalg op type to create");
+void collapseOperandsAndResults(LinalgOp op,
+ const CollapsingInfo &collapsingInfo,
+ RewriterBase &rewriter,
+ SmallVectorImpl<Value> &inputOperands,
+ SmallVectorImpl<Value> &outputOperands,
+ SmallVectorImpl<Type> &resultTypes) {
Location loc = op->getLoc();
-
- // Get the input operands.
- SmallVector<Value> inputOperands =
+ inputOperands =
llvm::map_to_vector(op.getDpsInputOperands(), [&](OpOperand *opOperand) {
return getCollapsedOpOperand(loc, op, opOperand, collapsingInfo,
rewriter);
});
// Get the output operands and result types.
- SmallVector<Type> resultTypes;
- SmallVector<Value> outputOperands;
resultTypes.reserve(op.getNumDpsInits());
outputOperands.reserve(op.getNumDpsInits());
for (OpOperand &output : op.getDpsInitsMutable()) {
@@ -1475,41 +1471,69 @@ Operation *createCollapsedOp(LinalgType op,
if (!op.hasPureBufferSemantics())
resultTypes.push_back(newOutput.getType());
}
+}
- if (isa<linalg::CopyOp>(op)) {
- return rewriter.create<linalg::CopyOp>(loc, inputOperands[0],
- outputOperands[0]);
- }
+/// Clone a `LinalgOp` to a collapsed version of the same name
+template <typename OpTy>
+OpTy cloneToCollapsedOp(RewriterBase &rewriter, OpTy origOp,
+ const CollapsingInfo &collapsingInfo) {
+ return nullptr;
+}
- // Get the iterator types for the operand.
- SmallVector<utils::IteratorType> iteratorTypes =
- getCollapsedOpIteratorTypes(op.getIteratorTypesArray(), collapsingInfo);
+/// Collapse any `LinalgOp` that does not require any specialization such as
+/// indexing_maps, iterator_types, etc.
+template <>
+LinalgOp cloneToCollapsedOp<LinalgOp>(RewriterBase &rewriter, LinalgOp origOp,
+ const CollapsingInfo &collapsingInfo) {
+ SmallVector<Value> inputOperands, outputOperands;
+ SmallVector<Type> resultTypes;
+ collapseOperandsAndResults(origOp, collapsingInfo, rewriter, inputOperands,
+ outputOperands, resultTypes);
+ return cast<LinalgOp>(clone(
+ rewriter, origOp, resultTypes,
+ llvm::to_vector(llvm::concat<Value>(inputOperands, outputOperands))));
+}
- // Get the indexing maps.
- auto indexingMaps =
- llvm::map_to_vector(op.getIndexingMapsArray(), [&](AffineMap map) {
+/// Collapse a `GenericOp`
+template <>
+GenericOp cloneToCollapsedOp<GenericOp>(RewriterBase &rewriter,
+ GenericOp origOp,
+ const CollapsingInfo &collapsingInfo) {
+ SmallVector<Value> inputOperands, outputOperands;
+ SmallVector<Type> resultTypes;
+ collapseOperandsAndResults(origOp, collapsingInfo, rewriter, inputOperands,
+ outputOperands, resultTypes);
+ SmallVector<AffineMap> indexingMaps(
+ llvm::map_range(origOp.getIndexingMapsArray(), [&](AffineMap map) {
return getCollapsedOpIndexingMap(map, collapsingInfo);
- });
+ }));
+
+ SmallVector<utils::IteratorType> iteratorTypes(getCollapsedOpIteratorTypes(
+ origOp.getIteratorTypesArray(), collapsingInfo));
- Operation *collapsedOp = rewriter.create<linalg::GenericOp>(
- loc, resultTypes, inputOperands, outputOperands, indexingMaps,
+ GenericOp collapsedOp = rewriter.create<linalg::GenericOp>(
+ origOp.getLoc(), resultTypes, inputOperands, outputOperands, indexingMaps,
iteratorTypes, [](OpBuilder &builder, Location loc, ValueRange args) {});
- Block *origOpBlock = &op->getRegion(0).front();
+ Block *origOpBlock = &origOp->getRegion(0).front();
Block *collapsedOpBlock = &collapsedOp->getRegion(0).front();
rewriter.mergeBlocks(origOpBlock, collapsedOpBlock,
collapsedOpBlock->getArguments());
-
return collapsedOp;
}
+LinalgOp createCollapsedOp(LinalgOp op, const CollapsingInfo &collapsingInfo,
+ RewriterBase &rewriter) {
+ if (GenericOp genericOp = dyn_cast<GenericOp>(op.getOperation())) {
+ return cloneToCollapsedOp(rewriter, genericOp, collapsingInfo);
+ } else {
+ return cloneToCollapsedOp(rewriter, op, collapsingInfo);
+ }
+}
+
/// Implementation of fusion with reshape operation by collapsing dimensions.
-template <typename LinalgType>
-FailureOr<SmallVector<Value>> mlir::linalg::collapseOpIterationDims(
- LinalgType op, ArrayRef<ReassociationIndices> foldedIterationDims,
+FailureOr<CollapseResult> mlir::linalg::collapseOpIterationDims(
+ LinalgOp op, ArrayRef<ReassociationIndices> foldedIterationDims,
RewriterBase &rewriter) {
- static_assert(llvm::is_one_of<LinalgType, GenericOp, CopyOp>::value,
- "unsupported linalg op type to collapse");
-
// Bail on trivial no-op cases.
if (op.getNumLoops() <= 1 || foldedIterationDims.empty() ||
llvm::all_of(foldedIterationDims, [](ReassociationIndicesRef foldedDims) {
@@ -1538,8 +1562,7 @@ FailureOr<SmallVector<Value>> mlir::linalg::collapseOpIterationDims(
}
// Bail on non-canonical ranges.
- SmallVector<Range> loopRanges =
- cast<LinalgOp>(op.getOperation()).createLoopRanges(rewriter, op.getLoc());
+ SmallVector<Range> loopRanges = op.createLoopRanges(rewriter, op.getLoc());
auto opFoldIsConstantValue = [](OpFoldResult ofr, int64_t value) {
if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr))
return cast<IntegerAttr>(attr).getInt() == value;
@@ -1555,8 +1578,7 @@ FailureOr<SmallVector<Value>> mlir::linalg::collapseOpIterationDims(
op, "expected all loop ranges to have zero start and unit stride");
}
- LinalgType collapsedOp = cast<LinalgType>(
- createCollapsedOp<LinalgType>(op, collapsingInfo, rewriter));
+ LinalgOp collapsedOp = createCollapsedOp(op, collapsingInfo, rewriter);
Location loc = op->getLoc();
if (collapsedOp.hasIndexSemantics()) {
@@ -1597,7 +1619,7 @@ FailureOr<SmallVector<Value>> mlir::linalg::collapseOpIterationDims(
results.push_back(collapsedOpResult);
}
}
- return results;
+ return CollapseResult{results, collapsedOp};
}
namespace {
@@ -1629,15 +1651,14 @@ public:
continue;
}
- std::optional<SmallVector<Value>> replacements =
- collapseOpIterationDims<linalg::GenericOp>(
- genericOp, collapsableIterationDims, rewriter);
- if (!replacements) {
+ std::optional<CollapseResult> collapseResult = collapseOpIterationDims(
+ genericOp, collapsableIterationDims, rewriter);
+ if (!collapseResult) {
return rewriter.notifyMatchFailure(
genericOp, "failed to do the fusion by collapsing transformation");
}
- rewriter.replaceOp(genericOp, *replacements);
+ rewriter.replaceOp(genericOp, collapseResult->results);
return success();
}
return failure();
@@ -1671,13 +1692,12 @@ public:
op, "specified dimensions cannot be collapsed");
}
- std::optional<SmallVector<Value>> replacements =
- collapseOpIterationDims<LinalgType>(op, collapsableIterationDims,
- rewriter);
- if (!replacements) {
+ std::optional<CollapseResult> collapseResult =
+ collapseOpIterationDims(op, collapsableIterationDims, rewriter);
+ if (!collapseResult) {
return rewriter.notifyMatchFailure(op, "failed to collapse dimensions");
}
- rewriter.replaceOp(op, *replacements);
+ rewriter.replaceOp(op, collapseResult->results);
return success();
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index ac043e87223d..1e703dacfd0c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -891,8 +891,7 @@ static bool isContiguousLoadIdx(LinalgOp &linalgOp, Value &val,
// Conservatively reject Ops that could lead to indices with stride other
// than 1.
- if (!isa<arith::AddIOp, arith::SubIOp, arith::ConstantOp, linalg::IndexOp>(
- ancestor))
+ if (!isa<arith::AddIOp, arith::ConstantOp, linalg::IndexOp>(ancestor))
return false;
bool result = false;
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index c2b471ab9618..8a6980e2c6a2 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -1957,7 +1957,10 @@ LogicalResult PrivateClauseOp::verify() {
Type symType = getType();
auto verifyTerminator = [&](Operation *terminator) -> LogicalResult {
- if (!terminator->hasSuccessors() && !llvm::isa<YieldOp>(terminator))
+ if (!terminator->getBlock()->getSuccessors().empty())
+ return success();
+
+ if (!llvm::isa<YieldOp>(terminator))
return mlir::emitError(terminator->getLoc())
<< "expected exit block terminator to be an `omp.yield` op.";
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 69c3413f35ea..232635ca84a4 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -1445,6 +1445,38 @@ OpFoldResult ReinterpretMapOp::fold(FoldAdaptor adaptor) {
return {};
}
+template <typename ToBufferOp>
+static LogicalResult inferSparseBufferType(ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop,
+ RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ typename ToBufferOp::Adaptor adaptor(ops, attr, prop, region);
+ SparseTensorType stt = getSparseTensorType(adaptor.getTensor());
+ Type elemTp = nullptr;
+ bool withStride = false;
+ if constexpr (std::is_same_v<ToBufferOp, ToPositionsOp>) {
+ elemTp = stt.getPosType();
+ } else if constexpr (std::is_same_v<ToBufferOp, ToCoordinatesOp> ||
+ std::is_same_v<ToBufferOp, ToCoordinatesBufferOp>) {
+ elemTp = stt.getCrdType();
+ if constexpr (std::is_same_v<ToBufferOp, ToCoordinatesOp>)
+ withStride = stt.getAoSCOOStart() <= adaptor.getLevel();
+ } else if constexpr (std::is_same_v<ToBufferOp, ToValuesOp>) {
+ elemTp = stt.getElementType();
+ }
+
+ assert(elemTp && "unhandled operation.");
+ SmallVector<int64_t> bufShape = stt.getBatchLvlShape();
+ bufShape.push_back(ShapedType::kDynamic);
+
+ auto layout = withStride ? StridedLayoutAttr::StridedLayoutAttr::get(
+ stt.getContext(), ShapedType::kDynamic,
+ {ShapedType::kDynamic})
+ : StridedLayoutAttr();
+ ret.emplace_back(MemRefType::get(bufShape, elemTp, layout));
+ return success();
+}
+
LogicalResult ToPositionsOp::verify() {
auto stt = getSparseTensorType(getTensor());
if (failed(lvlIsInBounds(getLevel(), getTensor())))
@@ -1454,6 +1486,14 @@ LogicalResult ToPositionsOp::verify() {
return success();
}
+LogicalResult
+ToPositionsOp::inferReturnTypes(MLIRContext *ctx, std::optional<Location> loc,
+ ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop, RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToPositionsOp>(ops, attr, prop, region, ret);
+}
+
LogicalResult ToCoordinatesOp::verify() {
auto stt = getSparseTensorType(getTensor());
if (failed(lvlIsInBounds(getLevel(), getTensor())))
@@ -1463,6 +1503,14 @@ LogicalResult ToCoordinatesOp::verify() {
return success();
}
+LogicalResult
+ToCoordinatesOp::inferReturnTypes(MLIRContext *ctx, std::optional<Location> loc,
+ ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop, RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToCoordinatesOp>(ops, attr, prop, region, ret);
+}
+
LogicalResult ToCoordinatesBufferOp::verify() {
auto stt = getSparseTensorType(getTensor());
if (stt.getAoSCOOStart() >= stt.getLvlRank())
@@ -1470,6 +1518,14 @@ LogicalResult ToCoordinatesBufferOp::verify() {
return success();
}
+LogicalResult ToCoordinatesBufferOp::inferReturnTypes(
+ MLIRContext *ctx, std::optional<Location> loc, ValueRange ops,
+ DictionaryAttr attr, OpaqueProperties prop, RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToCoordinatesBufferOp>(ops, attr, prop, region,
+ ret);
+}
+
LogicalResult ToValuesOp::verify() {
auto stt = getSparseTensorType(getTensor());
auto mtp = getMemRefType(getResult());
@@ -1478,6 +1534,15 @@ LogicalResult ToValuesOp::verify() {
return success();
}
+LogicalResult ToValuesOp::inferReturnTypes(MLIRContext *ctx,
+ std::optional<Location> loc,
+ ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop,
+ RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToValuesOp>(ops, attr, prop, region, ret);
+}
+
LogicalResult ToSliceOffsetOp::verify() {
auto rank = getRankedTensorType(getSlice()).getRank();
if (rank <= getDim().getSExtValue() || getDim().getSExtValue() < 0)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
index cdee8a46f551..cb75f6a0ea88 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -496,11 +496,11 @@ static Value genFirstPosOrCrds(OpBuilder &builder, Location loc, Value a,
if (format == CuSparseFormat::kCOO) {
// Library uses SoA COO, direct IR uses AoS COO.
if (enableRT)
- return genToCoordinates(builder, loc, a, 0);
- return genToCoordinatesBuffer(builder, loc, a);
+ return builder.create<ToCoordinatesOp>(loc, a, 0);
+ return builder.create<ToCoordinatesBufferOp>(loc, a);
}
// Formats CSR/CSC and BSR use positions at 1.
- return genToPositions(builder, loc, a, 1);
+ return builder.create<ToPositionsOp>(loc, a, 1);
}
/// Generates the second coordinates of a sparse matrix.
@@ -510,7 +510,7 @@ static Value genSecondCrds(OpBuilder &builder, Location loc, Value a,
if (isCOO && !enableRT)
return Value(); // nothing needed
// Formats CSR/CSC and BSR use coordinates at 1.
- return genToCoordinates(builder, loc, a, 1);
+ return builder.create<ToCoordinatesOp>(loc, a, 1);
}
/// Generates the sparse matrix handle.
@@ -584,7 +584,7 @@ static LogicalResult rewriteSpMV(PatternRewriter &rewriter,
Value szX = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
Value memR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
Value memC = genSecondCrds(rewriter, loc, a, format, enableRT); // or empty
- Value memV = genToValues(rewriter, loc, a);
+ Value memV = rewriter.create<ToValuesOp>(loc, a);
Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
Value valA = genAllocCopy(rewriter, loc, memV, tokens);
@@ -682,7 +682,7 @@ static LogicalResult rewriteSpMM(PatternRewriter &rewriter,
Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
Value memR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
Value memC = genSecondCrds(rewriter, loc, a, format, enableRT); // or empty
- Value memV = genToValues(rewriter, loc, a);
+ Value memV = rewriter.create<ToValuesOp>(loc, a);
Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
Value valA = genAllocCopy(rewriter, loc, memV, tokens);
@@ -785,10 +785,10 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
Value amemR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
Value amemC = genSecondCrds(rewriter, loc, a, format, enableRT); // not empty
- Value amemV = genToValues(rewriter, loc, a);
+ Value amemV = rewriter.create<ToValuesOp>(loc, a);
Value bmemR = genFirstPosOrCrds(rewriter, loc, b, format, enableRT);
Value bmemC = genSecondCrds(rewriter, loc, b, format, enableRT); // not empty
- Value bmemV = genToValues(rewriter, loc, b);
+ Value bmemV = rewriter.create<ToValuesOp>(loc, b);
Value rowA = genAllocCopy(rewriter, loc, amemR, tokens);
Value colA = genAllocCopy(rewriter, loc, amemC, tokens);
Value valA = genAllocCopy(rewriter, loc, amemV, tokens);
@@ -1081,7 +1081,7 @@ static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
Value matB = genAllocCopy(rewriter, loc, bufB, tokens);
Value memR = genFirstPosOrCrds(rewriter, loc, c, format, enableRT);
Value memC = genSecondCrds(rewriter, loc, c, format, enableRT); // or empty
- Value memV = genToValues(rewriter, loc, c);
+ Value memV = rewriter.create<ToValuesOp>(loc, c);
Value rowC = genAllocCopy(rewriter, loc, memR, tokens);
Value colC = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
Value valC = genAllocCopy(rewriter, loc, memV, tokens);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index d5eec4ae67e7..4e3393195813 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1058,17 +1058,9 @@ public:
// Replace the requested coordinates access with corresponding field.
// The cast_op is inserted by type converter to intermix 1:N type
// conversion.
- Location loc = op.getLoc();
auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
- Value field = desc.getCrdMemRefOrView(rewriter, loc, op.getLevel());
-
- // Insert a cast to bridge the actual type to the user expected type. If the
- // actual type and the user expected type aren't compatible, the compiler or
- // the runtime will issue an error.
- Type resType = op.getResult().getType();
- if (resType != field.getType())
- field = rewriter.create<memref::CastOp>(loc, resType, field);
- rewriter.replaceOp(op, field);
+ rewriter.replaceOp(
+ op, desc.getCrdMemRefOrView(rewriter, op.getLoc(), op.getLevel()));
return success();
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 1bcc131781d3..6ff21468e057 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -21,9 +21,11 @@
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorType.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Support/LLVM.h"
@@ -598,6 +600,101 @@ public:
}
};
+/// Sparse rewriting rule for the print operator. This operation is mainly used
+/// for debugging and testing. As such, it lowers to the vector.print operation
+/// which only requires very light-weight runtime support.
+struct PrintRewriter : public OpRewritePattern<PrintOp> {
+public:
+ using OpRewritePattern::OpRewritePattern;
+ LogicalResult matchAndRewrite(PrintOp op,
+ PatternRewriter &rewriter) const override {
+ Location loc = op.getLoc();
+ auto tensor = op.getTensor();
+ auto stt = getSparseTensorType(tensor);
+ // Header with NSE.
+ auto nse = rewriter.create<NumberOfEntriesOp>(loc, tensor);
+ rewriter.create<vector::PrintOp>(
+ loc, rewriter.getStringAttr("---- Sparse Tensor ----\nnse = "));
+ rewriter.create<vector::PrintOp>(loc, nse);
+ // Use the "codegen" foreach loop construct to iterate over
+ // all typical sparse tensor components for printing.
+ foreachFieldAndTypeInSparseTensor(stt, [&rewriter, &loc, &tensor,
+ &stt](Type, FieldIndex,
+ SparseTensorFieldKind kind,
+ Level l, LevelType) {
+ switch (kind) {
+ case SparseTensorFieldKind::StorageSpec: {
+ break;
+ }
+ case SparseTensorFieldKind::PosMemRef: {
+ auto lvl = constantIndex(rewriter, loc, l);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("pos["));
+ rewriter.create<vector::PrintOp>(
+ loc, lvl, vector::PrintPunctuation::NoPunctuation);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
+ auto pos = rewriter.create<ToPositionsOp>(loc, tensor, l);
+ printContents(rewriter, loc, pos);
+ break;
+ }
+ case SparseTensorFieldKind::CrdMemRef: {
+ auto lvl = constantIndex(rewriter, loc, l);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("crd["));
+ rewriter.create<vector::PrintOp>(
+ loc, lvl, vector::PrintPunctuation::NoPunctuation);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
+ Value crd = nullptr;
+      // TODO: eliminate ToCoordinatesBufferOp!
+ if (stt.getAoSCOOStart() == l)
+ crd = rewriter.create<ToCoordinatesBufferOp>(loc, tensor);
+ else
+ crd = rewriter.create<ToCoordinatesOp>(loc, tensor, l);
+ printContents(rewriter, loc, crd);
+ break;
+ }
+ case SparseTensorFieldKind::ValMemRef: {
+ rewriter.create<vector::PrintOp>(loc,
+ rewriter.getStringAttr("values : "));
+ auto val = rewriter.create<ToValuesOp>(loc, tensor);
+ printContents(rewriter, loc, val);
+ break;
+ }
+ }
+ return true;
+ });
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("----\n"));
+ rewriter.eraseOp(op);
+ return success();
+ }
+
+private:
+ // Helper to print contents of a single memref. Note that for the "push_back"
+ // vectors, this prints the full capacity, not just the size. This is done
+ // on purpose, so that clients see how much storage has been allocated in
+ // total. Contents of the extra capacity in the buffer may be uninitialized
+ // (unless the flag enable-buffer-initialization is set to true).
+ //
+ // Generates code to print:
+ // ( a0, a1, ... )
+ static void printContents(PatternRewriter &rewriter, Location loc,
+ Value vec) {
+ // Open bracket.
+ rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
+ // For loop over elements.
+ auto zero = constantIndex(rewriter, loc, 0);
+ auto size = rewriter.create<memref::DimOp>(loc, vec, zero);
+ auto step = constantIndex(rewriter, loc, 1);
+ auto forOp = rewriter.create<scf::ForOp>(loc, zero, size, step);
+ rewriter.setInsertionPointToStart(forOp.getBody());
+ auto idx = forOp.getInductionVar();
+ auto val = rewriter.create<memref::LoadOp>(loc, vec, idx);
+ rewriter.create<vector::PrintOp>(loc, val, vector::PrintPunctuation::Comma);
+ rewriter.setInsertionPointAfter(forOp);
+ // Close bracket and end of line.
+ rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Close);
+ rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::NewLine);
+ }
+};
+
/// Sparse rewriting rule for sparse-to-sparse reshape operator.
struct TensorReshapeRewriter : public OpRewritePattern<tensor::ReshapeOp> {
public:
@@ -1284,7 +1381,8 @@ struct OutRewriter : public OpRewritePattern<OutOp> {
void mlir::populatePreSparsificationRewriting(RewritePatternSet &patterns) {
patterns.add<FoldInvariantYield, FuseSparseMultiplyOverAdd, FuseTensorCast,
- GenSemiRingReduction, GenSemiRingSelect>(patterns.getContext());
+ GenSemiRingReduction, GenSemiRingSelect, PrintRewriter>(
+ patterns.getContext());
}
void mlir::populateLowerSparseOpsToForeachPatterns(RewritePatternSet &patterns,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
index b888dfadb9c7..fa570159ba41 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
@@ -554,41 +554,6 @@ sparse_tensor::genToMemref(OpBuilder &builder, Location loc, Value tensor) {
.getResult();
}
-Value sparse_tensor::genToPositions(OpBuilder &builder, Location loc,
- Value tensor, Level lvl) {
- const auto srcTp = getSparseTensorType(tensor);
- const Type posTp = srcTp.getPosType();
- const Type memTp = get1DMemRefType(posTp, /*withLayout=*/false);
- return builder.create<ToPositionsOp>(loc, memTp, tensor,
- builder.getIndexAttr(lvl));
-}
-
-Value sparse_tensor::genToCoordinates(OpBuilder &builder, Location loc,
- Value tensor, Level lvl) {
- const auto srcTp = getSparseTensorType(tensor);
- const Type crdTp = srcTp.getCrdType();
- const Type memTp =
- get1DMemRefType(crdTp, /*withLayout=*/lvl >= srcTp.getAoSCOOStart());
- return builder.create<ToCoordinatesOp>(loc, memTp, tensor,
- builder.getIndexAttr(lvl));
-}
-
-Value sparse_tensor::genToCoordinatesBuffer(OpBuilder &builder, Location loc,
- Value tensor) {
- const auto srcTp = getSparseTensorType(tensor);
- const Type crdTp = srcTp.getCrdType();
- const Type memTp = get1DMemRefType(crdTp, /*withLayout=*/false);
- return builder.create<ToCoordinatesBufferOp>(loc, memTp, tensor);
-}
-
-Value sparse_tensor::genToValues(OpBuilder &builder, Location loc,
- Value tensor) {
- RankedTensorType srcTp = getRankedTensorType(tensor);
- Type valTp = get1DMemRefType(srcTp.getElementType(),
- /*withLayout=*/false);
- return builder.create<ToValuesOp>(loc, valTp, tensor);
-}
-
Value sparse_tensor::genValMemSize(OpBuilder &builder, Location loc,
Value tensor) {
return getDescriptorFromTensorTuple(tensor).getValMemSize(builder, loc);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
index cc119bc70455..e8f6bd1c5eae 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
@@ -228,17 +228,6 @@ void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer);
void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, Value src);
-/// Generates a 1D MemRefType with a dynamic size. When withLayout is set, the
-/// returned memref has a layout has unknown strides and offsets. Otherwise,
-/// a memref with a standard unit stride zero offset layout is returned.
-inline MemRefType get1DMemRefType(Type etp, bool withLayout) {
- auto layout = withLayout ? StridedLayoutAttr::StridedLayoutAttr::get(
- etp.getContext(), ShapedType::kDynamic,
- {ShapedType::kDynamic})
- : StridedLayoutAttr();
- return MemRefType::get(ShapedType::kDynamic, etp, layout);
-}
-
/// Scans to top of generated loop.
Operation *getTop(Operation *op);
@@ -281,22 +270,6 @@ void storeAll(OpBuilder &builder, Location loc, Value mem, ValueRange vs,
TypedValue<BaseMemRefType> genToMemref(OpBuilder &builder, Location loc,
Value tensor);
-/// Infers the result type and generates `ToPositionsOp`.
-Value genToPositions(OpBuilder &builder, Location loc, Value tensor, Level lvl);
-
-/// Infers the result type and generates `ToCoordinatesOp`. If the
-/// level is within a COO region, the result type is a memref with unknown
-/// stride and offset. Otherwise, the result type is a memref without
-/// any specified layout.
-Value genToCoordinates(OpBuilder &builder, Location loc, Value tensor,
- Level lvl);
-
-/// Infers the result type and generates `ToCoordinatesBufferOp`.
-Value genToCoordinatesBuffer(OpBuilder &builder, Location loc, Value tensor);
-
-/// Infers the result type and generates `ToValuesOp`.
-Value genToValues(OpBuilder &builder, Location loc, Value tensor);
-
/// Generates code to retrieve the values size for the sparse tensor.
Value genValMemSize(OpBuilder &builder, Location loc, Value tensor);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
index 0ead135c90d3..812c288a20c2 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
@@ -259,7 +259,7 @@ void LoopEmitter::initializeLoopEmit(
// Annotated sparse tensors.
// We also need the value buffer for all-dense annotated "sparse"
// tensors.
- valBuffer[t] = genToValues(builder, loc, tensor);
+ valBuffer[t] = builder.create<ToValuesOp>(loc, tensor);
}
// NOTE: we can also prepare for 0 lvl here in advance, this will hoist
// some loop preparation from tensor iteration, but will also (undesirably)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
index 011d814cd900..8edacaa9981e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
@@ -1281,21 +1281,21 @@ sparse_tensor::makeSparseTensorLevel(OpBuilder &b, Location l, Value t,
case LevelFormat::Batch:
llvm_unreachable("not implemented");
case LevelFormat::Compressed: {
- Value pos = genToPositions(b, l, t, lvl);
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value pos = b.create<ToPositionsOp>(l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<CompressedLevel>(tid, lvl, lt, sz, pos, crd);
}
case LevelFormat::LooseCompressed: {
- Value pos = genToPositions(b, l, t, lvl);
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value pos = b.create<ToPositionsOp>(l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<LooseCompressedLevel>(tid, lvl, lt, sz, pos, crd);
}
case LevelFormat::Singleton: {
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<SingletonLevel>(tid, lvl, lt, sz, crd);
}
case LevelFormat::NOutOfM: {
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<NOutOfMLevel>(tid, lvl, lt, sz, crd);
}
case LevelFormat::Undef:
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index e6efec14e31a..fe2f250e6b92 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -4012,15 +4012,17 @@ static bool inferStaticShape(PackOp packOp, SmallVectorImpl<int64_t> &srcShape,
llvm::SmallSetVector<int64_t, 4> innerDims;
innerDims.insert(packOp.getInnerDimsPos().begin(),
packOp.getInnerDimsPos().end());
- auto outerDimsPerm = packOp.getOuterDimsPerm();
+ SmallVector<int64_t> inverseOuterDimsPerm;
+ if (!packOp.getOuterDimsPerm().empty())
+ inverseOuterDimsPerm = invertPermutationVector(packOp.getOuterDimsPerm());
int srcRank = packOp.getSourceRank();
for (auto i : llvm::seq<int64_t>(0, srcRank)) {
if (innerDims.contains(i))
continue;
int64_t srcPos = i;
int64_t destPos = i;
- if (!outerDimsPerm.empty())
- destPos = outerDimsPerm[srcPos];
+ if (!inverseOuterDimsPerm.empty())
+ destPos = inverseOuterDimsPerm[srcPos];
if (ShapedType::isDynamic(srcShape[srcPos]) ==
ShapedType::isDynamic(destShape[destPos])) {
continue;
@@ -4240,15 +4242,17 @@ static bool inferStaticShape(UnPackOp op, SmallVectorImpl<int64_t> &srcShape,
op.getDestType().getShape().end());
llvm::SmallSetVector<int64_t, 4> innerDims;
innerDims.insert(op.getInnerDimsPos().begin(), op.getInnerDimsPos().end());
- auto outerDimsPerm = op.getOuterDimsPerm();
+ SmallVector<int64_t> inverseOuterDimsPerm;
+ if (!op.getOuterDimsPerm().empty())
+ inverseOuterDimsPerm = invertPermutationVector(op.getOuterDimsPerm());
int destRank = op.getDestRank();
for (auto i : llvm::seq<int64_t>(0, destRank)) {
if (innerDims.contains(i))
continue;
int64_t srcPos = i;
int64_t destPos = i;
- if (!outerDimsPerm.empty())
- srcPos = outerDimsPerm[destPos];
+ if (!inverseOuterDimsPerm.empty())
+ srcPos = inverseOuterDimsPerm[destPos];
if (ShapedType::isDynamic(srcShape[srcPos]) ==
ShapedType::isDynamic(destShape[destPos])) {
continue;
diff --git a/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp b/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp
index 5073234a7e35..7adf223f3440 100644
--- a/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp
+++ b/mlir/lib/Dialect/Transform/Transforms/InterpreterPass.cpp
@@ -50,12 +50,79 @@ static Operation *findPayloadRoot(Operation *passRoot, StringRef tag) {
return WalkResult::interrupt();
});
+ if (!target) {
+ passRoot->emitError()
+ << "could not find the operation with transform.target_tag=\"" << tag
+ << "\" attribute";
+ return nullptr;
+ }
+
return walkResult.wasInterrupted() ? nullptr : target;
}
namespace {
class InterpreterPass
: public transform::impl::InterpreterPassBase<InterpreterPass> {
+ // Parses the pass arguments to bind trailing arguments of the entry point.
+ std::optional<RaggedArray<transform::MappedValue>>
+ parseArguments(Operation *payloadRoot) {
+ MLIRContext *context = payloadRoot->getContext();
+
+ SmallVector<SmallVector<transform::MappedValue>, 2> trailingBindings;
+ trailingBindings.resize(debugBindTrailingArgs.size());
+
+ // Construct lists of op names to match.
+ SmallVector<std::optional<OperationName>> debugBindNames;
+ debugBindNames.reserve(debugBindTrailingArgs.size());
+ for (auto &&[position, nameString] :
+ llvm::enumerate(debugBindTrailingArgs)) {
+ StringRef name = nameString;
+
+ // Parse the integer literals.
+ if (name.starts_with("#")) {
+ debugBindNames.push_back(std::nullopt);
+ StringRef lhs = "";
+ StringRef rhs = name.drop_front();
+ do {
+ std::tie(lhs, rhs) = rhs.split(';');
+ int64_t value;
+ if (lhs.getAsInteger(10, value)) {
+ emitError(UnknownLoc::get(context))
+ << "couldn't parse integer pass argument " << name;
+ return std::nullopt;
+ }
+ trailingBindings[position].push_back(
+ Builder(context).getI64IntegerAttr(value));
+ } while (!rhs.empty());
+ } else if (name.starts_with("^")) {
+ debugBindNames.emplace_back(OperationName(name.drop_front(), context));
+ } else {
+ debugBindNames.emplace_back(OperationName(name, context));
+ }
+ }
+
+ // Collect operations or results for extra bindings.
+ payloadRoot->walk([&](Operation *payload) {
+ for (auto &&[position, name] : llvm::enumerate(debugBindNames)) {
+ if (!name || payload->getName() != *name)
+ continue;
+
+ if (StringRef(*std::next(debugBindTrailingArgs.begin(), position))
+ .starts_with("^")) {
+ llvm::append_range(trailingBindings[position], payload->getResults());
+ } else {
+ trailingBindings[position].push_back(payload);
+ }
+ }
+ });
+
+ RaggedArray<transform::MappedValue> bindings;
+ bindings.push_back(ArrayRef<Operation *>{payloadRoot});
+ for (SmallVector<transform::MappedValue> &trailing : trailingBindings)
+ bindings.push_back(std::move(trailing));
+ return bindings;
+ }
+
public:
using Base::Base;
@@ -67,34 +134,18 @@ public:
findPayloadRoot(getOperation(), debugPayloadRootTag);
if (!payloadRoot)
return signalPassFailure();
- auto debugBindNames = llvm::map_to_vector(
- debugBindTrailingArgs,
- [&](const std::string &name) { return OperationName(name, context); });
- SmallVector<SmallVector<Operation *>, 2> trailingBindings;
- trailingBindings.resize(debugBindNames.size());
- payloadRoot->walk([&](Operation *payload) {
- for (auto &&[position, name] : llvm::enumerate(debugBindNames)) {
- if (payload->getName() == name)
- trailingBindings[position].push_back(payload);
- }
- });
Operation *transformEntryPoint = transform::detail::findTransformEntryPoint(
getOperation(), transformModule, entryPoint);
- if (!transformEntryPoint) {
- getOperation()->emitError()
- << "could not find transform entry point: " << entryPoint
- << " in either payload or transform module";
+ if (!transformEntryPoint)
return signalPassFailure();
- }
-
- RaggedArray<transform::MappedValue> bindings;
- bindings.push_back(ArrayRef<Operation *>{payloadRoot});
- for (SmallVector<Operation *> &trailing : trailingBindings)
- bindings.push_back(std::move(trailing));
+ std::optional<RaggedArray<transform::MappedValue>> bindings =
+ parseArguments(payloadRoot);
+ if (!bindings)
+ return signalPassFailure();
if (failed(transform::applyTransformNamedSequence(
- bindings,
+ *bindings,
cast<transform::TransformOpInterface>(transformEntryPoint),
transformModule,
options.enableExpensiveChecks(!disableExpensiveChecks)))) {
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 8c341a347ff6..5be6a628904c 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -2040,77 +2040,6 @@ public:
}
};
-// Patterns to rewrite ExtractOp(ConstantMaskOp)
-//
-// When the result of ExtractOp is a subvector of input, we can rewrite it as
-// a ConstantMaskOp with subvector ranks.
-//
-// ExtractOp(ConstantMaskOp) -> ConstantMaskOp
-//
-// When the result of ExtractOp is a scalar, we can get the scalar value
-// directly.
-//
-// ExtractOp(ConstantMaskOp) -> ConstantOp
-class ExtractOpFromConstantMask final : public OpRewritePattern<ExtractOp> {
-public:
- using OpRewritePattern::OpRewritePattern;
-
- LogicalResult matchAndRewrite(ExtractOp extractOp,
- PatternRewriter &rewriter) const override {
- auto constantMaskOp =
- extractOp.getVector().getDefiningOp<vector::ConstantMaskOp>();
- if (!constantMaskOp)
- return failure();
-
- // All indices must be static.
- ArrayRef<int64_t> extractOpPos = extractOp.getStaticPosition();
- unsigned dynamicPosCount =
- llvm::count_if(extractOpPos, ShapedType::isDynamic);
- // If there is any dynamic position in ExtractOp, we cannot determine the
- // scalar value.
- if (dynamicPosCount)
- return failure();
-
- ArrayRef<Attribute> maskDimSizes =
- constantMaskOp.getMaskDimSizes().getValue();
- Type resultTy = extractOp.getResult().getType();
- if (resultTy.isa<mlir::VectorType>()) {
- auto resultVectorTy = resultTy.cast<mlir::VectorType>();
- int64_t resultRank = resultVectorTy.getRank();
- int64_t n = maskDimSizes.size();
- std::vector<int64_t> indices;
- for (auto i = n - resultRank; i < n; ++i)
- indices.push_back(cast<IntegerAttr>(maskDimSizes[i]).getInt());
-
- rewriter.replaceOpWithNewOp<vector::ConstantMaskOp>(
- extractOp, resultVectorTy,
- vector::getVectorSubscriptAttr(rewriter, indices));
-
- return success();
- } else if (resultTy.isa<mlir::IntegerType>()) {
- // ConstantMaskOp creates and returns a vector mask where elements of the
- // result vector are set to ‘0’ or ‘1’, based on whether the element
- // indices are contained within a hyper-rectangular region.
- // We go through ExtractOp static positions to determine the position is
- // within the hyper-rectangular region or not.
- Type boolType = rewriter.getI1Type();
- IntegerAttr setAttr = IntegerAttr::get(boolType, 1);
- for (size_t i = 0, end = extractOpPos.size(); i < end; ++i) {
- if (cast<IntegerAttr>(maskDimSizes[i]).getInt() <= extractOpPos[i]) {
- setAttr = IntegerAttr::get(boolType, 0);
- break;
- }
- }
-
- rewriter.replaceOpWithNewOp<arith::ConstantOp>(extractOp, boolType,
- setAttr);
- return success();
- }
-
- return failure();
- }
-};
-
// Folds extract(shape_cast(..)) into shape_cast when the total element count
// does not change.
LogicalResult foldExtractFromShapeCastToShapeCast(ExtractOp extractOp,
@@ -2137,8 +2066,7 @@ LogicalResult foldExtractFromShapeCastToShapeCast(ExtractOp extractOp,
void ExtractOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.add<ExtractOpSplatConstantFolder, ExtractOpNonSplatConstantFolder,
- ExtractOpFromBroadcast, ExtractOpFromCreateMask,
- ExtractOpFromConstantMask>(context);
+ ExtractOpFromBroadcast, ExtractOpFromCreateMask>(context);
results.add(foldExtractFromShapeCastToShapeCast);
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
index 620ceee48b19..b3ab4a916121 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
@@ -443,15 +443,24 @@ static vector::TransferWriteOp cloneWriteOp(RewriterBase &rewriter,
/// d1) and return vector<16x2x64>
static VectorType getDistributedType(VectorType originalType, AffineMap map,
int64_t warpSize) {
- if (map.getNumResults() != 1)
- return VectorType();
SmallVector<int64_t> targetShape(originalType.getShape().begin(),
originalType.getShape().end());
for (unsigned i = 0, e = map.getNumResults(); i < e; i++) {
unsigned position = map.getDimPosition(i);
- if (targetShape[position] % warpSize != 0)
- return VectorType();
+ if (targetShape[position] % warpSize != 0) {
+ if (warpSize % targetShape[position] != 0) {
+ return VectorType();
+ }
+ warpSize /= targetShape[position];
+ targetShape[position] = 1;
+ continue;
+ }
targetShape[position] = targetShape[position] / warpSize;
+ warpSize = 1;
+ break;
+ }
+ if (warpSize != 1) {
+ return VectorType();
}
VectorType targetType =
VectorType::get(targetShape, originalType.getElementType());
@@ -526,7 +535,30 @@ struct WarpOpTransferWrite : public OpRewritePattern<WarpExecuteOnLane0Op> {
// 4. Reindex the write using the distribution map.
auto newWarpOp =
newWriteOp.getVector().getDefiningOp<WarpExecuteOnLane0Op>();
+
+ // Delinearize the lane id based on the way threads are divided across the
+ // vector. To get the number of threads per vector dimension, divide the
+ // sequential size by the distributed size along each dim.
rewriter.setInsertionPoint(newWriteOp);
+ SmallVector<OpFoldResult> delinearizedIdSizes;
+ for (auto [seqSize, distSize] :
+ llvm::zip_equal(writtenVectorType.getShape(), targetType.getShape())) {
+ assert(seqSize % distSize == 0 && "Invalid distributed vector shape");
+ delinearizedIdSizes.push_back(rewriter.getIndexAttr(seqSize / distSize));
+ }
+ SmallVector<Value> delinearized;
+ if (map.getNumResults() > 1) {
+ delinearized = rewriter
+ .create<mlir::affine::AffineDelinearizeIndexOp>(
+ newWarpOp.getLoc(), newWarpOp.getLaneid(),
+ delinearizedIdSizes)
+ .getResults();
+ } else {
+ // If there is only one map result, we can elide the delinearization
+ // op and use the lane id directly.
+ delinearized.append(targetType.getRank(), newWarpOp.getLaneid());
+ }
+
AffineMap indexMap = map.compose(newWriteOp.getPermutationMap());
Location loc = newWriteOp.getLoc();
SmallVector<Value> indices(newWriteOp.getIndices().begin(),
@@ -539,11 +571,11 @@ struct WarpOpTransferWrite : public OpRewritePattern<WarpExecuteOnLane0Op> {
continue;
unsigned indexPos = indexExpr.getPosition();
unsigned vectorPos = cast<AffineDimExpr>(std::get<1>(it)).getPosition();
+ Value laneId = delinearized[vectorPos];
auto scale =
rewriter.getAffineConstantExpr(targetType.getDimSize(vectorPos));
indices[indexPos] = affine::makeComposedAffineApply(
- rewriter, loc, d0 + scale * d1,
- {indices[indexPos], newWarpOp.getLaneid()});
+ rewriter, loc, d0 + scale * d1, {indices[indexPos], laneId});
}
newWriteOp.getIndicesMutable().assign(indices);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index fc11ae63e718..dc6f126aae4c 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -729,8 +729,8 @@ static LogicalResult commonConversionPrecondition(PatternRewriter &rewriter,
// TODO: consider relaxing this restriction in the future if we find ways
// to really work with subbyte elements across the MLIR/LLVM boundary.
- unsigned resultBitwidth = preconditionType.getElementTypeBitWidth();
- if (resultBitwidth % 8 != 0)
+ unsigned bitwidth = preconditionType.getElementTypeBitWidth();
+ if (bitwidth % 8 != 0)
return rewriter.notifyMatchFailure(op, "bitwidth is not k * 8");
return success();
@@ -768,6 +768,10 @@ static LogicalResult alignedConversionPrecondition(PatternRewriter &rewriter,
(dstElemBitwidth % srcElemBitwidth) != 0)
return rewriter.notifyMatchFailure(op, "Not a supported aligned case");
+ if ((srcType.getShape().back() % 2) != 0)
+ return rewriter.notifyMatchFailure(
+ op, "Not an even number of i4 elements in trailing dim");
+
return success();
}
@@ -876,6 +880,58 @@ static Value rewriteI4ToI8SignedExt(PatternRewriter &rewriter, Location loc,
return rewriter.create<vector::InterleaveOp>(loc, low, high);
}
+/// Rewrite the i8 -> i4 truncation into a sequence of shuffles and bitwise ops
+/// that take advantage of high-level information to avoid leaving LLVM to
+/// scramble with peephole optimizations.
+static Value rewriteI8ToI4Trunc(PatternRewriter &rewriter, Location loc,
+ Value srcValue) {
+ VectorType srcVecType = cast<VectorType>(srcValue.getType());
+ assert(srcVecType.getElementType().isSignlessInteger(8) &&
+ "Expected i8 type");
+
+ // 1. De-interleave low and high i8 elements.
+ int64_t vecDimSize = srcVecType.getShape().back();
+ SmallVector<int64_t> deinterleaveLowMaskValues;
+ SmallVector<int64_t> deinterleaveHighMaskValues;
+ assert((vecDimSize % 2) == 0 && "Odd number of i4 elements");
+ deinterleaveLowMaskValues.reserve(vecDimSize / 2);
+ deinterleaveHighMaskValues.reserve(vecDimSize / 2);
+ for (int i = 0, end = vecDimSize; i < end; i += 2) {
+ deinterleaveLowMaskValues.push_back(i);
+ deinterleaveHighMaskValues.push_back(i + 1);
+ }
+
+ auto lowShuffleOp = rewriter.create<vector::ShuffleOp>(
+ loc, srcValue, srcValue,
+ rewriter.getI64ArrayAttr(deinterleaveLowMaskValues));
+ auto highShuffleOp = rewriter.create<vector::ShuffleOp>(
+ loc, srcValue, srcValue,
+ rewriter.getI64ArrayAttr(deinterleaveHighMaskValues));
+
+ // 2. Zero out the upper side of each low i8 element.
+ constexpr int8_t i8LowBitMask = 0x0F;
+ Value zeroOutMask = rewriter.create<arith::ConstantOp>(
+ loc,
+ DenseElementsAttr::get(lowShuffleOp.getResultVectorType(), i8LowBitMask));
+ Value zeroOutLow =
+ rewriter.create<arith::AndIOp>(loc, lowShuffleOp, zeroOutMask);
+
+ // 3. Move high i4 values to upper side of the byte.
+ constexpr int8_t bitsToShift = 4;
+ VectorType deinterI8VecType = highShuffleOp.getResultVectorType();
+ auto shiftValues = rewriter.create<arith::ConstantOp>(
+ loc, DenseElementsAttr::get(deinterI8VecType, bitsToShift));
+ Value shlHigh =
+ rewriter.create<arith::ShLIOp>(loc, highShuffleOp, shiftValues);
+
+ // 4. Merge high and low i4 values.
+ auto mergedHiLowOp = rewriter.create<arith::OrIOp>(loc, zeroOutLow, shlHigh);
+
+ // 5. Generate a bitcast vector<Xxi8> -> vector<2Xxi4>.
+ auto i4VecType = srcVecType.cloneWith(std::nullopt, rewriter.getI4Type());
+ return rewriter.create<vector::BitCastOp>(loc, i4VecType, mergedHiLowOp);
+}
+
namespace {
/// Rewrite bitcast(trunci) to a sequence of shuffles and bitwise ops that take
/// advantage of high-level information to avoid leaving LLVM to scramble with
@@ -1019,7 +1075,7 @@ struct RewriteAlignedSubByteIntSignedExt : OpRewritePattern<ConversionOpType> {
LogicalResult matchAndRewrite(ConversionOpType conversionOp,
PatternRewriter &rewriter) const override {
- // Set up the BitCastRewriter and verify the preconditions.
+ // Verify the preconditions.
Value srcValue = conversionOp.getIn();
auto srcVecType = dyn_cast<VectorType>(srcValue.getType());
auto dstVecType = dyn_cast<VectorType>(conversionOp.getType());
@@ -1043,6 +1099,65 @@ struct RewriteAlignedSubByteIntSignedExt : OpRewritePattern<ConversionOpType> {
}
};
+/// Rewrite the i8 -> i4 part of any truncation into a sequence of shuffles and
+/// bitwise ops that take advantage of high-level information to avoid leaving
+/// LLVM to scramble with peephole optimizations.
+///
+/// For example:
+/// arith.trunci %in : vector<8xi32> to vector<8xi4>
+/// is rewritten as
+///
+/// %cst = arith.constant dense<15> : vector<4xi8>
+/// %cst_0 = arith.constant dense<4> : vector<4xi8>
+/// %0 = arith.trunci %in : vector<8xi32> to vector<8xi8>
+/// %1 = vector.shuffle %0, %0 [0, 2, 4, 6] : vector<8xi8>, vector<8xi8>
+/// %2 = vector.shuffle %0, %0 [1, 3, 5, 7] : vector<8xi8>, vector<8xi8>
+/// %3 = arith.andi %1, %cst : vector<4xi8>
+/// %4 = arith.shli %2, %cst_0 : vector<4xi8>
+/// %5 = arith.ori %3, %4 : vector<4xi8>
+/// %6 = vector.bitcast %5 : vector<4xi8> to vector<8xi4>
+///
+struct RewriteAlignedSubByteIntTrunc : OpRewritePattern<arith::TruncIOp> {
+ using OpRewritePattern<arith::TruncIOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(arith::TruncIOp truncOp,
+ PatternRewriter &rewriter) const override {
+ // Verify the preconditions.
+ Value srcValue = truncOp.getIn();
+ auto srcVecType = dyn_cast<VectorType>(srcValue.getType());
+ auto dstVecType = dyn_cast<VectorType>(truncOp.getType());
+ if (!srcVecType || !dstVecType)
+ return failure();
+
+ // Only single dim vectors are supported until we have
+ // `vector.deinterleave`.
+ if (srcVecType.getRank() != 1)
+ return failure();
+
+ if (failed(commonConversionPrecondition(rewriter, srcVecType, truncOp)))
+ return failure();
+
+ // Check general alignment preconditions. We invert the src/dst type order
+ // to reuse the existing precondition logic.
+ if (failed(alignedConversionPrecondition(rewriter, dstVecType, srcVecType,
+ truncOp)))
+ return failure();
+
+ // Create a new iX -> i8 truncation op.
+ Location loc = truncOp.getLoc();
+ auto i8VecType = srcVecType.cloneWith(std::nullopt, rewriter.getI8Type());
+ Value i8TruncVal =
+ rewriter.create<arith::TruncIOp>(loc, i8VecType, srcValue);
+
+ // Rewrite the i8 -> i4 truncation part.
+ Value subByteTrunc = rewriteI8ToI4Trunc(rewriter, loc, i8TruncVal);
+
+ // Finalize the rewrite.
+ rewriter.replaceOp(truncOp, subByteTrunc);
+ return success();
+ }
+};
+
/// Rewrite a sub-byte vector transpose into a sequence of instructions that
/// perform the transpose on wider (byte) element types.
/// For example:
@@ -1115,8 +1230,9 @@ void vector::populateVectorNarrowTypeRewritePatterns(
// Patterns for aligned cases. We set higher priority as they are expected to
// generate better performance for aligned cases.
patterns.add<RewriteAlignedSubByteIntSignedExt<arith::ExtSIOp>,
- RewriteAlignedSubByteIntSignedExt<arith::SIToFPOp>>(
- patterns.getContext(), benefit.getBenefit() + 1);
+ RewriteAlignedSubByteIntSignedExt<arith::SIToFPOp>,
+ RewriteAlignedSubByteIntTrunc>(patterns.getContext(),
+ benefit.getBenefit() + 1);
}
void vector::populateVectorTransposeNarrowTypeRewritePatterns(
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 74dd1dfaca0d..a2d4e2166331 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -713,6 +713,76 @@ struct BubbleDownBitCastForStridedSliceExtract
// Shuffles vector.bitcast op before vector.insert_strided_slice op.
//
// This transforms IR like:
+// %0 = vector.insert %val, %dst[4] : vector<32xi4> into vector<8x32xi4>
+// %1 = vector.bitcast %0 : vector<8x32xi4> to vector<8x16xi8>
+// Into:
+// %0 = vector.bitcast %val : vector<32xi4> to vector<16xi8>
+// %1 = vector.bitcast %dst : vector<8x32xi4> to vector<8x16xi8>
+// %2 = vector.insert %0, %1 [4] : vector<16xi8> into vector<8x16xi8>
+//
+struct BubbleUpBitCastForInsert : public OpRewritePattern<vector::BitCastOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
+ PatternRewriter &rewriter) const override {
+ VectorType castSrcType = bitcastOp.getSourceVectorType();
+ VectorType castDstType = bitcastOp.getResultVectorType();
+
+ // 0-D and scalable vectors are not supported yet.
+ if (castSrcType.getRank() == 0 || castSrcType.isScalable() ||
+ castDstType.isScalable())
+ return failure();
+
+ int64_t castSrcLastDim = castSrcType.getShape().back();
+ int64_t castDstLastDim = castDstType.getShape().back();
+ bool isNumElemsShrink = castSrcLastDim >= castDstLastDim;
+ int64_t ratio;
+ if (isNumElemsShrink) {
+ assert(castSrcLastDim % castDstLastDim == 0);
+ ratio = castSrcLastDim / castDstLastDim;
+ } else {
+ assert(castDstLastDim % castSrcLastDim == 0);
+ ratio = castDstLastDim / castSrcLastDim;
+ }
+
+ auto insertOp = bitcastOp.getSource().getDefiningOp<vector::InsertOp>();
+ if (!insertOp)
+ return failure();
+
+ // Only vector sources are supported for now.
+ auto insertSrcType = dyn_cast<VectorType>(insertOp.getSourceType());
+ if (!insertSrcType)
+ return failure();
+
+ // Bitcast the source.
+ SmallVector<int64_t> srcDims(insertSrcType.getShape());
+ srcDims.back() =
+ isNumElemsShrink ? srcDims.back() / ratio : srcDims.back() * ratio;
+ VectorType newCastSrcType =
+ VectorType::get(srcDims, castDstType.getElementType());
+ auto newCastSrcOp = rewriter.create<vector::BitCastOp>(
+ bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());
+
+ SmallVector<int64_t> dstDims(insertOp.getDestVectorType().getShape());
+ dstDims.back() =
+ isNumElemsShrink ? dstDims.back() / ratio : dstDims.back() * ratio;
+ VectorType newCastDstType =
+ VectorType::get(dstDims, castDstType.getElementType());
+
+ // Bitcast the destination.
+ auto newCastDstOp = rewriter.create<vector::BitCastOp>(
+ bitcastOp.getLoc(), newCastDstType, insertOp.getDest());
+
+ // Generate new insert.
+ rewriter.replaceOpWithNewOp<vector::InsertOp>(
+ bitcastOp, newCastSrcOp, newCastDstOp, insertOp.getMixedPosition());
+ return success();
+ }
+};
+
+// Shuffles vector.bitcast op before vector.insert_strided_slice op.
+//
+// This transforms IR like:
// %0 = vector.insert_strided_slice %src, %dst {
// offsets = [0], strides = [1]} : vector<4xf16> into vector<8xf16>
// %1 = vector.bitcast %0: vector<8xf16> to vector<4xf32>
@@ -1782,8 +1852,8 @@ void mlir::vector::populateBubbleVectorBitCastOpPatterns(
RewritePatternSet &patterns, PatternBenefit benefit) {
patterns.add<BubbleDownVectorBitCastForExtract,
BubbleDownBitCastForStridedSliceExtract,
- BubbleUpBitCastForStridedSliceInsert>(patterns.getContext(),
- benefit);
+ BubbleUpBitCastForInsert, BubbleUpBitCastForStridedSliceInsert>(
+ patterns.getContext(), benefit);
}
void mlir::vector::populateBreakDownVectorBitCastOpPatterns(
diff --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
index 2ba3dec0a9a5..4bc707c43ad9 100644
--- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp
+++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
@@ -361,6 +361,22 @@ static LogicalResult printBinaryOperation(CppEmitter &emitter,
return success();
}
+static LogicalResult printUnaryOperation(CppEmitter &emitter,
+ Operation *operation,
+ StringRef unaryOperator) {
+ raw_ostream &os = emitter.ostream();
+
+ if (failed(emitter.emitAssignPrefix(*operation)))
+ return failure();
+
+ os << unaryOperator;
+
+ if (failed(emitter.emitOperand(operation->getOperand(0))))
+ return failure();
+
+ return success();
+}
+
static LogicalResult printOperation(CppEmitter &emitter, emitc::AddOp addOp) {
Operation *operation = addOp.getOperation();
@@ -588,6 +604,44 @@ static LogicalResult printOperation(CppEmitter &emitter,
return success();
}
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::BitwiseAndOp bitwiseAndOp) {
+ Operation *operation = bitwiseAndOp.getOperation();
+ return printBinaryOperation(emitter, operation, "&");
+}
+
+static LogicalResult
+printOperation(CppEmitter &emitter,
+ emitc::BitwiseLeftShiftOp bitwiseLeftShiftOp) {
+ Operation *operation = bitwiseLeftShiftOp.getOperation();
+ return printBinaryOperation(emitter, operation, "<<");
+}
+
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::BitwiseNotOp bitwiseNotOp) {
+ Operation *operation = bitwiseNotOp.getOperation();
+ return printUnaryOperation(emitter, operation, "~");
+}
+
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::BitwiseOrOp bitwiseOrOp) {
+ Operation *operation = bitwiseOrOp.getOperation();
+ return printBinaryOperation(emitter, operation, "|");
+}
+
+static LogicalResult
+printOperation(CppEmitter &emitter,
+ emitc::BitwiseRightShiftOp bitwiseRightShiftOp) {
+ Operation *operation = bitwiseRightShiftOp.getOperation();
+ return printBinaryOperation(emitter, operation, ">>");
+}
+
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::BitwiseXorOp bitwiseXorOp) {
+ Operation *operation = bitwiseXorOp.getOperation();
+ return printBinaryOperation(emitter, operation, "^");
+}
+
static LogicalResult printOperation(CppEmitter &emitter, emitc::CastOp castOp) {
raw_ostream &os = emitter.ostream();
Operation &op = *castOp.getOperation();
@@ -627,6 +681,24 @@ static LogicalResult printOperation(CppEmitter &emitter,
return success();
}
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::LogicalAndOp logicalAndOp) {
+ Operation *operation = logicalAndOp.getOperation();
+ return printBinaryOperation(emitter, operation, "&&");
+}
+
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::LogicalNotOp logicalNotOp) {
+ Operation *operation = logicalNotOp.getOperation();
+ return printUnaryOperation(emitter, operation, "!");
+}
+
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::LogicalOrOp logicalOrOp) {
+ Operation *operation = logicalOrOp.getOperation();
+ return printBinaryOperation(emitter, operation, "||");
+}
+
static LogicalResult printOperation(CppEmitter &emitter, emitc::ForOp forOp) {
raw_indented_ostream &os = emitter.ostream();
@@ -1280,11 +1352,15 @@ LogicalResult CppEmitter::emitOperation(Operation &op, bool trailingSemicolon) {
.Case<cf::BranchOp, cf::CondBranchOp>(
[&](auto op) { return printOperation(*this, op); })
// EmitC ops.
- .Case<emitc::AddOp, emitc::ApplyOp, emitc::AssignOp, emitc::CallOp,
+ .Case<emitc::AddOp, emitc::ApplyOp, emitc::AssignOp,
+ emitc::BitwiseAndOp, emitc::BitwiseLeftShiftOp,
+ emitc::BitwiseNotOp, emitc::BitwiseOrOp,
+ emitc::BitwiseRightShiftOp, emitc::BitwiseXorOp, emitc::CallOp,
emitc::CallOpaqueOp, emitc::CastOp, emitc::CmpOp,
emitc::ConstantOp, emitc::DeclareFuncOp, emitc::DivOp,
emitc::ExpressionOp, emitc::ForOp, emitc::FuncOp, emitc::IfOp,
- emitc::IncludeOp, emitc::MulOp, emitc::RemOp, emitc::ReturnOp,
+ emitc::IncludeOp, emitc::LogicalAndOp, emitc::LogicalNotOp,
+ emitc::LogicalOrOp, emitc::MulOp, emitc::RemOp, emitc::ReturnOp,
emitc::SubOp, emitc::VariableOp, emitc::VerbatimOp>(
[&](auto op) { return printOperation(*this, op); })
// Func ops.
diff --git a/mlir/lib/Target/LLVM/NVVM/Target.cpp b/mlir/lib/Target/LLVM/NVVM/Target.cpp
index 71b15a92782e..d5b6645631ed 100644
--- a/mlir/lib/Target/LLVM/NVVM/Target.cpp
+++ b/mlir/lib/Target/LLVM/NVVM/Target.cpp
@@ -13,6 +13,7 @@
#include "mlir/Target/LLVM/NVVM/Target.h"
+#include "mlir/Config/mlir-config.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Target/LLVM/NVVM/Utils.h"
@@ -156,7 +157,7 @@ SerializeGPUModuleBase::loadBitcodeFiles(llvm::Module &module) {
return std::move(bcFiles);
}
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#if MLIR_ENABLE_CUDA_CONVERSIONS
namespace {
class NVPTXSerializer : public SerializeGPUModuleBase {
public:
@@ -562,7 +563,7 @@ NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
return compileToBinary(*serializedISA);
#endif // MLIR_NVPTXCOMPILER_ENABLED == 1
}
-#endif // MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#endif // MLIR_ENABLE_CUDA_CONVERSIONS
std::optional<SmallVector<char, 0>>
NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
@@ -574,7 +575,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
module->emitError("Module must be a GPU module.");
return std::nullopt;
}
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#if MLIR_ENABLE_CUDA_CONVERSIONS
NVPTXSerializer serializer(*module, cast<NVVMTargetAttr>(attribute), options);
serializer.init();
return serializer.run();
@@ -582,7 +583,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
module->emitError(
"The `NVPTX` target was not built. Please enable it when building LLVM.");
return std::nullopt;
-#endif // MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#endif // MLIR_ENABLE_CUDA_CONVERSIONS
}
Attribute
diff --git a/mlir/lib/Target/LLVMIR/DebugTranslation.cpp b/mlir/lib/Target/LLVMIR/DebugTranslation.cpp
index 16918aab5497..420bb8d8274e 100644
--- a/mlir/lib/Target/LLVMIR/DebugTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/DebugTranslation.cpp
@@ -150,7 +150,8 @@ llvm::DIDerivedType *DebugTranslation::translateImpl(DIDerivedTypeAttr attr) {
/*File=*/nullptr, /*Line=*/0,
/*Scope=*/nullptr, translate(attr.getBaseType()), attr.getSizeInBits(),
attr.getAlignInBits(), attr.getOffsetInBits(),
- /*DWARFAddressSpace=*/std::nullopt, /*Flags=*/llvm::DINode::FlagZero);
+ /*DWARFAddressSpace=*/std::nullopt, /*PtrAuthData=*/std::nullopt,
+ /*Flags=*/llvm::DINode::FlagZero);
}
llvm::DIFile *DebugTranslation::translateImpl(DIFileAttr attr) {
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 6e53d801a0d2..fd1de274da60 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -396,9 +396,9 @@ collectReductionDecls(T loop,
/// Translates the blocks contained in the given region and appends them to at
/// the current insertion point of `builder`. The operations of the entry block
-/// are appended to the current insertion block, which is not expected to have a
-/// terminator. If set, `continuationBlockArgs` is populated with translated
-/// values that correspond to the values omp.yield'ed from the region.
+/// are appended to the current insertion block. If set, `continuationBlockArgs`
+/// is populated with translated values that correspond to the values
+/// omp.yield'ed from the region.
static LogicalResult inlineConvertOmpRegions(
Region &region, StringRef blockName, llvm::IRBuilderBase &builder,
LLVM::ModuleTranslation &moduleTranslation,
@@ -409,7 +409,14 @@ static LogicalResult inlineConvertOmpRegions(
// Special case for single-block regions that don't create additional blocks:
// insert operations without creating additional blocks.
if (llvm::hasSingleElement(region)) {
+ llvm::Instruction *potentialTerminator =
+ builder.GetInsertBlock()->empty() ? nullptr
+ : &builder.GetInsertBlock()->back();
+
+ if (potentialTerminator && potentialTerminator->isTerminator())
+ potentialTerminator->removeFromParent();
moduleTranslation.mapBlock(&region.front(), builder.GetInsertBlock());
+
if (failed(moduleTranslation.convertBlock(
region.front(), /*ignoreArguments=*/true, builder)))
return failure();
@@ -423,6 +430,10 @@ static LogicalResult inlineConvertOmpRegions(
// Drop the mapping that is no longer necessary so that the same region can
// be processed multiple times.
moduleTranslation.forgetMapping(region);
+
+ if (potentialTerminator && potentialTerminator->isTerminator())
+ potentialTerminator->insertAfter(&builder.GetInsertBlock()->back());
+
return success();
}
@@ -656,8 +667,22 @@ convertOmpSingle(omp::SingleOp &singleOp, llvm::IRBuilderBase &builder,
moduleTranslation, bodyGenStatus);
};
auto finiCB = [&](InsertPointTy codeGenIP) {};
+
+ // Handle copyprivate
+ Operation::operand_range cpVars = singleOp.getCopyprivateVars();
+ std::optional<ArrayAttr> cpFuncs = singleOp.getCopyprivateFuncs();
+ llvm::SmallVector<llvm::Value *> llvmCPVars;
+ llvm::SmallVector<llvm::Function *> llvmCPFuncs;
+ for (size_t i = 0, e = cpVars.size(); i < e; ++i) {
+ llvmCPVars.push_back(moduleTranslation.lookupValue(cpVars[i]));
+ auto llvmFuncOp = SymbolTable::lookupNearestSymbolFrom<LLVM::LLVMFuncOp>(
+ singleOp, cast<SymbolRefAttr>((*cpFuncs)[i]));
+ llvmCPFuncs.push_back(
+ moduleTranslation.lookupFunction(llvmFuncOp.getName()));
+ }
+
builder.restoreIP(moduleTranslation.getOpenMPBuilder()->createSingle(
- ompLoc, bodyCB, finiCB, singleOp.getNowait(), /*DidIt=*/nullptr));
+ ompLoc, bodyCB, finiCB, singleOp.getNowait(), llvmCPVars, llvmCPFuncs));
return bodyGenStatus;
}
@@ -1000,11 +1025,50 @@ convertOmpWsLoop(Operation &opInst, llvm::IRBuilderBase &builder,
return success();
}
+/// A RAII class that on construction replaces the region arguments of the
+/// parallel op (which correspond to private variables) with the actual private
+/// variables they correspond to. This prepares the parallel op so that it
+/// matches what is expected by the OMPIRBuilder.
+///
+/// On destruction, it restores the original state of the operation so that on
+/// the MLIR side, the op is not affected by conversion to LLVM IR.
+class OmpParallelOpConversionManager {
+public:
+ OmpParallelOpConversionManager(omp::ParallelOp opInst)
+ : region(opInst.getRegion()), privateVars(opInst.getPrivateVars()),
+ privateArgBeginIdx(opInst.getNumReductionVars()),
+ privateArgEndIdx(privateArgBeginIdx + privateVars.size()) {
+ auto privateVarsIt = privateVars.begin();
+
+ for (size_t argIdx = privateArgBeginIdx; argIdx < privateArgEndIdx;
+ ++argIdx, ++privateVarsIt)
+ mlir::replaceAllUsesInRegionWith(region.getArgument(argIdx),
+ *privateVarsIt, region);
+ }
+
+ ~OmpParallelOpConversionManager() {
+ auto privateVarsIt = privateVars.begin();
+
+ for (size_t argIdx = privateArgBeginIdx; argIdx < privateArgEndIdx;
+ ++argIdx, ++privateVarsIt)
+ mlir::replaceAllUsesInRegionWith(*privateVarsIt,
+ region.getArgument(argIdx), region);
+ }
+
+private:
+ Region &region;
+ OperandRange privateVars;
+ unsigned privateArgBeginIdx;
+ unsigned privateArgEndIdx;
+};
+
/// Converts the OpenMP parallel operation to LLVM IR.
static LogicalResult
convertOmpParallel(omp::ParallelOp opInst, llvm::IRBuilderBase &builder,
LLVM::ModuleTranslation &moduleTranslation) {
using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ OmpParallelOpConversionManager raii(opInst);
+
// TODO: support error propagation in OpenMPIRBuilder and use it instead of
// relying on captured variables.
LogicalResult bodyGenStatus = success();
@@ -1086,12 +1150,81 @@ convertOmpParallel(omp::ParallelOp opInst, llvm::IRBuilderBase &builder,
// TODO: Perform appropriate actions according to the data-sharing
// attribute (shared, private, firstprivate, ...) of variables.
- // Currently defaults to shared.
+ // Currently shared and private are supported.
auto privCB = [&](InsertPointTy allocaIP, InsertPointTy codeGenIP,
llvm::Value &, llvm::Value &vPtr,
llvm::Value *&replacementValue) -> InsertPointTy {
replacementValue = &vPtr;
+ // If this is a private value, this lambda will return the corresponding
+ // mlir value and its `PrivateClauseOp`. Otherwise, empty values are
+ // returned.
+ auto [privVar, privatizerClone] =
+ [&]() -> std::pair<mlir::Value, omp::PrivateClauseOp> {
+ if (!opInst.getPrivateVars().empty()) {
+ auto privVars = opInst.getPrivateVars();
+ auto privatizers = opInst.getPrivatizers();
+
+ for (auto [privVar, privatizerAttr] :
+ llvm::zip_equal(privVars, *privatizers)) {
+ // Find the MLIR private variable corresponding to the LLVM value
+ // being privatized.
+ llvm::Value *llvmPrivVar = moduleTranslation.lookupValue(privVar);
+ if (llvmPrivVar != &vPtr)
+ continue;
+
+ SymbolRefAttr privSym = llvm::cast<SymbolRefAttr>(privatizerAttr);
+ omp::PrivateClauseOp privatizer =
+ SymbolTable::lookupNearestSymbolFrom<omp::PrivateClauseOp>(
+ opInst, privSym);
+
+ // Clone the privatizer in case it is used by more than one parallel
+ // region. The privatizer is processed in-place (see below) before it
+ // gets inlined in the parallel region and therefore processing the
+ // original op is dangerous.
+ return {privVar, privatizer.clone()};
+ }
+ }
+
+ return {mlir::Value(), omp::PrivateClauseOp()};
+ }();
+
+ if (privVar) {
+ if (privatizerClone.getDataSharingType() ==
+ omp::DataSharingClauseType::FirstPrivate) {
+ privatizerClone.emitOpError(
+ "TODO: delayed privatization is not "
+ "supported for `firstprivate` clauses yet.");
+ bodyGenStatus = failure();
+ return codeGenIP;
+ }
+
+ Region &allocRegion = privatizerClone.getAllocRegion();
+
+ // Replace the privatizer block argument with the MLIR value being privatized.
+ // This way, the body of the privatizer will be changed from using the
+ // region/block argument to the value being privatized.
+ auto allocRegionArg = allocRegion.getArgument(0);
+ replaceAllUsesInRegionWith(allocRegionArg, privVar, allocRegion);
+
+ auto oldIP = builder.saveIP();
+ builder.restoreIP(allocaIP);
+
+ SmallVector<llvm::Value *, 1> yieldedValues;
+ if (failed(inlineConvertOmpRegions(allocRegion, "omp.privatizer", builder,
+ moduleTranslation, &yieldedValues))) {
+ opInst.emitError("failed to inline `alloc` region of an `omp.private` "
+ "op in the parallel region");
+ bodyGenStatus = failure();
+ } else {
+ assert(yieldedValues.size() == 1);
+ replacementValue = yieldedValues.front();
+ }
+
+ privatizerClone.erase();
+ builder.restoreIP(oldIP);
+ }
+
return codeGenIP;
};
@@ -1635,7 +1768,7 @@ getRefPtrIfDeclareTarget(mlir::Value value,
// A small helper structure to contain data gathered
// for map lowering and coalese it into one area and
// avoiding extra computations such as searches in the
-// llvm module for lowered mapped varibles or checking
+// llvm module for lowered mapped variables or checking
// if something is declare target (and retrieving the
// value) more than neccessary.
struct MapInfoData : llvm::OpenMPIRBuilder::MapInfosTy {
@@ -2854,26 +2987,26 @@ LogicalResult OpenMPDialectLLVMIRTranslationInterface::amendOperation(
moduleTranslation);
return failure();
})
- .Case(
- "omp.requires",
- [&](Attribute attr) {
- if (auto requiresAttr = attr.dyn_cast<omp::ClauseRequiresAttr>()) {
- using Requires = omp::ClauseRequires;
- Requires flags = requiresAttr.getValue();
- llvm::OpenMPIRBuilderConfig &config =
- moduleTranslation.getOpenMPBuilder()->Config;
- config.setHasRequiresReverseOffload(
- bitEnumContainsAll(flags, Requires::reverse_offload));
- config.setHasRequiresUnifiedAddress(
- bitEnumContainsAll(flags, Requires::unified_address));
- config.setHasRequiresUnifiedSharedMemory(
- bitEnumContainsAll(flags, Requires::unified_shared_memory));
- config.setHasRequiresDynamicAllocators(
- bitEnumContainsAll(flags, Requires::dynamic_allocators));
- return success();
- }
- return failure();
- })
+ .Case("omp.requires",
+ [&](Attribute attr) {
+ if (auto requiresAttr =
+ attr.dyn_cast<omp::ClauseRequiresAttr>()) {
+ using Requires = omp::ClauseRequires;
+ Requires flags = requiresAttr.getValue();
+ llvm::OpenMPIRBuilderConfig &config =
+ moduleTranslation.getOpenMPBuilder()->Config;
+ config.setHasRequiresReverseOffload(
+ bitEnumContainsAll(flags, Requires::reverse_offload));
+ config.setHasRequiresUnifiedAddress(
+ bitEnumContainsAll(flags, Requires::unified_address));
+ config.setHasRequiresUnifiedSharedMemory(
+ bitEnumContainsAll(flags, Requires::unified_shared_memory));
+ config.setHasRequiresDynamicAllocators(
+ bitEnumContainsAll(flags, Requires::dynamic_allocators));
+ return success();
+ }
+ return failure();
+ })
.Default([](Attribute) {
// Fall through for omp attributes that do not require lowering.
return success();
@@ -2988,12 +3121,13 @@ LogicalResult OpenMPDialectLLVMIRTranslationInterface::convertOperation(
.Case([&](omp::TargetOp) {
return convertOmpTarget(*op, builder, moduleTranslation);
})
- .Case<omp::MapInfoOp, omp::DataBoundsOp>([&](auto op) {
- // No-op, should be handled by relevant owning operations e.g.
- // TargetOp, EnterDataOp, ExitDataOp, DataOp etc. and then
- // discarded
- return success();
- })
+ .Case<omp::MapInfoOp, omp::DataBoundsOp, omp::PrivateClauseOp>(
+ [&](auto op) {
+ // No-op, should be handled by relevant owning operations e.g.
+ // TargetOp, EnterDataOp, ExitDataOp, DataOp etc. and then
+ // discarded
+ return success();
+ })
.Default([&](Operation *inst) {
return inst->emitError("unsupported OpenMP operation: ")
<< inst->getName();
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index a11603a44dcd..c00628a420a0 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -1512,13 +1512,14 @@ ModuleTranslation::getOrCreateAliasScope(AliasScopeAttr aliasScopeAttr) {
if (!scopeInserted)
return scopeIt->second;
llvm::LLVMContext &ctx = llvmModule->getContext();
+ auto dummy = llvm::MDNode::getTemporary(ctx, std::nullopt);
// Convert the domain metadata node if necessary.
auto [domainIt, insertedDomain] = aliasDomainMetadataMapping.try_emplace(
aliasScopeAttr.getDomain(), nullptr);
if (insertedDomain) {
llvm::SmallVector<llvm::Metadata *, 2> operands;
// Placeholder for self-reference.
- operands.push_back({});
+ operands.push_back(dummy.get());
if (StringAttr description = aliasScopeAttr.getDomain().getDescription())
operands.push_back(llvm::MDString::get(ctx, description));
domainIt->second = llvm::MDNode::get(ctx, operands);
@@ -1529,7 +1530,7 @@ ModuleTranslation::getOrCreateAliasScope(AliasScopeAttr aliasScopeAttr) {
assert(domainIt->second && "Scope's domain should already be valid");
llvm::SmallVector<llvm::Metadata *, 3> operands;
// Placeholder for self-reference.
- operands.push_back({});
+ operands.push_back(dummy.get());
operands.push_back(domainIt->second);
if (StringAttr description = aliasScopeAttr.getDescription())
operands.push_back(llvm::MDString::get(ctx, description));
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index f967e8352bf4..26899301eb74 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -192,7 +192,6 @@ public:
EraseBlock,
InlineBlock,
MoveBlock,
- SplitBlock,
BlockTypeConversion,
ReplaceBlockArg,
// Operation rewrites
@@ -400,30 +399,6 @@ private:
Block *insertBeforeBlock;
};
-/// Splitting of a block. This rewrite is immediately reflected in the IR.
-class SplitBlockRewrite : public BlockRewrite {
-public:
- SplitBlockRewrite(ConversionPatternRewriterImpl &rewriterImpl, Block *block,
- Block *originalBlock)
- : BlockRewrite(Kind::SplitBlock, rewriterImpl, block),
- originalBlock(originalBlock) {}
-
- static bool classof(const IRRewrite *rewrite) {
- return rewrite->getKind() == Kind::SplitBlock;
- }
-
- void rollback() override {
- // Merge back the block that was split out.
- originalBlock->getOperations().splice(originalBlock->end(),
- block->getOperations());
- eraseBlock(block);
- }
-
-private:
- // The original block from which this block was split.
- Block *originalBlock;
-};
-
/// This structure contains the information pertaining to an argument that has
/// been converted.
struct ConvertedArgInfo {
@@ -798,13 +773,12 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
PatternRewriter &rewriter, ValueRange values,
SmallVectorImpl<Value> &remapped);
- /// Returns true if the given operation is ignored, and does not need to be
+ /// Return "true" if the given operation is ignored, and does not need to be
/// converted.
bool isOpIgnored(Operation *op) const;
- /// Recursively marks the nested operations under 'op' as ignored. This
- /// removes them from being considered for legalization.
- void markNestedOpsIgnored(Operation *op);
+ /// Return "true" if the given operation was replaced or erased.
+ bool wasOpReplaced(Operation *op) const;
//===--------------------------------------------------------------------===//
// Type Conversion
@@ -884,9 +858,6 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
void notifyBlockInserted(Block *block, Region *previous,
Region::iterator previousIt) override;
- /// Notifies that a block was split.
- void notifySplitBlock(Block *block, Block *continuation);
-
/// Notifies that a block is being inlined into another block.
void notifyBlockBeingInlined(Block *block, Block *srcBlock,
Block::iterator before);
@@ -946,18 +917,15 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
/// Ordered list of block operations (creations, splits, motions).
SmallVector<std::unique_ptr<IRRewrite>> rewrites;
- /// A set of operations that should no longer be considered for legalization,
- /// but were not directly replace/erased/etc. by a pattern. These are
- /// generally child operations of other operations who were
- /// replaced/erased/etc. This is not meant to be an exhaustive list of all
- /// operations, but the minimal set that can be used to detect if a given
- /// operation should be `ignored`. For example, we may add the operations that
- /// define non-empty regions to the set, but not any of the others. This
- /// simplifies the amount of memory needed as we can query if the parent
- /// operation was ignored.
+ /// A set of operations that should no longer be considered for legalization.
+ /// E.g., ops that are recursively legal. Ops that were replaced/erased are
+ /// tracked separately.
SetVector<Operation *> ignoredOps;
- // A set of operations that were erased.
+ /// A set of operations that were replaced/erased. Such ops are not erased
+ /// immediately but only when the dialect conversion succeeds. In the
+ /// meantime, they should no longer be considered for legalization and any attempt
+ /// to modify/access them is invalid rewriter API usage.
SetVector<Operation *> replacedOps;
/// The current type converter, or nullptr if no type converter is currently
@@ -1237,24 +1205,14 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
return success();
}
-// TODO: This function is a misnomer. It does not actually check if `op` is in
-// `ignoredOps`.
bool ConversionPatternRewriterImpl::isOpIgnored(Operation *op) const {
- // Check to see if this operation or the parent operation is ignored.
- return ignoredOps.count(op->getParentOp()) || replacedOps.count(op);
+ // Check to see if this operation is ignored or was replaced.
+ return replacedOps.count(op) || ignoredOps.count(op);
}
-void ConversionPatternRewriterImpl::markNestedOpsIgnored(Operation *op) {
- // Walk this operation and collect nested operations that define non-empty
- // regions. We mark such operations as 'ignored' so that we know we don't have
- // to convert them, or their nested ops.
- if (op->getNumRegions() == 0)
- return;
- op->walk([&](Operation *op) {
- if (llvm::any_of(op->getRegions(),
- [](Region &region) { return !region.empty(); }))
- ignoredOps.insert(op);
- });
+bool ConversionPatternRewriterImpl::wasOpReplaced(Operation *op) const {
+ // Check to see if this operation was replaced.
+ return replacedOps.count(op);
}
//===----------------------------------------------------------------------===//
@@ -1476,6 +1434,9 @@ void ConversionPatternRewriterImpl::notifyOperationInserted(
logger.startLine() << "** Insert : '" << op->getName() << "'(" << op
<< ")\n";
});
+ assert(!wasOpReplaced(op->getParentOp()) &&
+ "attempting to insert into a block within a replaced/erased op");
+
if (!previous.isSet()) {
// This is a newly created op.
appendRewrite<CreateOperationRewrite>(op);
@@ -1490,7 +1451,7 @@ void ConversionPatternRewriterImpl::notifyOperationInserted(
void ConversionPatternRewriterImpl::notifyOpReplaced(Operation *op,
ValueRange newValues) {
assert(newValues.size() == op->getNumResults());
- assert(!replacedOps.contains(op) && "operation was already replaced");
+ assert(!ignoredOps.contains(op) && "operation was already replaced");
// Track if any of the results changed, e.g. erased and replaced with null.
bool resultChanged = false;
@@ -1509,10 +1470,8 @@ void ConversionPatternRewriterImpl::notifyOpReplaced(Operation *op,
appendRewrite<ReplaceOperationRewrite>(op, currentTypeConverter,
resultChanged);
- // Mark this operation as recursively ignored so that we don't need to
- // convert any nested operations.
- replacedOps.insert(op);
- markNestedOpsIgnored(op);
+ // Mark this operation and all nested ops as replaced.
+ op->walk([&](Operation *op) { replacedOps.insert(op); });
}
void ConversionPatternRewriterImpl::notifyBlockIsBeingErased(Block *block) {
@@ -1523,6 +1482,9 @@ void ConversionPatternRewriterImpl::notifyBlockIsBeingErased(Block *block) {
void ConversionPatternRewriterImpl::notifyBlockInserted(
Block *block, Region *previous, Region::iterator previousIt) {
+ assert(!wasOpReplaced(block->getParentOp()) &&
+ "attempting to insert into a region within a replaced/erased op");
+
if (!previous) {
// This is a newly created block.
appendRewrite<CreateBlockRewrite>(block);
@@ -1532,11 +1494,6 @@ void ConversionPatternRewriterImpl::notifyBlockInserted(
appendRewrite<MoveBlockRewrite>(block, previous, prevBlock);
}
-void ConversionPatternRewriterImpl::notifySplitBlock(Block *block,
- Block *continuation) {
- appendRewrite<SplitBlockRewrite>(continuation, block);
-}
-
void ConversionPatternRewriterImpl::notifyBlockBeingInlined(
Block *block, Block *srcBlock, Block::iterator before) {
appendRewrite<InlineBlockRewrite>(block, srcBlock, before);
@@ -1604,6 +1561,9 @@ void ConversionPatternRewriter::eraseOp(Operation *op) {
}
void ConversionPatternRewriter::eraseBlock(Block *block) {
+ assert(!impl->wasOpReplaced(block->getParentOp()) &&
+ "attempting to erase a block within a replaced/erased op");
+
// Mark all ops for erasure.
for (Operation &op : *block)
eraseOp(&op);
@@ -1619,18 +1579,27 @@ void ConversionPatternRewriter::eraseBlock(Block *block) {
Block *ConversionPatternRewriter::applySignatureConversion(
Region *region, TypeConverter::SignatureConversion &conversion,
const TypeConverter *converter) {
+ assert(!impl->wasOpReplaced(region->getParentOp()) &&
+ "attempting to apply a signature conversion to a block within a "
+ "replaced/erased op");
return impl->applySignatureConversion(region, conversion, converter);
}
FailureOr<Block *> ConversionPatternRewriter::convertRegionTypes(
Region *region, const TypeConverter &converter,
TypeConverter::SignatureConversion *entryConversion) {
+ assert(!impl->wasOpReplaced(region->getParentOp()) &&
+ "attempting to apply a signature conversion to a block within a "
+ "replaced/erased op");
return impl->convertRegionTypes(region, converter, entryConversion);
}
LogicalResult ConversionPatternRewriter::convertNonEntryRegionTypes(
Region *region, const TypeConverter &converter,
ArrayRef<TypeConverter::SignatureConversion> blockConversions) {
+ assert(!impl->wasOpReplaced(region->getParentOp()) &&
+ "attempting to apply a signature conversion to a block within a "
+ "replaced/erased op");
return impl->convertNonEntryRegionTypes(region, converter, blockConversions);
}
@@ -1663,25 +1632,22 @@ ConversionPatternRewriter::getRemappedValues(ValueRange keys,
results);
}
-Block *ConversionPatternRewriter::splitBlock(Block *block,
- Block::iterator before) {
- auto *continuation = block->splitBlock(before);
- impl->notifySplitBlock(block, continuation);
- return continuation;
-}
-
void ConversionPatternRewriter::inlineBlockBefore(Block *source, Block *dest,
Block::iterator before,
ValueRange argValues) {
+#ifndef NDEBUG
assert(argValues.size() == source->getNumArguments() &&
"incorrect # of argument replacement values");
-#ifndef NDEBUG
+ assert(!impl->wasOpReplaced(source->getParentOp()) &&
+ "attempting to inline a block from a replaced/erased op");
+ assert(!impl->wasOpReplaced(dest->getParentOp()) &&
+ "attempting to inline a block into a replaced/erased op");
auto opIgnored = [&](Operation *op) { return impl->isOpIgnored(op); };
-#endif // NDEBUG
// The source block will be deleted, so it should not have any users (i.e.,
// there should be no predecessors).
assert(llvm::all_of(source->getUsers(), opIgnored) &&
"expected 'source' to have no predecessors");
+#endif // NDEBUG
impl->notifyBlockBeingInlined(dest, source, before);
for (auto it : llvm::zip(source->getArguments(), argValues))
@@ -1691,6 +1657,8 @@ void ConversionPatternRewriter::inlineBlockBefore(Block *source, Block *dest,
}
void ConversionPatternRewriter::startOpModification(Operation *op) {
+ assert(!impl->wasOpReplaced(op) &&
+ "attempting to modify a replaced/erased op");
#ifndef NDEBUG
impl->pendingRootUpdates.insert(op);
#endif
@@ -1698,6 +1666,8 @@ void ConversionPatternRewriter::startOpModification(Operation *op) {
}
void ConversionPatternRewriter::finalizeOpModification(Operation *op) {
+ assert(!impl->wasOpReplaced(op) &&
+ "attempting to modify a replaced/erased op");
PatternRewriter::finalizeOpModification(op);
// There is nothing to do here, we only need to track the operation at the
// start of the update.
@@ -1912,8 +1882,13 @@ OperationLegalizer::legalize(Operation *op,
// If this operation is recursively legal, mark its children as ignored so
// that we don't consider them for legalization.
- if (legalityInfo->isRecursivelyLegal)
- rewriter.getImpl().markNestedOpsIgnored(op);
+ if (legalityInfo->isRecursivelyLegal) {
+ op->walk([&](Operation *nested) {
+ if (op != nested)
+ rewriter.getImpl().ignoredOps.insert(nested);
+ });
+ }
+
return success();
}
diff --git a/mlir/test/CMakeLists.txt b/mlir/test/CMakeLists.txt
index 74921544c555..baf07ea1f010 100644
--- a/mlir/test/CMakeLists.txt
+++ b/mlir/test/CMakeLists.txt
@@ -173,6 +173,7 @@ if(LLVM_BUILD_EXAMPLES)
transform-opt-ch3
transform-opt-ch4
mlir-minimal-opt
+ mlir-transform-opt
)
if(MLIR_ENABLE_EXECUTION_ENGINE)
list(APPEND MLIR_TEST_DEPENDS
diff --git a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
index 00d7b6b8d65f..23e0edd510cb 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
@@ -927,3 +927,57 @@ func.func @affine_parallel_with_reductions_i64(%arg0: memref<3x3xi64>, %arg1: me
// CHECK: scf.reduce.return %[[RES]] : i64
// CHECK: }
// CHECK: }
+
+///////////////////////////////////////////////////////////////////////
+
+func.func @test_delinearize_index(%linear_index: index) -> (index, index, index) {
+  %b0 = arith.constant 16 : index
+  %b1 = arith.constant 224 : index
+  %b2 = arith.constant 224 : index
+  %1:3 = affine.delinearize_index %linear_index into (%b0, %b1, %b2) : index, index, index
+  return %1#0, %1#1, %1#2 : index, index, index
+}
+// CHECK-LABEL: func.func @test_delinearize_index(
+// CHECK-SAME: %[[VAL_0:.*]]: index) -> (index, index, index) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 16 : index
+// CHECK: %[[VAL_2:.*]] = arith.constant 224 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 224 : index
+// CHECK: %[[VAL_4:.*]] = arith.constant 50176 : index
+// CHECK: %[[VAL_5:.*]] = arith.constant 50176 : index
+// CHECK: %[[VAL_6:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_7:.*]] = arith.constant -1 : index
+// CHECK: %[[VAL_8:.*]] = arith.cmpi slt, %[[VAL_0]], %[[VAL_6]] : index
+// CHECK: %[[VAL_9:.*]] = arith.subi %[[VAL_7]], %[[VAL_0]] : index
+// CHECK: %[[VAL_10:.*]] = arith.select %[[VAL_8]], %[[VAL_9]], %[[VAL_0]] : index
+// CHECK: %[[VAL_11:.*]] = arith.divsi %[[VAL_10]], %[[VAL_5]] : index
+// CHECK: %[[VAL_12:.*]] = arith.subi %[[VAL_7]], %[[VAL_11]] : index
+// CHECK: %[[VAL_13:.*]] = arith.select %[[VAL_8]], %[[VAL_12]], %[[VAL_11]] : index
+// CHECK: %[[VAL_14:.*]] = arith.constant 50176 : index
+// CHECK: %[[VAL_15:.*]] = arith.remsi %[[VAL_0]], %[[VAL_14]] : index
+// CHECK: %[[VAL_16:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_17:.*]] = arith.cmpi slt, %[[VAL_15]], %[[VAL_16]] : index
+// CHECK: %[[VAL_18:.*]] = arith.addi %[[VAL_15]], %[[VAL_14]] : index
+// CHECK: %[[VAL_19:.*]] = arith.select %[[VAL_17]], %[[VAL_18]], %[[VAL_15]] : index
+// CHECK: %[[VAL_20:.*]] = arith.constant 50176 : index
+// CHECK: %[[VAL_21:.*]] = arith.remsi %[[VAL_0]], %[[VAL_20]] : index
+// CHECK: %[[VAL_22:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_23:.*]] = arith.cmpi slt, %[[VAL_21]], %[[VAL_22]] : index
+// CHECK: %[[VAL_24:.*]] = arith.addi %[[VAL_21]], %[[VAL_20]] : index
+// CHECK: %[[VAL_25:.*]] = arith.select %[[VAL_23]], %[[VAL_24]], %[[VAL_21]] : index
+// CHECK: %[[VAL_26:.*]] = arith.constant 224 : index
+// CHECK: %[[VAL_27:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_28:.*]] = arith.constant -1 : index
+// CHECK: %[[VAL_29:.*]] = arith.cmpi slt, %[[VAL_25]], %[[VAL_27]] : index
+// CHECK: %[[VAL_30:.*]] = arith.subi %[[VAL_28]], %[[VAL_25]] : index
+// CHECK: %[[VAL_31:.*]] = arith.select %[[VAL_29]], %[[VAL_30]], %[[VAL_25]] : index
+// CHECK: %[[VAL_32:.*]] = arith.divsi %[[VAL_31]], %[[VAL_26]] : index
+// CHECK: %[[VAL_33:.*]] = arith.subi %[[VAL_28]], %[[VAL_32]] : index
+// CHECK: %[[VAL_34:.*]] = arith.select %[[VAL_29]], %[[VAL_33]], %[[VAL_32]] : index
+// CHECK: %[[VAL_35:.*]] = arith.constant 224 : index
+// CHECK: %[[VAL_36:.*]] = arith.remsi %[[VAL_0]], %[[VAL_35]] : index
+// CHECK: %[[VAL_37:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_38:.*]] = arith.cmpi slt, %[[VAL_36]], %[[VAL_37]] : index
+// CHECK: %[[VAL_39:.*]] = arith.addi %[[VAL_36]], %[[VAL_35]] : index
+// CHECK: %[[VAL_40:.*]] = arith.select %[[VAL_38]], %[[VAL_39]], %[[VAL_36]] : index
+// CHECK: return %[[VAL_13]], %[[VAL_34]], %[[VAL_40]] : index, index, index
+// CHECK: }
diff --git a/mlir/test/Dialect/AMDGPU/optimize_shmem_reads_writes.mlir b/mlir/test/Dialect/AMDGPU/optimize_shmem_reads_writes.mlir
index a1de1ff87c22..983eee732e2a 100644
--- a/mlir/test/Dialect/AMDGPU/optimize_shmem_reads_writes.mlir
+++ b/mlir/test/Dialect/AMDGPU/optimize_shmem_reads_writes.mlir
@@ -1,13 +1,13 @@
-// RUN: mlir-opt %s --pass-pipeline='builtin.module(func.func(amdgpu-optimize-shared-memory))' | FileCheck %s
+// RUN: mlir-opt %s --pass-pipeline='builtin.module(func.func(amdgpu-optimize-shared-memory))' | FileCheck %s
// CHECK: @optimize_shmem([[arg0:%.+]]: memref<{{.*}}>, [[readRow:%.+]]: index, [[readCol:%.+]]: index, [[writeRow:%.+]]: index, [[writeCol:%.+]]: index, [[fragRow:%.+]]: index, [[fragCol:%.+]]: index, [[fragColPerm:%.+]]: index, [[stRow:%.+]]: index, [[stCol:%.+]]: index)
- func.func @optimize_shmem(%arg0: memref<4096x4096xf16>,
+ func.func @optimize_shmem(%arg0: memref<4096x4096xf16>,
%readRow: index, %readCol: index,
%writeRow: index, %writeCol: index,
- %fragRow: index, %fragCol: index,
+ %fragRow: index, %fragCol: index,
%fragColPerm: index,
%stRow: index, %stCol: index) {
- // CHECK: %[[cst:.+]] = arith.constant 0.000000e+00 : f16
+ // CHECK: %[[cst:.+]] = arith.constant 0.000000e+00 : f16
%cst = arith.constant 0.000000e+00 : f16
// CHECK: [[shmA:%.+]] = memref.alloc
@@ -15,42 +15,36 @@
%shmA = memref.alloc() {alignment = 64 : i64} : memref<128x32xf16, 3>
%shmB = memref.alloc() {alignment = 64 : i64} : memref<256x32xf16, 3>
- // CHECK: %[[D0:.+]] = vector.transfer_read [[arg0:%.+]][[[readRow:%.+]], [[readCol:%.+]]], [[cst:.+]] {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
%0 = vector.transfer_read %arg0[%readRow, %readCol], %cst {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
- // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
- // CHECK: vector.transfer_write %[[D0:.+]], [[shmB]][[[writeRow:%.+]], [[writeCol:%.+]]] {in_bounds = [true, true]} : vector<1x8xf16>, memref<256x32xf16, 3>
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
vector.transfer_write %0, %shmB[%writeRow, %writeCol] {in_bounds = [true, true]} : vector<1x8xf16>, memref<256x32xf16, 3>
gpu.barrier
gpu.barrier
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
// CHECK: [[fragColPerm:%.+]] = arith.xori [[fragCol:%.+]], [[xorBits]]
- // CHECK: vector.load [[shmB:%.+]][[[fragRow:%.+]], [[fragColPerm]]] : memref<256x32xf16, 3>, vector<8xf16>
%1 = vector.load %shmB[%fragRow, %fragColPerm] : memref<256x32xf16, 3>, vector<8xf16>
- // CHECK: %[[D2:.+]] = vector.transfer_read [[arg0:%.+]][[[readRow:%.+]], [[readCol:%.+]]], [[cst:.+]] {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
%2 = vector.transfer_read %arg0[%readRow, %readCol], %cst {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
- // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
- // CHECK: vector.transfer_write %[[D2:.+]], [[shmA:%.+]][[[writeRow:%.+]], [[writeCol:%.+]]] {in_bounds = [true, true]} : vector<1x8xf16>, memref<128x32xf16, 3>
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
vector.transfer_write %2, %shmA[%writeRow, %writeCol] {in_bounds = [true, true]} : vector<1x8xf16>, memref<128x32xf16, 3>
gpu.barrier
gpu.barrier
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
// CHECK: [[fragColPerm:%.+]] = arith.xori [[fragCol:%.+]], [[xorBits]]
- // CHECK: vector.load [[shmA:%.+]][[[fragRow:%.+]], [[fragColPerm]]] : memref<128x32xf16, 3>, vector<8xf16>
%3 = vector.load %shmA[%fragRow, %fragColPerm] : memref<128x32xf16, 3>, vector<8xf16>
return
}
diff --git a/mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir b/mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir
index 143e7c2d2709..b1bb91ffc297 100644
--- a/mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir
+++ b/mlir/test/Dialect/AMDGPU/transform_optimize_shmem_reads_writes.mlir
@@ -1,10 +1,10 @@
-// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter | FileCheck %s
// CHECK: @optimize_shmem([[arg0:%.+]]: memref<{{.*}}>, [[readRow:%.+]]: index, [[readCol:%.+]]: index, [[writeRow:%.+]]: index, [[writeCol:%.+]]: index, [[fragRow:%.+]]: index, [[fragCol:%.+]]: index, [[fragColPerm:%.+]]: index, [[stRow:%.+]]: index, [[stCol:%.+]]: index)
- func.func @optimize_shmem(%arg0: memref<4096x4096xf16>,
+ func.func @optimize_shmem(%arg0: memref<4096x4096xf16>,
%readRow: index, %readCol: index,
%writeRow: index, %writeCol: index,
- %fragRow: index, %fragCol: index,
+ %fragRow: index, %fragCol: index,
%fragColPerm: index,
%stRow: index, %stCol: index) {
%cst = arith.constant 0.000000e+00 : f16
@@ -13,33 +13,33 @@
%shmB = memref.alloc() {alignment = 64 : i64} : memref<256x32xf16, 3>
%0 = vector.transfer_read %arg0[%readRow, %readCol], %cst {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
- // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
vector.transfer_write %0, %shmB[%writeRow, %writeCol] {in_bounds = [true, true]} : vector<1x8xf16>, memref<256x32xf16, 3>
gpu.barrier
gpu.barrier
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
- // CHECK: [[fragColPerm:%.+]] = arith.xori [[fragCol:%.+]], [[xorBits]]
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[fragColPerm:%.+]] = arith.xori [[fragCol:%.+]], [[xorBits]]
%1 = vector.load %shmB[%fragRow, %fragColPerm] : memref<256x32xf16, 3>, vector<8xf16>
%2 = vector.transfer_read %arg0[%readRow, %readCol], %cst {in_bounds = [true, true]} : memref<4096x4096xf16>, vector<1x8xf16>
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
- // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[stRow:%.+]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[stColPerm:%.+]] = arith.xori [[stCol:%.+]], [[xorBits]]
vector.transfer_write %2, %shmA[%writeRow, %writeCol] {in_bounds = [true, true]} : vector<1x8xf16>, memref<128x32xf16, 3>
gpu.barrier
gpu.barrier
- // CHECK: [[c7:%.+]] = arith.constant 7 : index
- // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c7]]
- // CHECK: [[c2:%.+]] = arith.constant 2 : index
- // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
+ // CHECK: [[c6:%.+]] = arith.constant 6 : index
+ // CHECK: [[srcBits:%.+]] = arith.andi [[fragRow]], [[c6]]
+ // CHECK: [[c2:%.+]] = arith.constant 2 : index
+ // CHECK: [[xorBits:%.+]] = arith.shli [[srcBits]], [[c2]]
// CHECK: [[fragColPerm:%.+]] = arith.xori [[fragCol:%.+]], [[xorBits]]
%3 = vector.load %shmA[%fragRow, %fragColPerm] : memref<128x32xf16, 3>, vector<8xf16>
return
@@ -48,7 +48,7 @@
module attributes { transform.with_named_sequence } {
transform.named_sequence @__transform_main(%root: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["func.func"]} in %root : (!transform.any_op) -> !transform.any_op
- transform.amdgpu.optimize_shared_memory_reads_and_writes %0 : (!transform.any_op) -> ()
+ transform.amdgpu.optimize_shared_memory_reads_and_writes %0 {sharedMemoryLineSizeBytes = 128, defaultVectorSizeBits = 128}: (!transform.any_op) -> ()
transform.yield
} // @__transform_main
} // module
diff --git a/mlir/test/Dialect/Affine/access-analysis.mlir b/mlir/test/Dialect/Affine/access-analysis.mlir
new file mode 100644
index 000000000000..68310b932353
--- /dev/null
+++ b/mlir/test/Dialect/Affine/access-analysis.mlir
@@ -0,0 +1,67 @@
+// RUN: mlir-opt %s -split-input-file -test-affine-access-analysis -verify-diagnostics | FileCheck %s
+
+// CHECK-LABEL: func @loop_1d
+func.func @loop_1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+ %c0 = arith.constant 0 : index
+ %M = memref.dim %A, %c0 : memref<?x?xf32>
+ affine.for %i = 0 to %M {
+ affine.for %j = 0 to %M {
+ affine.load %A[%c0, %i] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 0}}
+ affine.load %A[%c0, 8 * %i + %j] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 1}}
+ // Note/FIXME: access stride isn't being checked.
+ // expected-remark@-3 {{contiguous along loop 0}}
+
+ // These are all non-contiguous along both loops. Nothing is emitted.
+ affine.load %A[%i, %c0] : memref<?x?xf32>
+ // Note/FIXME: access stride isn't being checked.
+ affine.load %A[%i, 8 * %j] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 1}}
+ affine.load %A[%j, 4 * %i] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 0}}
+ }
+ }
+ return
+}
+
+// -----
+
+#map = affine_map<(d0) -> (d0 * 16)>
+#map1 = affine_map<(d0) -> (d0 * 16 + 16)>
+#map2 = affine_map<(d0) -> (d0)>
+#map3 = affine_map<(d0) -> (d0 + 1)>
+
+func.func @tiled(%arg0: memref<*xf32>) {
+ %alloc = memref.alloc() {alignment = 64 : i64} : memref<1x224x224x64xf32>
+ %cast = memref.cast %arg0 : memref<*xf32> to memref<64xf32>
+ affine.for %arg1 = 0 to 4 {
+ affine.for %arg2 = 0 to 224 {
+ affine.for %arg3 = 0 to 14 {
+ %alloc_0 = memref.alloc() : memref<1x16x1x16xf32>
+ affine.for %arg4 = #map(%arg1) to #map1(%arg1) {
+ affine.for %arg5 = #map(%arg3) to #map1(%arg3) {
+ %0 = affine.load %cast[%arg4] : memref<64xf32>
+ // expected-remark@above {{contiguous along loop 3}}
+ affine.store %0, %alloc_0[0, %arg1 * -16 + %arg4, 0, %arg3 * -16 + %arg5] : memref<1x16x1x16xf32>
+ // expected-remark@above {{contiguous along loop 4}}
+ // expected-remark@above {{contiguous along loop 2}}
+ }
+ }
+ affine.for %arg4 = #map(%arg1) to #map1(%arg1) {
+ affine.for %arg5 = #map2(%arg2) to #map3(%arg2) {
+ affine.for %arg6 = #map(%arg3) to #map1(%arg3) {
+ %0 = affine.load %alloc_0[0, %arg1 * -16 + %arg4, -%arg2 + %arg5, %arg3 * -16 + %arg6] : memref<1x16x1x16xf32>
+ // expected-remark@above {{contiguous along loop 5}}
+ // expected-remark@above {{contiguous along loop 2}}
+ affine.store %0, %alloc[0, %arg5, %arg6, %arg4] : memref<1x224x224x64xf32>
+ // expected-remark@above {{contiguous along loop 3}}
+ }
+ }
+ }
+ memref.dealloc %alloc_0 : memref<1x16x1x16xf32>
+ }
+ }
+ }
+ return
+}
diff --git a/mlir/test/Dialect/Arith/expand-ops.mlir b/mlir/test/Dialect/Arith/expand-ops.mlir
index 046e8ff64fba..91f652e5a270 100644
--- a/mlir/test/Dialect/Arith/expand-ops.mlir
+++ b/mlir/test/Dialect/Arith/expand-ops.mlir
@@ -255,36 +255,21 @@ func.func @truncf_f32(%arg0 : f32) -> bf16 {
}
// CHECK-LABEL: @truncf_f32
-
-// CHECK-DAG: %[[C16:.+]] = arith.constant 16
-// CHECK-DAG: %[[C32768:.+]] = arith.constant 32768
-// CHECK-DAG: %[[C2130706432:.+]] = arith.constant 2130706432
-// CHECK-DAG: %[[C2139095040:.+]] = arith.constant 2139095040
-// CHECK-DAG: %[[C8388607:.+]] = arith.constant 8388607
-// CHECK-DAG: %[[C31:.+]] = arith.constant 31
-// CHECK-DAG: %[[C23:.+]] = arith.constant 23
-// CHECK-DAG: %[[BITCAST:.+]] = arith.bitcast %arg0
-// CHECK-DAG: %[[SIGN:.+]] = arith.shrui %[[BITCAST:.+]], %[[C31]]
-// CHECK-DAG: %[[ROUND:.+]] = arith.subi %[[C32768]], %[[SIGN]]
-// CHECK-DAG: %[[MANTISSA:.+]] = arith.andi %[[BITCAST]], %[[C8388607]]
-// CHECK-DAG: %[[ROUNDED:.+]] = arith.addi %[[MANTISSA]], %[[ROUND]]
-// CHECK-DAG: %[[ROLL:.+]] = arith.shrui %[[ROUNDED]], %[[C23]]
-// CHECK-DAG: %[[SHR:.+]] = arith.shrui %[[ROUNDED]], %[[ROLL]]
-// CHECK-DAG: %[[EXP:.+]] = arith.andi %0, %[[C2139095040]]
-// CHECK-DAG: %[[EXPROUND:.+]] = arith.addi %[[EXP]], %[[ROUNDED]]
-// CHECK-DAG: %[[EXPROLL:.+]] = arith.andi %[[EXPROUND]], %[[C2139095040]]
-// CHECK-DAG: %[[EXPMAX:.+]] = arith.cmpi uge, %[[EXP]], %[[C2130706432]]
-// CHECK-DAG: %[[EXPNEW:.+]] = arith.select %[[EXPMAX]], %[[EXP]], %[[EXPROLL]]
-// CHECK-DAG: %[[OVERFLOW_B:.+]] = arith.trunci %[[ROLL]]
-// CHECK-DAG: %[[KEEP_MAN:.+]] = arith.andi %[[EXPMAX]], %[[OVERFLOW_B]]
-// CHECK-DAG: %[[MANNEW:.+]] = arith.select %[[KEEP_MAN]], %[[MANTISSA]], %[[SHR]]
-// CHECK-DAG: %[[NEWSIGN:.+]] = arith.shli %[[SIGN]], %[[C31]]
-// CHECK-DAG: %[[WITHEXP:.+]] = arith.ori %[[NEWSIGN]], %[[EXPNEW]]
-// CHECK-DAG: %[[WITHMAN:.+]] = arith.ori %[[WITHEXP]], %[[MANNEW]]
-// CHECK-DAG: %[[SHIFT:.+]] = arith.shrui %[[WITHMAN]], %[[C16]]
-// CHECK-DAG: %[[TRUNC:.+]] = arith.trunci %[[SHIFT]]
-// CHECK-DAG: %[[RES:.+]] = arith.bitcast %[[TRUNC]]
-// CHECK: return %[[RES]]
+// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : i32
+// CHECK-DAG: %[[C16:.+]] = arith.constant 16 : i32
+// CHECK-DAG: %[[C7FC0_i16:.+]] = arith.constant 32704 : i16
+// CHECK-DAG: %[[C7FFF:.+]] = arith.constant 32767 : i32
+// CHECK-DAG: %[[ISNAN:.+]] = arith.cmpf une, %arg0, %arg0 : f32
+// CHECK-DAG: %[[BITCAST:.+]] = arith.bitcast %arg0 : f32 to i32
+// CHECK-DAG: %[[SHRUI:.+]] = arith.shrui %[[BITCAST]], %[[C16]] : i32
+// CHECK-DAG: %[[BIT16:.+]] = arith.andi %[[SHRUI]], %[[C1]] : i32
+// CHECK-DAG: %[[ROUNDING_BIAS:.+]] = arith.addi %[[BIT16]], %[[C7FFF]] : i32
+// CHECK-DAG: %[[BIASED:.+]] = arith.addi %[[BITCAST]], %[[ROUNDING_BIAS]] : i32
+// CHECK-DAG: %[[BIASED_SHIFTED:.+]] = arith.shrui %[[BIASED]], %[[C16]] : i32
+// CHECK-DAG: %[[NORMAL_CASE_RESULT_i16:.+]] = arith.trunci %[[BIASED_SHIFTED]] : i32 to i16
+// CHECK-DAG: %[[SELECT:.+]] = arith.select %[[ISNAN]], %[[C7FC0_i16]], %[[NORMAL_CASE_RESULT_i16]] : i16
+// CHECK-DAG: %[[RESULT:.+]] = arith.bitcast %[[SELECT]] : i16 to bf16
+// CHECK: return %[[RESULT]]
// -----
diff --git a/mlir/test/Dialect/EmitC/invalid_ops.mlir b/mlir/test/Dialect/EmitC/invalid_ops.mlir
index 121a2163d383..5f64b535d684 100644
--- a/mlir/test/Dialect/EmitC/invalid_ops.mlir
+++ b/mlir/test/Dialect/EmitC/invalid_ops.mlir
@@ -331,3 +331,27 @@ emitc.declare_func @bar
// expected-error@+1 {{'emitc.declare_func' op requires attribute 'sym_name'}}
"emitc.declare_func"() : () -> ()
+
+// -----
+
+func.func @logical_and_resulterror(%arg0: i32, %arg1: i32) {
+ // expected-error @+1 {{'emitc.logical_and' op result #0 must be 1-bit signless integer, but got 'i32'}}
+ %0 = "emitc.logical_and"(%arg0, %arg1) : (i32, i32) -> i32
+ return
+}
+
+// -----
+
+func.func @logical_not_resulterror(%arg0: i32) {
+ // expected-error @+1 {{'emitc.logical_not' op result #0 must be 1-bit signless integer, but got 'i32'}}
+ %0 = "emitc.logical_not"(%arg0) : (i32) -> i32
+ return
+}
+
+// -----
+
+func.func @logical_or_resulterror(%arg0: i32, %arg1: i32) {
+ // expected-error @+1 {{'emitc.logical_or' op result #0 must be 1-bit signless integer, but got 'i32'}}
+ %0 = "emitc.logical_or"(%arg0, %arg1) : (i32, i32) -> i32
+ return
+}
diff --git a/mlir/test/Dialect/EmitC/ops.mlir b/mlir/test/Dialect/EmitC/ops.mlir
index 93119be14c90..f852390f03e2 100644
--- a/mlir/test/Dialect/EmitC/ops.mlir
+++ b/mlir/test/Dialect/EmitC/ops.mlir
@@ -61,6 +61,16 @@ func.func @add_pointer(%arg0: !emitc.ptr<f32>, %arg1: i32, %arg2: !emitc.opaque<
return
}
+func.func @bitwise(%arg0: i32, %arg1: i32) -> () {
+ %0 = emitc.bitwise_and %arg0, %arg1 : (i32, i32) -> i32
+ %1 = emitc.bitwise_left_shift %arg0, %arg1 : (i32, i32) -> i32
+ %2 = emitc.bitwise_not %arg0 : (i32) -> i32
+ %3 = emitc.bitwise_or %arg0, %arg1 : (i32, i32) -> i32
+ %4 = emitc.bitwise_right_shift %arg0, %arg1 : (i32, i32) -> i32
+ %5 = emitc.bitwise_xor %arg0, %arg1 : (i32, i32) -> i32
+ return
+}
+
func.func @div_int(%arg0: i32, %arg1: i32) {
%1 = "emitc.div" (%arg0, %arg1) : (i32, i32) -> i32
return
@@ -117,6 +127,13 @@ func.func @cmp(%arg0 : i32, %arg1 : f32, %arg2 : i64, %arg3 : f64, %arg4 : !emit
return
}
+func.func @logical(%arg0: i32, %arg1: i32) {
+ %0 = emitc.logical_and %arg0, %arg1 : i32, i32
+ %1 = emitc.logical_not %arg0 : i32
+ %2 = emitc.logical_or %arg0, %arg1 : i32, i32
+ return
+}
+
func.func @test_if(%arg0: i1, %arg1: f32) {
emitc.if %arg0 {
%0 = emitc.call_opaque "func_const"(%arg1) : (f32) -> i32
diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir
index 8d249c9e9b9b..511b01887747 100644
--- a/mlir/test/Dialect/GPU/ops.mlir
+++ b/mlir/test/Dialect/GPU/ops.mlir
@@ -59,24 +59,39 @@ module attributes {gpu.container_module} {
gpu.module @kernels {
gpu.func @kernel_1(%arg0 : f32, %arg1 : memref<?xf32, 1>) kernel {
%tIdX = gpu.thread_id x
+ // CHECK: thread_id_x
%tIdY = gpu.thread_id y
+ // CHECK-NEXT: thread_id_y
%tIdZ = gpu.thread_id z
+ // CHECK-NEXT: thread_id_z
%bDimX = gpu.block_dim x
+ // CHECK-NEXT: block_dim_x
%bDimY = gpu.block_dim y
+ // CHECK-NEXT: block_dim_y
%bDimZ = gpu.block_dim z
+ // CHECK-NEXT: block_dim_z
%bIdX = gpu.block_id x
+ // CHECK-NEXT: block_id_x
%bIdY = gpu.block_id y
+ // CHECK-NEXT: block_id_y
%bIdZ = gpu.block_id z
+ // CHECK-NEXT: block_id_z
%gDimX = gpu.grid_dim x
+ // CHECK-NEXT: grid_dim_x
%gDimY = gpu.grid_dim y
+ // CHECK-NEXT: grid_dim_y
%gDimZ = gpu.grid_dim z
+ // CHECK-NEXT: grid_dim_z
%gIdX = gpu.global_id x
+ // CHECK-NEXT: global_id_x
%gIdY = gpu.global_id y
+ // CHECK-NEXT: global_id_y
%gIdZ = gpu.global_id z
+ // CHECK-NEXT: global_id_z
%sgId = gpu.subgroup_id : index
%numSg = gpu.num_subgroups : index
diff --git a/mlir/test/Dialect/Linalg/flatten-elementwise.mlir b/mlir/test/Dialect/Linalg/flatten-elementwise.mlir
new file mode 100644
index 000000000000..858c133dd536
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/flatten-elementwise.mlir
@@ -0,0 +1,99 @@
+// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
+
+// CHECK-LABEL: func.func @fill(
+// CHECK-SAME: %[[ARG0:.*]]: f32,
+// CHECK-SAME: %[[ARG1:.*]]: memref<32x7xf32>
+// CHECK-NEXT: %[[FLATTENED:.*]] = memref.collapse_shape %[[ARG1]] {{\[}}[0, 1]]
+// CHECK-NEXT: linalg.fill ins(%[[ARG0]] : f32) outs(%[[FLATTENED]] : memref<224xf32>)
+func.func @fill(%cst: f32, %arg: memref<32x7xf32>) {
+ linalg.fill ins(%cst: f32) outs(%arg: memref<32x7xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %flattened = transform.structured.flatten_elementwise %0
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func.func @fill_tensor(
+// CHECK-SAME: %[[ARG0:.*]]: f32,
+// CHECK-SAME: %[[ARG1:.*]]: tensor<32x7xf32>
+// CHECK-NEXT: %[[FLATTENED:.*]] = tensor.collapse_shape %[[ARG1]] {{\[}}[0, 1]]
+// CHECK-NEXT: %[[FLATTENED_RESULT:.*]] = linalg.fill ins(%[[ARG0]] : f32) outs(%[[FLATTENED]] : tensor<224xf32>)
+// CHECK-NEXT: %[[RESULT:.*]] = tensor.expand_shape %[[FLATTENED_RESULT]] {{\[}}[0, 1]]
+func.func @fill_tensor(%cst: f32, %arg: tensor<32x7xf32>) -> tensor<32x7xf32> {
+ %0 = linalg.fill ins(%cst: f32) outs(%arg: tensor<32x7xf32>) -> tensor<32x7xf32>
+ return %0 : tensor<32x7xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %flattened = transform.structured.flatten_elementwise %0
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: func.func @map(
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<32x7xf32>
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<32x7xf32>
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<32x7xf32>
+// CHECK-NEXT: %[[FLATTENED_0:.*]] = memref.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
+// CHECK-NEXT: %[[FLATTENED_1:.*]] = memref.collapse_shape %[[ARG1]] {{\[}}[0, 1]]
+// CHECK-NEXT: %[[FLATTENED_2:.*]] = memref.collapse_shape %[[ARG2]] {{\[}}[0, 1]]
+// CHECK-NEXT: linalg.map { arith.addf } ins(%[[FLATTENED_0]], %[[FLATTENED_1]] : memref<224xf32>, memref<224xf32>) outs(%[[FLATTENED_2]] : memref<224xf32>)
+func.func @map(%arg0: memref<32x7xf32>, %arg1: memref<32x7xf32>, %arg2: memref<32x7xf32>) {
+ linalg.map {arith.addf} ins(%arg0, %arg1: memref<32x7xf32>, memref<32x7xf32>) outs(%arg2: memref<32x7xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %flattened = transform.structured.flatten_elementwise %0
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
+// CHECK-LABEL: func.func @generic
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<32x7xf32>
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<32x7xf32>
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<32x7xf32>
+// CHECK-NEXT: %[[FLATTENED_0:.*]] = memref.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
+// CHECK-NEXT: %[[FLATTENED_1:.*]] = memref.collapse_shape %[[ARG1]] {{\[}}[0, 1]]
+// CHECK-NEXT: %[[FLATTENED_2:.*]] = memref.collapse_shape %[[ARG2]] {{\[}}[0, 1]]
+// CHECK-NEXT: linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins(%[[FLATTENED_0]], %[[FLATTENED_1]] : memref<224xf32>, memref<224xf32>) outs(%[[FLATTENED_2]] : memref<224xf32>)
+// CHECK-NEXT: ^bb0(%[[A:.*]]: f32, %[[B:.*]]: f32, %[[C:.*]]: f32)
+// CHECK-NEXT: %[[SUM:.*]] = arith.addf %[[A]], %[[B]]
+// CHECK-NEXT: linalg.yield %[[SUM]]
+#map = affine_map<(d0, d1) -> (d0, d1)>
+func.func @generic( %arg0: memref<32x7xf32>, %arg1: memref<32x7xf32>, %arg2: memref<32x7xf32>) {
+ linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1: memref<32x7xf32>, memref<32x7xf32>) outs(%arg2: memref<32x7xf32>) {
+ ^bb0(%a: f32, %b: f32, %c: f32):
+ %0 = arith.addf %a, %b : f32
+ linalg.yield %0 : f32
+ }
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %flattened = transform.structured.flatten_elementwise %0
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir b/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir
index d54cace31efb..b7e316f8925d 100644
--- a/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-peel-and-vectorize.mlir
@@ -19,7 +19,7 @@ func.func @matmul(%A: tensor<1024x512xf32>,
// CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index
// CHECK: %[[VSCALE:.*]] = vector.vscale
// CHECK: %[[STEP:.*]] = arith.muli %[[VSCALE]], %[[C16]] : index
-// CHECK: %2 = scf.for {{.*}} %[[C0]] to %[[C1024]] step %[[C8]] iter_args(%arg4 = %arg2) -> (tensor<1024x2000xf32>) {
+// CHECK: scf.for {{.*}} %[[C0]] to %[[C1024]] step %[[C8]] iter_args(%arg4 = %arg2) -> (tensor<1024x2000xf32>) {
// Main loop after vectorisation (without masking)
diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
index 96953c234a08..85e1c56dd45a 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
@@ -550,3 +550,48 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+// -----
+
+#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d0 + d1 + d2)>
+func.func @vectorize_reverse_like_tensor_extract(%arg0: tensor<1x2x3xf32>, %arg1: tensor<1x1x3xf32>, %arg2: index) -> tensor<1x1x3xf32> {
+ %c1 = arith.constant 1 : index
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ %0 = linalg.generic {indexing_maps = [#map], iterator_types = ["parallel", "parallel", "parallel"]} outs(%arg1 : tensor<1x1x3xf32>) {
+ ^bb0(%out: f32):
+ %1 = linalg.index 1 : index
+ %2 = linalg.index 0 : index
+ %3 = affine.apply #map1(%1, %2, %arg2)
+ %4 = linalg.index 2 : index
+ %5 = arith.subi %c2, %4 : index
+ %extracted = tensor.extract %arg0[%c0, %3, %5] : tensor<1x2x3xf32>
+ linalg.yield %extracted : f32
+ } -> tensor<1x1x3xf32>
+ return %0 : tensor<1x1x3xf32>
+}
+// CHECK-LABEL: func.func @vectorize_reverse_like_tensor_extract
+// CHECK-SAME: %[[ARG0:[0-9a-zA-Z]*]]
+// CHECK-SAME: %[[ARG1:[0-9a-zA-Z]*]]
+// CHECK-SAME: %[[ARG2:[0-9a-zA-Z]*]]
+// CHECK-DAG: %[[CST:.+]] = arith.constant dense<3> : vector<1x1x3xindex>
+// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG: %[[MASK:.*]] = arith.constant dense<true> : vector<1x1x3xi1>
+// CHECK-DAG: %[[PASSTHRU:.*]] = arith.constant dense<0.000000e+00> : vector<1x1x3xf32>
+// CHECK-DAG: %[[INIT_IDX:.+]] = arith.constant dense<[2, 1, 0]> : vector<3xindex>
+// CHECK: %[[T0:.+]] = vector.broadcast %[[ARG2]] : index to vector<1x1x3xindex>
+// CHECK: %[[T1:.+]] = arith.muli %[[T0]], %[[CST]] : vector<1x1x3xindex>
+// CHECK: %[[T2:.+]] = vector.broadcast %[[INIT_IDX]]
+// CHECK: %[[T3:.+]] = arith.addi %[[T2]], %[[T1]]
+// CHECK: %[[GATHER:.*]] = vector.gather %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]] [%[[T3]]], %[[MASK]], %[[PASSTHRU]]
+// CHECK: vector.transfer_write %[[GATHER]]
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index f85bc5111d7a..395b812a7685 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -1027,3 +1027,13 @@ func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x
to tensor<3x4x2x4xi32, #DSDD>
return %t1 : tensor<3x4x2x4xi32, #DSDD>
}
+
+// -----
+
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
+
+func.func @sparse_print(%arg0: tensor<10x10xf64>) {
+ // expected-error@+1 {{'sparse_tensor.print' op operand #0 must be sparse tensor of any type values}}
+ sparse_tensor.print %arg0 : tensor<10x10xf64>
+ return
+}
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index 476fa1b08a39..f4a58df1d4d2 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -705,8 +705,25 @@ func.func @sparse_lvl(%arg0: index, %t : tensor<?x?xi32, #BSR>) -> index {
map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>
+// CHECK-LABEL: func.func @sparse_reinterpret_map(
+// CHECK-SAME: %[[A0:.*]]: tensor<6x12xi32, #sparse{{[0-9]*}}>)
+// CHECK: %[[VAL:.*]] = sparse_tensor.reinterpret_map %[[A0]]
+// CHECK: return %[[VAL]]
func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x3xi32, #DSDD> {
%t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
to tensor<3x4x2x3xi32, #DSDD>
return %t1 : tensor<3x4x2x3xi32, #DSDD>
}
+
+// -----
+
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
+
+// CHECK-LABEL: func.func @sparse_print(
+// CHECK-SAME: %[[A0:.*]]: tensor<10x10xf64, #sparse{{[0-9]*}}>)
+// CHECK: sparse_tensor.print %[[A0]]
+// CHECK: return
+func.func @sparse_print(%arg0: tensor<10x10xf64, #CSR>) {
+ sparse_tensor.print %arg0 : tensor<10x10xf64, #CSR>
+ return
+}
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index e123c77aabd5..d17c23adfb14 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -822,7 +822,7 @@ func.func @infer_src_shape_pack(%src: tensor<?x?x?x?xf32>, %dest: tensor<10x20x3
// CHECK-LABEL: func.func @infer_src_shape_pack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?xf32> to tensor<30x20x?x10xf32>
+// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?xf32> to tensor<40x20x?x30xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[CAST_SRC]] {{.+}} into %[[DEST]]
// CHECK: return %[[PACK]]
@@ -841,13 +841,24 @@ func.func @infer_dest_shape_pack(%src: tensor<30x20x?x10xf32>, %dest: tensor<?x?
// CHECK-LABEL: func.func @infer_dest_shape_pack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?x16xf32> to tensor<10x20x30x?x16xf32>
+// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?x16xf32> to tensor<?x20x10x30x16xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[SRC]] {{.+}} into %[[CAST_DEST]]
-// CHECK: %[[CAST_PACK:.+]] = tensor.cast %[[PACK]] : tensor<10x20x30x?x16xf32> to tensor<?x?x?x?x16xf32>
+// CHECK: %[[CAST_PACK:.+]] = tensor.cast %[[PACK]] : tensor<?x20x10x30x16xf32> to tensor<?x?x?x?x16xf32>
// CHECK: return %[[CAST_PACK]]
// -----
+func.func @no_infer_pack_shape(%arg0: tensor<?x32x100xf32>, %arg1: index) -> tensor<32x7x?x16x1xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %0 = tensor.empty(%arg1) : tensor<32x7x?x16x1xf32>
+ %pack = tensor.pack %arg0 padding_value(%cst : f32) outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 0] inner_tiles = [16, 1] into %0 : tensor<?x32x100xf32> -> tensor<32x7x?x16x1xf32>
+ return %pack : tensor<32x7x?x16x1xf32>
+}
+// CHECK-LABEL: func.func @no_infer_pack_shape
+// CHECK-NOT: tensor.cast
+
+// -----
+
func.func @fold_padding_value_pack_negative1(%arg0: tensor<1200x499999xf32>) -> tensor<31250x1200x16x1xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<31250x1200x16x1xf32>
@@ -920,9 +931,9 @@ func.func @infer_dest_shape_unpack(%src: tensor<10x20x30x40x16xf32>, %dest: tens
// CHECK-LABEL: func.func @infer_dest_shape_unpack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?xf32> to tensor<30x20x?x10xf32>
+// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?xf32> to tensor<40x20x?x30xf32>
// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[SRC]] {{.+}} into %[[CAST_DEST]]
-// CHECK: %[[CAST_UNPACK:.+]] = tensor.cast %[[UNPACK]] : tensor<30x20x?x10xf32> to tensor<?x?x?x?xf32>
+// CHECK: %[[CAST_UNPACK:.+]] = tensor.cast %[[UNPACK]] : tensor<40x20x?x30xf32> to tensor<?x?x?x?xf32>
// CHECK: return %[[CAST_UNPACK]]
// -----
@@ -938,12 +949,24 @@ func.func @infer_src_shape_unpack(%src: tensor<?x?x?x?x16xf32>, %dest: tensor<30
// CHECK-LABEL: func.func @infer_src_shape_unpack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?x16xf32> to tensor<10x20x30x?x16xf32>
+// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?x16xf32> to tensor<?x20x10x30x16xf32>
// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[CAST_SRC]]
// CHECK: return %[[UNPACK]]
// -----
+func.func @no_infer_unpack_shape(%arg1: tensor<32x7x?x16x1xf32>, %arg2: index) -> tensor<?x32x100xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %0 = tensor.empty(%arg2) : tensor<?x32x100xf32>
+ %unpack = tensor.unpack %arg1 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 0] inner_tiles = [16, 1] into %0 : tensor<32x7x?x16x1xf32> -> tensor<?x32x100xf32>
+ return %unpack : tensor<?x32x100xf32>
+}
+// CHECK-LABEL: func.func @no_infer_unpack_shape
+// CHECK-NOT: tensor.cast
+
+// -----
+
+
// CHECK-LABEL: func @fold_overlapping_insert
// CHECK-SAME: %[[INPUT:.+]]: tensor<?x?x?xf32>, %{{.+}}: tensor<4x?x8xf32>, %[[SLICE2:.+]]: tensor<4x?x8xf32>
func.func @fold_overlapping_insert(%input : tensor<?x?x?xf32>, %slice1: tensor<4x?x8xf32>, %slice2: tensor<4x?x8xf32>, %i: index, %size: index) -> (tensor<?x?x?xf32>) {
diff --git a/mlir/test/Dialect/Transform/include/test-interpreter-external-concurrent-source.mlir b/mlir/test/Dialect/Transform/include/test-interpreter-external-concurrent-source.mlir
index 316b90f85236..255ff5f31ed3 100644
--- a/mlir/test/Dialect/Transform/include/test-interpreter-external-concurrent-source.mlir
+++ b/mlir/test/Dialect/Transform/include/test-interpreter-external-concurrent-source.mlir
@@ -1,16 +1,21 @@
// RUN: mlir-opt %s
// No need to check anything else than parsing here, this is being used by another test as data.
-transform.with_pdl_patterns {
-^bb0(%arg0: !transform.any_op):
- pdl.pattern @func_return : benefit(1) {
- %0 = pdl.operation "func.return"
- pdl.rewrite %0 with "transform.dialect"
- }
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ transform.with_pdl_patterns %root : !transform.any_op {
+ ^bb0(%arg0: !transform.any_op):
+ pdl.pattern @func_return : benefit(1) {
+ %0 = pdl.operation "func.return"
+ pdl.rewrite %0 with "transform.dialect"
+ }
- sequence %arg0 : !transform.any_op failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
- %0 = pdl_match @func_return in %arg1 : (!transform.any_op) -> !transform.op<"func.return">
- transform.debug.emit_remark_at %0, "matched" : !transform.op<"func.return">
+ sequence %arg0 : !transform.any_op failures(propagate) {
+ ^bb1(%arg1: !transform.any_op):
+ %0 = pdl_match @func_return in %arg1 : (!transform.any_op) -> !transform.op<"func.return">
+ transform.debug.emit_remark_at %0, "matched" : !transform.op<"func.return">
+ }
+ }
+ transform.yield
}
}
diff --git a/mlir/test/Dialect/Transform/include/test-interpreter-external-source.mlir b/mlir/test/Dialect/Transform/include/test-interpreter-external-source.mlir
index 5956c86ebbe4..f6b7f787cc2c 100644
--- a/mlir/test/Dialect/Transform/include/test-interpreter-external-source.mlir
+++ b/mlir/test/Dialect/Transform/include/test-interpreter-external-source.mlir
@@ -1,11 +1,13 @@
// RUN: mlir-opt %s
// No need to check anything else than parsing here, this is being used by another test as data.
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
- transform.debug.emit_remark_at %arg0, "outer" : !transform.any_op
- transform.sequence %arg0 : !transform.any_op failures(propagate) attributes {transform.target_tag="transform"} {
- ^bb1(%arg1: !transform.any_op):
- transform.debug.emit_remark_at %arg1, "inner" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
+ transform.debug.emit_remark_at %arg0, "outer" : !transform.any_op
+ transform.sequence %arg0 : !transform.any_op failures(propagate) attributes {transform.target_tag="transform"} {
+ ^bb1(%arg1: !transform.any_op):
+ transform.debug.emit_remark_at %arg1, "inner" : !transform.any_op
+ }
+ transform.yield
}
}
diff --git a/mlir/test/Dialect/Transform/multi-arg-top-level-ops.mlir b/mlir/test/Dialect/Transform/multi-arg-top-level-ops.mlir
index 9a7e7ca2f953..1c018b1b1f77 100644
--- a/mlir/test/Dialect/Transform/multi-arg-top-level-ops.mlir
+++ b/mlir/test/Dialect/Transform/multi-arg-top-level-ops.mlir
@@ -1,10 +1,15 @@
-// RUN: mlir-opt %s --pass-pipeline='builtin.module(test-transform-dialect-interpreter{bind-first-extra-to-ops=func.func bind-second-extra-to-ops=func.return})' \
-// RUN: --split-input-file --verify-diagnostics
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(transform-interpreter{\
+// RUN: debug-bind-trailing-args=func.func,func.return})" \
+// RUN: --split-input-file --verify-diagnostics
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op, %arg2: !transform.any_op):
- transform.debug.emit_remark_at %arg1, "first extra" : !transform.any_op
- transform.debug.emit_remark_at %arg2, "second extra" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.any_op,
+ %arg2: !transform.any_op) {
+ transform.debug.emit_remark_at %arg1, "first extra" : !transform.any_op
+ transform.debug.emit_remark_at %arg2, "second extra" : !transform.any_op
+ transform.yield
+ }
}
// expected-remark @below {{first extra}}
@@ -26,9 +31,13 @@ func.func @bar(%arg0: i1) {
// -----
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op, %arg2: !transform.param<i64>):
- // expected-error @above {{wrong kind of value provided for top-level parameter}}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.any_op,
+ %arg2: !transform.param<i64>) {
+ // expected-error @above {{wrong kind of value provided for top-level parameter}}
+ transform.yield
+ }
}
func.func @foo() {
@@ -37,9 +46,13 @@ func.func @foo() {
// -----
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op, %arg2: !transform.any_value):
- // expected-error @above {{wrong kind of value provided for the top-level value handle}}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.any_op,
+ %arg2: !transform.any_value) {
+ // expected-error @above {{wrong kind of value provided for the top-level value handle}}
+ transform.yield
+ }
}
func.func @foo() {
@@ -48,19 +61,27 @@ func.func @foo() {
// -----
-// expected-error @below {{operation expects 1 extra value bindings, but 2 were provided to the interpreter}}
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op):
+
+module attributes {transform.with_named_sequence} {
+ // expected-error @below {{operation expects 1 extra value bindings, but 2 were provided to the interpreter}}
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.any_op) {
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op, %arg2: !transform.any_op):
- transform.sequence %arg0, %arg1, %arg2 : !transform.any_op, !transform.any_op, !transform.any_op failures(propagate) {
- ^bb0(%arg3: !transform.any_op, %arg4: !transform.any_op, %arg5: !transform.any_op):
- transform.debug.emit_remark_at %arg4, "first extra" : !transform.any_op
- transform.debug.emit_remark_at %arg5, "second extra" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.any_op,
+ %arg2: !transform.any_op) {
+ transform.sequence %arg0, %arg1, %arg2 : !transform.any_op, !transform.any_op, !transform.any_op failures(propagate) {
+ ^bb0(%arg3: !transform.any_op, %arg4: !transform.any_op, %arg5: !transform.any_op):
+ transform.debug.emit_remark_at %arg4, "first extra" : !transform.any_op
+ transform.debug.emit_remark_at %arg5, "second extra" : !transform.any_op
+ }
+ transform.yield
}
}
diff --git a/mlir/test/Dialect/Transform/multi-arg-top-level-params.mlir b/mlir/test/Dialect/Transform/multi-arg-top-level-params.mlir
index f59a4b6d4ccc..6486bcae3294 100644
--- a/mlir/test/Dialect/Transform/multi-arg-top-level-params.mlir
+++ b/mlir/test/Dialect/Transform/multi-arg-top-level-params.mlir
@@ -1,24 +1,37 @@
-// RUN: mlir-opt %s --pass-pipeline='builtin.module(test-transform-dialect-interpreter{bind-first-extra-to-params=1,2,3 bind-second-extra-to-params=42,45})' \
+// RUN: mlir-opt %s --pass-pipeline='builtin.module(transform-interpreter{\
+// RUN: debug-bind-trailing-args=#1;2;3,#42;45})' \
// RUN: --split-input-file --verify-diagnostics
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.param<i64>, %arg2: !transform.param<i64>):
- // expected-remark @below {{1 : i64, 2 : i64, 3 : i64}}
- transform.debug.emit_param_as_remark %arg1 : !transform.param<i64>
- // expected-remark @below {{42 : i64, 45 : i64}}
- transform.debug.emit_param_as_remark %arg2 : !transform.param<i64>
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.param<i64>,
+ %arg2: !transform.param<i64>) {
+ // expected-remark @below {{1 : i64, 2 : i64, 3 : i64}}
+ transform.debug.emit_param_as_remark %arg1 : !transform.param<i64>
+ // expected-remark @below {{42 : i64, 45 : i64}}
+ transform.debug.emit_param_as_remark %arg2 : !transform.param<i64>
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op, %arg2: !transform.param<i64>):
- // expected-error @above {{wrong kind of value provided for top-level operation handle}}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.any_op,
+ // expected-error @above {{wrong kind of value provided for top-level operation handle}}
+ %arg2: !transform.param<i64>) {
+ transform.yield
+ }
}
// -----
-// expected-error @below {{operation expects 3 extra value bindings, but 2 were provided to the interpreter}}
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.param<i64>, %arg2: !transform.param<i64>, %arg3: !transform.param<i64>):
+module attributes {transform.with_named_sequence} {
+ // expected-error @below {{operation expects 3 extra value bindings, but 2 were provided to the interpreter}}
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.param<i64>,
+ %arg2: !transform.param<i64>, %arg3: !transform.param<i64>) {
+ transform.yield
+ }
}
diff --git a/mlir/test/Dialect/Transform/multi-arg-top-level-values.mlir b/mlir/test/Dialect/Transform/multi-arg-top-level-values.mlir
index 38d7e2869777..dcc1079267dc 100644
--- a/mlir/test/Dialect/Transform/multi-arg-top-level-values.mlir
+++ b/mlir/test/Dialect/Transform/multi-arg-top-level-values.mlir
@@ -1,4 +1,5 @@
-// RUN: mlir-opt %s --pass-pipeline='builtin.module(test-transform-dialect-interpreter{bind-first-extra-to-results-of-ops=test.some_returning_op bind-second-extra-to-results-of-ops=test.some_other_returning_op})' \
+// RUN: mlir-opt %s --pass-pipeline='builtin.module(transform-interpreter{\
+// RUN: debug-bind-trailing-args=^test.some_returning_op,^test.some_other_returning_op})' \
// RUN: --split-input-file --verify-diagnostics
// Note that diagnostic checker will merge two diagnostics with the same message
@@ -21,10 +22,14 @@
// expected-note @below {{value handle points to an op result #1}}
%2:2 = "test.some_other_returning_op"() : () -> (f32, f64)
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_value, %arg2: !transform.any_value):
- transform.debug.emit_remark_at %arg1, "first extra" : !transform.any_value
- transform.debug.emit_remark_at %arg2, "second extra" : !transform.any_value
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ %arg0: !transform.any_op, %arg1: !transform.any_value,
+ %arg2: !transform.any_value) {
+ transform.debug.emit_remark_at %arg1, "first extra" : !transform.any_value
+ transform.debug.emit_remark_at %arg2, "second extra" : !transform.any_value
+ transform.yield
+ }
}
// -----
@@ -32,14 +37,19 @@ transform.sequence failures(propagate) {
%0:2 = "test.some_returning_op"() : () -> (i32, i64)
%1 = "test.some_returning_op"() : () -> index
-transform.sequence failures(propagate) {
-// expected-error @below {{wrong kind of value provided for top-level operation handle}}
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op, %arg2: !transform.any_value):
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(
+ // expected-error @below {{wrong kind of value provided for top-level operation handle}}
+ %arg0: !transform.any_op, %arg1: !transform.any_op, %arg2: !transform.any_value) {
+ transform.yield
+ }
}
// -----
-// expected-error @below {{operation expects 1 extra value bindings, but 2 were provided to the interpreter}}
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_value):
+module attributes {transform.with_named_sequence} {
+ // expected-error @below {{operation expects 1 extra value bindings, but 2 were provided to the interpreter}}
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op, %arg1: !transform.any_value) {
+ transform.yield
+ }
}
diff --git a/mlir/test/Dialect/Transform/test-interpreter-debug.mlir b/mlir/test/Dialect/Transform/test-interpreter-debug.mlir
index c7dad582dd43..99301ea23c6f 100644
--- a/mlir/test/Dialect/Transform/test-interpreter-debug.mlir
+++ b/mlir/test/Dialect/Transform/test-interpreter-debug.mlir
@@ -1,19 +1,21 @@
-// RUN: mlir-opt %s --pass-pipeline="builtin.module(test-transform-dialect-interpreter{debug-payload-root-tag=payload debug-transform-root-tag=transform})" \
-// RUN: --allow-unregistered-dialect --split-input-file --verify-diagnostics
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(transform-interpreter{\
+// RUN: debug-payload-root-tag=payload \
+// RUN: entry-point=transform})" \
+// RUN: --allow-unregistered-dialect --split-input-file --verify-diagnostics
// expected-error @below {{could not find the operation with transform.target_tag="payload" attribute}}
-module {
- transform.sequence failures(suppress) {
- ^bb0(%arg0: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @transform(%arg0: !transform.any_op) {
+ transform.yield
}
}
// -----
-// expected-error @below {{could not find the operation with transform.target_tag="transform" attribute}}
-module {
- transform.sequence failures(suppress) {
- ^bb0(%arg0: !transform.any_op):
+// expected-error @below {{could not find a nested named sequence with name: transform}}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @not_transform(%arg0: !transform.any_op) {
+ transform.yield
}
module attributes {transform.target_tag="payload"} {}
@@ -21,42 +23,16 @@ module {
// -----
-// expected-error @below {{more than one operation with transform.target_tag="transform" attribute}}
-module {
- // expected-note @below {{first operation}}
- transform.sequence failures(propagate) attributes {transform.target_tag="transform"} {
- ^bb0(%arg0: !transform.any_op):
- }
-
- // expected-note @below {{other operation}}
- transform.sequence failures(propagate) attributes {transform.target_tag="transform"} {
- ^bb0(%arg0: !transform.any_op):
- }
-
- module attributes {transform.target_tag="payload"} {}
-}
-
-// -----
-
-module {
- // expected-error @below {{expected the transform entry point to be a top-level transform op}}
- func.func private @foo() attributes {transform.target_tag="transform"}
-
- module attributes {transform.target_tag="payload"} {}
-}
-
-// -----
-
-module {
- transform.sequence failures(suppress) attributes {transform.target_tag="transform"} {
- ^bb0(%arg0: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @transform(%arg0: !transform.any_op) {
transform.debug.emit_remark_at %arg0, "payload" : !transform.any_op
+ transform.yield
}
- // This will not be executed because it's not tagged.
- transform.sequence failures(suppress) {
- ^bb0(%arg0: !transform.any_op):
+ // This will not be executed.
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
transform.debug.emit_remark_at %arg0, "some other text that is not printed" : !transform.any_op
+ transform.yield
}
module {
diff --git a/mlir/test/Dialect/Transform/test-interpreter-external-concurrent.mlir b/mlir/test/Dialect/Transform/test-interpreter-external-concurrent.mlir
index 59c2b672a6e6..9884102c6c0f 100644
--- a/mlir/test/Dialect/Transform/test-interpreter-external-concurrent.mlir
+++ b/mlir/test/Dialect/Transform/test-interpreter-external-concurrent.mlir
@@ -1,4 +1,6 @@
-// RUN: mlir-opt %s --pass-pipeline="builtin.module(func.func(test-transform-dialect-interpreter{transform-file-name=%p%{fs-sep}include%{fs-sep}test-interpreter-external-concurrent-source.mlir}))" \
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(\
+// RUN: transform-preload-library{transform-library-paths=%p%{fs-sep}include%{fs-sep}test-interpreter-external-concurrent-source.mlir},\
+// RUN: func.func(transform-interpreter))" \
// RUN: --verify-diagnostics
// Exercising the pass on multiple functions of different lengths that may be
diff --git a/mlir/test/Dialect/Transform/test-interpreter-external.mlir b/mlir/test/Dialect/Transform/test-interpreter-external.mlir
index ba8e0c6870db..599ce05fcc40 100644
--- a/mlir/test/Dialect/Transform/test-interpreter-external.mlir
+++ b/mlir/test/Dialect/Transform/test-interpreter-external.mlir
@@ -1,4 +1,6 @@
-// RUN: mlir-opt %s --pass-pipeline="builtin.module(test-transform-dialect-interpreter{transform-file-name=%p%{fs-sep}include%{fs-sep}test-interpreter-external-source.mlir})" \
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(\
+// RUN: transform-preload-library{transform-library-paths=%p%{fs-sep}include%{fs-sep}test-interpreter-external-source.mlir},\
+// RUN: transform-interpreter)" \
// RUN: --verify-diagnostics
// The schedule in the separate file emits remarks at the payload root.
diff --git a/mlir/test/Dialect/Transform/test-interpreter.mlir b/mlir/test/Dialect/Transform/test-interpreter.mlir
index de5807b2874b..b6850e2024d5 100644
--- a/mlir/test/Dialect/Transform/test-interpreter.mlir
+++ b/mlir/test/Dialect/Transform/test-interpreter.mlir
@@ -1411,7 +1411,6 @@ module attributes {transform.with_named_sequence} {
// -----
// expected-error @below {{could not find a nested named sequence with name: __transform_main}}
-// expected-error @below {{could not find transform entry point: __transform_main in either payload or transform module}}
module {
}
diff --git a/mlir/test/Dialect/Transform/test-pass-application.mlir b/mlir/test/Dialect/Transform/test-pass-application.mlir
index 65625457c868..7cb5387b937d 100644
--- a/mlir/test/Dialect/Transform/test-pass-application.mlir
+++ b/mlir/test/Dialect/Transform/test-pass-application.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics | FileCheck %s
// CHECK-LABEL: func @successful_pass_application(
// CHECK: %[[c5:.*]] = arith.constant 5 : index
@@ -9,10 +9,12 @@ func.func @successful_pass_application(%t: tensor<5xf32>) -> index {
return %dim : index
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_registered_pass "canonicalize" to %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_registered_pass "canonicalize" to %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -22,12 +24,14 @@ func.func @pass_pipeline() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // This pipeline does not do anything. Just make sure that the pipeline is
- // found and no error is produced.
- transform.apply_registered_pass "test-options-pass-pipeline" to %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // This pipeline does not do anything. Just make sure that the pipeline is
+ // found and no error is produced.
+ transform.apply_registered_pass "test-options-pass-pipeline" to %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -36,11 +40,13 @@ func.func @invalid_pass_name() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // expected-error @below {{unknown pass or pass pipeline: non-existing-pass}}
- transform.apply_registered_pass "non-existing-pass" to %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // expected-error @below {{unknown pass or pass pipeline: non-existing-pass}}
+ transform.apply_registered_pass "non-existing-pass" to %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -53,11 +59,13 @@ func.func @not_isolated_from_above(%t: tensor<5xf32>) -> index {
return %dim : index
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %1 = transform.structured.match ops{["tensor.dim"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // expected-error @below {{pass pipeline failed}}
- transform.apply_registered_pass "canonicalize" to %1 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %1 = transform.structured.match ops{["tensor.dim"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // expected-error @below {{pass pipeline failed}}
+ transform.apply_registered_pass "canonicalize" to %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -66,11 +74,13 @@ func.func @invalid_pass_option() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // expected-error @below {{failed to add pass or pass pipeline to pipeline: canonicalize}}
- transform.apply_registered_pass "canonicalize" to %1 {options = "invalid-option=1"} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // expected-error @below {{failed to add pass or pass pipeline to pipeline: canonicalize}}
+ transform.apply_registered_pass "canonicalize" to %1 {options = "invalid-option=1"} : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -80,27 +90,29 @@ func.func @valid_pass_option() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_registered_pass "canonicalize" to %1 {options = "top-down=false"} : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_registered_pass "canonicalize" to %1 {options = "top-down=false"} : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
-module {
+module attributes {transform.with_named_sequence} {
// expected-error @below {{trying to schedule a pass on an unsupported operation}}
// expected-note @below {{target op}}
func.func @invalid_target_op_type() {
return
}
- transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
%1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
// func-bufferize can be applied only to ModuleOps.
// expected-error @below {{pass pipeline failed}}
transform.apply_registered_pass "func-bufferize" to %1 : (!transform.any_op) -> !transform.any_op
+ transform.yield
}
}
diff --git a/mlir/test/Dialect/Transform/test-pattern-application.mlir b/mlir/test/Dialect/Transform/test-pattern-application.mlir
index 10cd9ef351fe..0c41e81b17b5 100644
--- a/mlir/test/Dialect/Transform/test-pattern-application.mlir
+++ b/mlir/test/Dialect/Transform/test-pattern-application.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics | FileCheck %s
// CHECK-LABEL: func @update_tracked_op_mapping()
// CHECK: "test.container"() ({
@@ -11,15 +11,17 @@ func.func @update_tracked_op_mapping() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %0 {
- transform.apply_patterns.transform.test_patterns
- } : !transform.any_op
- // Add an attribute to %1, which is now mapped to a new op.
- transform.annotate %1 "annotated" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.transform.test_patterns
+ } : !transform.any_op
+ // Add an attribute to %1, which is now mapped to a new op.
+ transform.annotate %1 "annotated" : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -33,19 +35,21 @@ func.func @replacement_op_not_found() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // expected-note @below {{replacement is required because this handle must be updated}}
- %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // expected-error @below {{tracking listener failed to find replacement op during application of this transform op}}
- // expected-note @below {{ran out of suitable replacement values}}
- transform.apply_patterns to %0 {
- transform.apply_patterns.transform.test_patterns
- } : !transform.any_op
- // %1 must be used in some way. If no replacement payload op could be found,
- // an error is thrown only if the handle is not dead.
- transform.annotate %1 "annotated" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // expected-note @below {{replacement is required because this handle must be updated}}
+ %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // expected-error @below {{tracking listener failed to find replacement op during application of this transform op}}
+ // expected-note @below {{ran out of suitable replacement values}}
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.transform.test_patterns
+ } : !transform.any_op
+ // %1 must be used in some way. If no replacement payload op could be found,
+ // an error is thrown only if the handle is not dead.
+ transform.annotate %1 "annotated" : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -61,14 +65,16 @@ func.func @replacement_op_for_dead_handle_not_found() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // No error because %1 is dead.
- transform.apply_patterns to %0 {
- transform.apply_patterns.transform.test_patterns
- } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // No error because %1 is dead.
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.transform.test_patterns
+ } : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -84,14 +90,16 @@ func.func @replacement_op_not_found_silenced() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %0 {
- transform.apply_patterns.transform.test_patterns
- } {transform.silence_tracking_failures} : !transform.any_op
- transform.annotate %1 "annotated" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.transform.test_patterns
+ } {transform.silence_tracking_failures} : !transform.any_op
+ transform.annotate %1 "annotated" : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -103,12 +111,14 @@ func.func @patterns_apply_only_to_target_body() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-%0 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %0 {
- transform.apply_patterns.transform.test_patterns
- } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.transform.test_patterns
+ } : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -125,16 +135,18 @@ func.func @erase_tracked_op() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["test.erase_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.debug.emit_remark_at %1, "matched op" : !transform.any_op
- transform.apply_patterns to %0 {
- transform.apply_patterns.transform.test_patterns
- } : !transform.any_op
- // No marker should be printed.
- transform.debug.emit_remark_at %1, "op was deleted" : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["test.erase_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.debug.emit_remark_at %1, "matched op" : !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.transform.test_patterns
+ } : !transform.any_op
+ // No marker should be printed.
+ transform.debug.emit_remark_at %1, "op was deleted" : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -143,7 +155,7 @@ transform.sequence failures(propagate) {
// CHECK: "test.container"() ({
// CHECK-NEXT: ^bb0:
// CHECK-NEXT: }) : () -> ()
-module {
+module attributes {transform.with_named_sequence} {
func.func @erase_tracked_op_in_named_sequence() {
"test.container"() ({
// expected-remark @below {{matched op}}
@@ -152,23 +164,21 @@ module {
return
}
- module attributes { transform.with_named_sequence } {
- transform.named_sequence @foo(%arg0: !transform.any_op {transform.readonly}) -> () {
- transform.apply_patterns to %arg0 {
- transform.apply_patterns.transform.test_patterns
- } : !transform.any_op
- transform.yield
- }
+ transform.named_sequence @foo(%arg0: !transform.any_op {transform.readonly}) -> () {
+ transform.apply_patterns to %arg0 {
+ transform.apply_patterns.transform.test_patterns
+ } : !transform.any_op
+ transform.yield
+ }
- transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["test.erase_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.debug.emit_remark_at %1, "matched op" : !transform.any_op
- include @foo failures(propagate) (%0) : (!transform.any_op) -> ()
- // No marker should be printed.
- transform.debug.emit_remark_at %1, "op was deleted" : !transform.any_op
- }
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["test.erase_op"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.debug.emit_remark_at %1, "matched op" : !transform.any_op
+ transform.include @foo failures(propagate) (%0) : (!transform.any_op) -> ()
+ // No marker should be printed.
+ transform.debug.emit_remark_at %1, "op was deleted" : !transform.any_op
+ transform.yield
}
}
@@ -183,13 +193,15 @@ func.func @canonicalization(%t: tensor<5xf32>) -> index {
return %dim : index
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["tensor.dim"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %1 {
- transform.apply_patterns.canonicalization
- } : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["tensor.dim"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %1 {
+ transform.apply_patterns.canonicalization
+ } : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -200,13 +212,13 @@ module {
return
}
- module {
- transform.sequence failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
+ module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
// expected-error @below {{cannot apply transform to itself (or one of its ancestors)}}
transform.apply_patterns to %arg1 {
transform.apply_patterns.canonicalization
} : !transform.any_op
+ transform.yield
}
}
}
@@ -224,12 +236,14 @@ func.func @canonicalization_and_cse(%m: memref<5xf32>) {
return
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %1 {
- transform.apply_patterns.canonicalization
- } {apply_cse} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %1 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %1 {
+ transform.apply_patterns.canonicalization
+ } {apply_cse} : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -243,15 +257,17 @@ func.func @full_dialect_conversion() -> tensor<5xf32> {
return %0 : tensor<5xf32>
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_conversion_patterns to %0 {
- transform.apply_conversion_patterns.transform.test_conversion_patterns
- } with type_converter {
- transform.apply_conversion_patterns.transform.test_type_converter
- } {legal_ops = ["func.func", "func.return", "test.new_op"]}
- : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_conversion_patterns to %0 {
+ transform.apply_conversion_patterns.transform.test_conversion_patterns
+ } with type_converter {
+ transform.apply_conversion_patterns.transform.test_type_converter
+ } {legal_ops = ["func.func", "func.return", "test.new_op"]}
+ : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -266,16 +282,18 @@ func.func @full_dialect_conversion_failed() -> tensor<5xf32> {
return %0 : tensor<5xf32>
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // expected-error @below{{dialect conversion failed}}
- transform.apply_conversion_patterns to %0 {
- transform.apply_conversion_patterns.transform.test_conversion_patterns
- } with type_converter {
- transform.apply_conversion_patterns.transform.test_type_converter
- } {legal_ops = ["func.func", "func.return", "test.new_op"]}
- : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // expected-error @below{{dialect conversion failed}}
+ transform.apply_conversion_patterns to %0 {
+ transform.apply_conversion_patterns.transform.test_conversion_patterns
+ } with type_converter {
+ transform.apply_conversion_patterns.transform.test_type_converter
+ } {legal_ops = ["func.func", "func.return", "test.new_op"]}
+ : !transform.any_op
+ transform.yield
+ }
}
// -----
@@ -294,98 +312,108 @@ func.func @partial_dialect_conversion() -> tensor<5xf32> {
return %0 : tensor<5xf32>
}
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_conversion_patterns to %0 {
- transform.apply_conversion_patterns.transform.test_conversion_patterns
- } with type_converter {
- transform.apply_conversion_patterns.transform.test_type_converter
- } {legal_ops = ["func.func", "func.return", "test.new_op"],
- partial_conversion} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_conversion_patterns to %0 {
+ transform.apply_conversion_patterns.transform.test_conversion_patterns
+ } with type_converter {
+ transform.apply_conversion_patterns.transform.test_type_converter
+ } {legal_ops = ["func.func", "func.return", "test.new_op"],
+ partial_conversion} : !transform.any_op
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- // expected-error @below{{pattern descriptor does not specify type converter and apply_conversion_patterns op has no default type converter}}
- transform.apply_conversion_patterns to %0 {
- // expected-note @below{{pattern descriptor op}}
- transform.apply_conversion_patterns.transform.test_conversion_patterns
- } {illegal_ops = ["test.foo"]} : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ // expected-error @below{{pattern descriptor does not specify type converter and apply_conversion_patterns op has no default type converter}}
+ transform.apply_conversion_patterns to %0 {
+ // expected-note @below{{pattern descriptor op}}
+ transform.apply_conversion_patterns.transform.test_conversion_patterns
+ } {illegal_ops = ["test.foo"]} : !transform.any_op
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_conversion_patterns to %0 {
- // expected-error @below{{expected LLVMTypeConverter}}
- transform.apply_conversion_patterns.dialect_to_llvm "test"
- } with type_converter {
- transform.apply_conversion_patterns.transform.test_type_converter
- } {illegal_ops = ["test.foo"],
- legal_ops = ["func.func", "func.return", "test.new_op"]}
- : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_conversion_patterns to %0 {
+ // expected-error @below{{expected LLVMTypeConverter}}
+ transform.apply_conversion_patterns.dialect_to_llvm "test"
+ } with type_converter {
+ transform.apply_conversion_patterns.transform.test_type_converter
+ } {illegal_ops = ["test.foo"],
+ legal_ops = ["func.func", "func.return", "test.new_op"]}
+ : !transform.any_op
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_conversion_patterns to %0 {
- // expected-error @below{{unknown dialect or dialect not loaded: this_dialect_does_not_exist}}
- transform.apply_conversion_patterns.dialect_to_llvm "this_dialect_does_not_exist"
- } with type_converter {
- transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
- } {illegal_ops = ["test.foo"],
- legal_ops = ["func.func", "func.return", "test.new_op"]}
- : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_conversion_patterns to %0 {
+ // expected-error @below{{unknown dialect or dialect not loaded: this_dialect_does_not_exist}}
+ transform.apply_conversion_patterns.dialect_to_llvm "this_dialect_does_not_exist"
+ } with type_converter {
+ transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+ } {illegal_ops = ["test.foo"],
+ legal_ops = ["func.func", "func.return", "test.new_op"]}
+ : !transform.any_op
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.apply_conversion_patterns to %0 {
- // expected-error @below{{dialect does not implement ConvertToLLVMPatternInterface or extension was not loaded: transform}}
- transform.apply_conversion_patterns.dialect_to_llvm "transform"
- } with type_converter {
- transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
- } {illegal_ops = ["test.foo"],
- legal_ops = ["func.func", "func.return", "test.new_op"]}
- : !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.apply_conversion_patterns to %0 {
+ // expected-error @below{{dialect does not implement ConvertToLLVMPatternInterface or extension was not loaded: transform}}
+ transform.apply_conversion_patterns.dialect_to_llvm "transform"
+ } with type_converter {
+ transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
+ } {illegal_ops = ["test.foo"],
+ legal_ops = ["func.func", "func.return", "test.new_op"]}
+ : !transform.any_op
+ transform.yield
+ }
}
// -----
module attributes { transform.with_named_sequence } {
-func.func @replacement_op_not_found() {
- // No op replacement can be found, but there are no handles that must be
- // updated. No error should be reported.
- "test.container"() ({
- %0 = "test.foo"() {replace_with_new_op = "test.bar"} : () -> (i32)
- }) : () -> ()
- return
-}
+ func.func @replacement_op_not_found() {
+ // No op replacement can be found, but there are no handles that must be
+ // updated. No error should be reported.
+ "test.container"() ({
+ %0 = "test.foo"() {replace_with_new_op = "test.bar"} : () -> (i32)
+ }) : () -> ()
+ return
+ }
-transform.named_sequence @patterns(%container: !transform.any_op {transform.readonly}) {
- transform.apply_patterns to %container {
- transform.apply_patterns.transform.test_patterns
- } : !transform.any_op
- transform.yield
-}
+ transform.named_sequence @patterns(%container: !transform.any_op {transform.readonly}) {
+ transform.apply_patterns to %container {
+ transform.apply_patterns.transform.test_patterns
+ } : !transform.any_op
+ transform.yield
+ }
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
- %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.annotate %1 "annotated" : !transform.any_op
- transform.include @patterns failures(propagate) (%0) : (!transform.any_op) -> ()
-}
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op) {
+ %0 = transform.structured.match ops{["test.container"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["test.foo"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.annotate %1 "annotated" : !transform.any_op
+ transform.include @patterns failures(propagate) (%0) : (!transform.any_op) -> ()
+ transform.yield
+ }
}
diff --git a/mlir/test/Dialect/Transform/test-pdl-extension.mlir b/mlir/test/Dialect/Transform/test-pdl-extension.mlir
index a9710f755312..a3349c1ba505 100644
--- a/mlir/test/Dialect/Transform/test-pdl-extension.mlir
+++ b/mlir/test/Dialect/Transform/test-pdl-extension.mlir
@@ -1,21 +1,26 @@
-// RUN: mlir-opt %s --test-transform-dialect-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics
-
-transform.with_pdl_patterns {
-^bb0(%arg0: !transform.any_op):
- sequence %arg0 : !transform.any_op failures(propagate) {
- ^bb0(%arg1: !transform.any_op):
- %0 = pdl_match @some in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.debug.emit_remark_at %0, "matched" : !transform.any_op
- }
-
- pdl.pattern @some : benefit(1) {
- %0 = pdl.operation "test.some_op"
- pdl.rewrite %0 with "transform.dialect"
- }
-
- pdl.pattern @other : benefit(1) {
- %0 = pdl.operation "test.other_op"
- pdl.rewrite %0 with "transform.dialect"
+// RUN: mlir-opt %s --transform-interpreter -allow-unregistered-dialect --split-input-file --verify-diagnostics
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ transform.with_pdl_patterns %root : !transform.any_op {
+ ^bb0(%arg0: !transform.any_op):
+ sequence %arg0 : !transform.any_op failures(propagate) {
+ ^bb0(%arg1: !transform.any_op):
+ %0 = pdl_match @some in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.debug.emit_remark_at %0, "matched" : !transform.any_op
+ }
+
+ pdl.pattern @some : benefit(1) {
+ %0 = pdl.operation "test.some_op"
+ pdl.rewrite %0 with "transform.dialect"
+ }
+
+ pdl.pattern @other : benefit(1) {
+ %0 = pdl.operation "test.other_op"
+ pdl.rewrite %0 with "transform.dialect"
+ }
+ }
+ transform.yield
}
}
@@ -28,17 +33,22 @@ transform.with_pdl_patterns {
// -----
-transform.with_pdl_patterns {
-^bb0(%arg0: !transform.any_op):
- sequence %arg0 : !transform.any_op failures(propagate) {
- ^bb1(%arg1: !transform.any_op):
- %0 = pdl_match @some in %arg1 : (!transform.any_op) -> !transform.any_op
- }
-
- pdl.pattern @some : benefit(1) {
- %0 = pdl.operation "test.some_op"
- pdl.apply_native_constraint "verbose_constraint"(%0 : !pdl.operation)
- pdl.rewrite %0 with "transform.dialect"
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ transform.with_pdl_patterns %root : !transform.any_op {
+ ^bb0(%arg0: !transform.any_op):
+ sequence %arg0 : !transform.any_op failures(propagate) {
+ ^bb1(%arg1: !transform.any_op):
+ %0 = pdl_match @some in %arg1 : (!transform.any_op) -> !transform.any_op
+ }
+
+ pdl.pattern @some : benefit(1) {
+ %0 = pdl.operation "test.some_op"
+ pdl.apply_native_constraint "verbose_constraint"(%0 : !pdl.operation)
+ pdl.rewrite %0 with "transform.dialect"
+ }
+ }
+ transform.yield
}
}
diff --git a/mlir/test/Dialect/Transform/transform-state-extension.mlir b/mlir/test/Dialect/Transform/transform-state-extension.mlir
index a26293fbe51c..e8c0b7a8a3aa 100644
--- a/mlir/test/Dialect/Transform/transform-state-extension.mlir
+++ b/mlir/test/Dialect/Transform/transform-state-extension.mlir
@@ -1,89 +1,95 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -verify-diagnostics -split-input-file
+// RUN: mlir-opt %s -transform-interpreter -verify-diagnostics -split-input-file
// expected-note @below {{associated payload op}}
-module {
- transform.sequence failures(propagate) {
- ^bb0(%arg0: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
// expected-remark @below {{extension absent}}
- test_check_if_test_extension_present %arg0 : !transform.any_op
- test_add_test_extension "A"
+ transform.test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.test_add_test_extension "A"
// expected-remark @below {{extension present, A}}
- test_check_if_test_extension_present %arg0 : !transform.any_op
- test_remove_test_extension
+ transform.test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.test_remove_test_extension
// expected-remark @below {{extension absent}}
- test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.yield
}
}
// -----
// expected-note @below {{associated payload op}}
-module {
- transform.sequence failures(propagate) {
- ^bb0(%arg0: !transform.any_op):
- test_add_test_extension "A"
- test_remove_test_extension
- test_add_test_extension "B"
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
+ transform.test_add_test_extension "A"
+ transform.test_remove_test_extension
+ transform.test_add_test_extension "B"
// expected-remark @below {{extension present, B}}
- test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.yield
}
}
// -----
// expected-note @below {{associated payload op}}
-module {
- transform.sequence failures(propagate) {
- ^bb0(%arg0: !transform.any_op):
- test_add_test_extension "A"
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
+ transform.test_add_test_extension "A"
// expected-remark @below {{extension present, A}}
- test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.test_check_if_test_extension_present %arg0 : !transform.any_op
// expected-note @below {{associated payload op}}
- test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
// expected-remark @below {{extension present, A}}
- test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.test_check_if_test_extension_present %arg0 : !transform.any_op
+ transform.yield
}
}
// -----
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
- test_add_test_extension "A"
- // This is okay because we are replacing the top-level module operation
- // (0 results) with this operation that has _more_ (1) results.
- %dummy = test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
+ transform.test_add_test_extension "A"
+ // This is okay because we are replacing the top-level module operation
+ // (0 results) with this operation that has _more_ (1) results.
+ %dummy = transform.test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
- test_add_test_extension "A"
- %dummy = test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
- // This is still okay. Even though we are replacing the previous
- // operation with (1 result) with this operation that has less (0) results,
- // there is no handle to the result, hence no issue with value handle update.
- test_remap_operand_to_self %dummy : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
+ transform.test_add_test_extension "A"
+ %dummy = transform.test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
+ // This is still okay. Even though we are replacing the previous
+ // operation with (1 result) with this operation that has less (0) results,
+ // there is no handle to the result, hence no issue with value handle update.
+ transform.test_remap_operand_to_self %dummy : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
}
// -----
-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
- test_add_test_extension "A"
- // expected-error @below {{cannot replace an op with another op producing fewer results while tracking handles}}
- %dummy = test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
- %valuehandle = transform.get_result %dummy[0] : (!transform.any_op) -> !transform.any_value
- test_remap_operand_to_self %dummy : (!transform.any_op) -> ()
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
+ transform.test_add_test_extension "A"
+ // expected-error @below {{cannot replace an op with another op producing fewer results while tracking handles}}
+ %dummy = transform.test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
+ %valuehandle = transform.get_result %dummy[0] : (!transform.any_op) -> !transform.any_value
+ transform.test_remap_operand_to_self %dummy : (!transform.any_op) -> ()
+ transform.yield
+ }
}
// -----
-module {
- transform.sequence failures(suppress) {
- ^bb0(%arg0: !transform.any_op):
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
// expected-error @below {{TestTransformStateExtension missing}}
- test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.test_remap_operand_to_self %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
}
}
diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir
index dc486181ebe9..e6f045e12e51 100644
--- a/mlir/test/Dialect/Vector/canonicalize.mlir
+++ b/mlir/test/Dialect/Vector/canonicalize.mlir
@@ -2567,76 +2567,3 @@ func.func @load_store_forwarding_rank_mismatch(%v0: vector<4x1x1xf32>, %arg0: te
tensor<4x4x4xf32>, vector<1x100x4x5xf32>
return %r : vector<1x100x4x5xf32>
}
-
-// -----
-
-// CHECK-LABEL: func.func @extract_true_from_constant_mask() -> i1 {
-func.func @extract_true_from_constant_mask() -> i1 {
-// CHECK: %[[TRUE:.*]] = arith.constant true
-// CHECK-NEXT: return %[[TRUE]] : i1
- %mask = vector.constant_mask [2, 2, 3] : vector<4x4x4xi1>
- %extract = vector.extract %mask[1, 1, 2] : i1 from vector<4x4x4xi1>
- return %extract : i1
-}
-
-// -----
-
-// CHECK-LABEL: func.func @extract_false_from_constant_mask() -> i1 {
-func.func @extract_false_from_constant_mask() -> i1 {
-// CHECK: %[[FALSE:.*]] = arith.constant false
-// CHECK-NEXT: return %[[FALSE]] : i1
- %mask = vector.constant_mask [2, 2, 3] : vector<4x4x4xi1>
- %extract = vector.extract %mask[1, 2, 2] : i1 from vector<4x4x4xi1>
- return %extract : i1
-}
-
-// -----
-
-// CHECK-LABEL: func.func @extract_from_create_mask() -> i1 {
-func.func @extract_from_create_mask() -> i1 {
-// CHECK: %[[TRUE:.*]] = arith.constant true
-// CHECK-NEXT: return %[[TRUE]] : i1
- %c2 = arith.constant 2 : index
- %c3 = arith.constant 3 : index
- %mask = vector.create_mask %c2, %c2, %c3 : vector<4x4x4xi1>
- %extract = vector.extract %mask[1, 1, 2] : i1 from vector<4x4x4xi1>
- return %extract : i1
-}
-
-// -----
-
-// CHECK-LABEL: func.func @extract_subvector_from_constant_mask() ->
-// CHECK-SAME: vector<6xi1> {
-func.func @extract_subvector_from_constant_mask() -> vector<6xi1> {
-// CHECK: %[[S0:.*]] = vector.constant_mask [4] : vector<6xi1>
-// CHECK-NEXT: return %[[S0]] : vector<6xi1>
- %mask = vector.constant_mask [2, 3, 4] : vector<4x5x6xi1>
- %extract = vector.extract %mask[1, 2] : vector<6xi1> from vector<4x5x6xi1>
- return %extract : vector<6xi1>
-}
-
-// -----
-
-// CHECK-LABEL: func.func @extract_scalar_with_dynamic_positions(
-// CHECK-SAME: %[[INDEX:.*]]: index) -> i1 {
-func.func @extract_scalar_with_dynamic_positions(%index: index) -> i1 {
-// CHECK: %[[S0:.*]] = vector.constant_mask [2, 2, 3] : vector<4x4x4xi1>
-// CHECK-NEXT: %[[S1:.*]] = vector.extract %[[S0]][1, 1, %[[INDEX]]] : i1 from vector<4x4x4xi1>
-// CHECK-NEXT: return %[[S1]] : i1
- %mask = vector.constant_mask [2, 2, 3] : vector<4x4x4xi1>
- %extract = vector.extract %mask[1, 1, %index] : i1 from vector<4x4x4xi1>
- return %extract : i1
-}
-
-// -----
-
-// CHECK-LABEL: func.func @extract_subvector_with_dynamic_positions
-// CHECK-SAME: %[[INDEX:.*]]: index) -> vector<6xi1> {
-func.func @extract_subvector_with_dynamic_positions(%index: index) -> vector<6xi1> {
-// CHECK: %[[S0:.*]] = vector.constant_mask [2, 3, 4] : vector<4x5x6xi1>
-// CHECK-NEXT: %[[S1:.*]] = vector.extract %[[S0]][1, %[[INDEX]]] : vector<6xi1> from vector<4x5x6xi1>
-// CHECK-NEXT: return %[[S1]] : vector<6xi1>
- %mask = vector.constant_mask [2, 3, 4] : vector<4x5x6xi1>
- %extract = vector.extract %mask[1, %index] : vector<6xi1> from vector<4x5x6xi1>
- return %extract : vector<6xi1>
-}
diff --git a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
index 94e78ce40a3c..8f0148119806 100644
--- a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
+++ b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
@@ -262,6 +262,48 @@ func.func @aligned_sitofp_2d(%a: vector<8x32xi4>) -> vector<8x32xf32> {
return %0 : vector<8x32xf32>
}
+// CHECK-LABEL: func.func @aligned_trunci(
+func.func @aligned_trunci(%a: vector<8xi32>) -> vector<8xi4> {
+// CHECK-SAME: %[[IN:.*]]: vector<8xi32>) -> vector<8xi4> {
+// CHECK-DAG: %[[LOW_MASK:.*]] = arith.constant dense<15> : vector<4xi8>
+// CHECK-DAG: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<4xi8>
+// CHECK: %[[I8:.*]] = arith.trunci %[[IN]] : vector<8xi32> to vector<8xi8>
+// CHECK: %[[LOW:.*]] = vector.shuffle %[[I8]], %[[I8]] [0, 2, 4, 6] : vector<8xi8>, vector<8xi8>
+// CHECK: %[[HIGH:.*]] = vector.shuffle %[[I8]], %[[I8]] [1, 3, 5, 7] : vector<8xi8>, vector<8xi8>
+// CHECK: %[[ZEROED_LOW:.*]] = arith.andi %[[LOW]], %[[LOW_MASK]] : vector<4xi8>
+// CHECK: %[[SHL_HIGH:.*]] = arith.shli %[[HIGH]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[MERGED:.*]] = arith.ori %[[ZEROED_LOW]], %[[SHL_HIGH]] : vector<4xi8>
+// CHECK: %[[I4:.*]] = vector.bitcast %[[MERGED]] : vector<4xi8> to vector<8xi4>
+ %0 = arith.trunci %a : vector<8xi32> to vector<8xi4>
+ return %0 : vector<8xi4>
+}
+
+// CHECK-LABEL: func.func @aligned_trunci_base_case(
+func.func @aligned_trunci_base_case(%a: vector<8xi8>) -> vector<8xi4> {
+// CHECK-SAME: %[[IN:.*]]: vector<8xi8>) -> vector<8xi4> {
+// CHECK-DAG: %[[LOW_MASK:.*]] = arith.constant dense<15> : vector<4xi8>
+// CHECK-DAG: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<4xi8>
+// CHECK: %[[LOW:.*]] = vector.shuffle %[[IN]], %[[IN]] [0, 2, 4, 6] : vector<8xi8>, vector<8xi8>
+// CHECK: %[[HIGH:.*]] = vector.shuffle %[[IN]], %[[IN]] [1, 3, 5, 7] : vector<8xi8>, vector<8xi8>
+// CHECK: %[[ZEROED_LOW:.*]] = arith.andi %[[LOW]], %[[LOW_MASK]] : vector<4xi8>
+// CHECK: %[[SHL_HIGH:.*]] = arith.shli %[[HIGH]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[MERGED:.*]] = arith.ori %[[ZEROED_LOW]], %[[SHL_HIGH]] : vector<4xi8>
+// CHECK: %[[I4:.*]] = vector.bitcast %[[MERGED]] : vector<4xi8> to vector<8xi4>
+ %0 = arith.trunci %a : vector<8xi8> to vector<8xi4>
+ return %0 : vector<8xi4>
+}
+
+// CHECK-LABEL: func.func @aligned_trunci_2d(
+func.func @aligned_trunci_2d(%a: vector<8x32xi32>) -> vector<8x32xi4> {
+// CHECK-NOT: vector.shuffle
+// CHECK-NOT: vector.andi
+// CHECK-NOT: vector.shli
+// CHECK-NOT: vector.ori
+// CHECK: arith.trunci
+ %0 = arith.trunci %a : vector<8x32xi32> to vector<8x32xi4>
+ return %0 : vector<8x32xi4>
+}
+
// CHECK-LABEL: func.func @i4_transpose(
func.func @i4_transpose(%a: vector<8x16xi4>) -> vector<16x8xi4> {
// CHECK-SAME: %[[IN:.*]]: vector<8x16xi4>) -> vector<16x8xi4> {
diff --git a/mlir/test/Dialect/Vector/vector-transforms.mlir b/mlir/test/Dialect/Vector/vector-transforms.mlir
index ea10bd56390c..eda6a5cc40d9 100644
--- a/mlir/test/Dialect/Vector/vector-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-transforms.mlir
@@ -339,6 +339,51 @@ func.func @bubble_down_bitcast_in_strided_slice_extract_odd_size(%arg0: vector<4
return %0: vector<3xf16>
}
+// CHECK-LABEL: func.func @bubble_up_bitcast_in_insert_i4_i8(
+// CHECK-SAME: %[[VAL:.*]]: vector<32xi4>,
+// CHECK-SAME: %[[DST:.*]]: vector<8x32xi4>) -> vector<8x16xi8> {
+func.func @bubble_up_bitcast_in_insert_i4_i8(%val: vector<32xi4>, %src: vector<8x32xi4>) -> vector<8x16xi8> {
+// CHECK: %[[BC_VAL:.*]] = vector.bitcast %[[VAL]] : vector<32xi4> to vector<16xi8>
+// CHECK: %[[BC_DST:.*]] = vector.bitcast %[[DST]] : vector<8x32xi4> to vector<8x16xi8>
+// CHECK: vector.insert %[[BC_VAL]], %[[BC_DST]] [4] : vector<16xi8> into vector<8x16xi8>
+ %0 = vector.insert %val, %src[4] : vector<32xi4> into vector<8x32xi4>
+ %1 = vector.bitcast %0 : vector<8x32xi4> to vector<8x16xi8>
+ return %1 : vector<8x16xi8>
+}
+
+// CHECK-LABEL: func.func @bubble_up_bitcast_in_insert_i8_i4(
+// CHECK-SAME: %[[VAL:.*]]: vector<16xi8>,
+// CHECK-SAME: %[[DST:.*]]: vector<8x16xi8>) -> vector<8x32xi4> {
+func.func @bubble_up_bitcast_in_insert_i8_i4(%val: vector<16xi8>, %src: vector<8x16xi8>) -> vector<8x32xi4> {
+// CHECK: %[[BC_VAL:.*]] = vector.bitcast %[[VAL]] : vector<16xi8> to vector<32xi4>
+// CHECK: %[[BC_DST:.*]] = vector.bitcast %[[DST]] : vector<8x16xi8> to vector<8x32xi4>
+// CHECK: vector.insert %[[BC_VAL]], %[[BC_DST]] [4] : vector<32xi4> into vector<8x32xi4>
+ %0 = vector.insert %val, %src[4] : vector<16xi8> into vector<8x16xi8>
+ %1 = vector.bitcast %0 : vector<8x16xi8> to vector<8x32xi4>
+ return %1 : vector<8x32xi4>
+}
+
+// CHECK-LABEL: func.func @bubble_up_bitcast_in_insert_i32_f32(
+// CHECK-SAME: %[[VAL:.*]]: vector<16xi32>,
+// CHECK-SAME: %[[DST:.*]]: vector<8x16xi32>) -> vector<8x16xf32> {
+func.func @bubble_up_bitcast_in_insert_i32_f32(%val: vector<16xi32>, %src: vector<8x16xi32>) -> vector<8x16xf32> {
+// CHECK: %[[BC_VAL:.*]] = vector.bitcast %[[VAL]] : vector<16xi32> to vector<16xf32>
+// CHECK: %[[BC_DST:.*]] = vector.bitcast %[[DST]] : vector<8x16xi32> to vector<8x16xf32>
+// CHECK: vector.insert %[[BC_VAL]], %[[BC_DST]] [4] : vector<16xf32> into vector<8x16xf32>
+ %0 = vector.insert %val, %src[4] : vector<16xi32> into vector<8x16xi32>
+ %1 = vector.bitcast %0 : vector<8x16xi32> to vector<8x16xf32>
+ return %1 : vector<8x16xf32>
+}
+
+// CHECK-LABEL: func.func @bubble_up_bitcast_in_insert_scalar(
+func.func @bubble_up_bitcast_in_insert_scalar(%val: i8, %src: vector<8x16xi8>) -> vector<8x32xi4> {
+// CHECK: vector.insert
+// CHECK-NEXT: vector.bitcast
+ %0 = vector.insert %val, %src[4, 8] : i8 into vector<8x16xi8>
+ %1 = vector.bitcast %0 : vector<8x16xi8> to vector<8x32xi4>
+ return %1 : vector<8x32xi4>
+}
+
// CHECK-LABEL: func @bubble_up_bitcast_in_strided_slice_insert
// CHECK-SAME: (%[[DST:.+]]: vector<8xf16>, %[[SRC1:.+]]: vector<4xf16>, %[[SRC2:.+]]: vector<4xf16>)
func.func @bubble_up_bitcast_in_strided_slice_insert(%dst: vector<8xf16>, %src1: vector<4xf16>, %src2: vector<4xf16>) -> vector<4xf32> {
diff --git a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
index 907260373487..bf90c4a6ebb3 100644
--- a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
+++ b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
@@ -1559,3 +1559,28 @@ func.func @warp_propagate_multi_dim_create_mask(%laneid: index, %m0: index, %m1:
// CHECK-PROP: %[[DISTM0:.+]] = affine.apply #[[$SUBM0]]()[%[[M0]], %[[LANEID]]]
// CHECK-PROP: %[[DISTM1:.+]] = affine.apply #[[$SUBM1]]()[%[[M1]], %[[LANEID]]]
// CHECK-PROP: vector.create_mask %[[DISTM0]], %[[DISTM1]], %[[M2]] : vector<1x2x4xi1>
+
+// -----
+
+func.func @warp_propagate_nd_write(%laneid: index, %dest: memref<4x1024xf32>) {
+ %c0 = arith.constant 0 : index
+ vector.warp_execute_on_lane_0(%laneid)[32] -> () {
+ %0 = "some_def"() : () -> (vector<4x1024xf32>)
+ vector.transfer_write %0, %dest[%c0, %c0] : vector<4x1024xf32>, memref<4x1024xf32>
+ vector.yield
+ }
+ return
+}
+
+// CHECK-DIST-AND-PROP: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 128)>
+
+// CHECK-DIST-AND-PROP-LABEL: func.func @warp_propagate_nd_write(
+// CHECK-DIST-AND-PROP: %[[W:.*]] = vector.warp_execute_on_lane_0(%{{.*}})[32] -> (vector<1x128xf32>) {
+// CHECK-DIST-AND-PROP: %[[V0:.*]] = "some_def"
+// CHECK-DIST-AND-PROP: vector.yield %[[V0]]
+// CHECK-DIST-AND-PROP-SAME: vector<4x1024xf32>
+// CHECK-DIST-AND-PROP: }
+
+// CHECK-DIST-AND-PROP: %[[IDS:.+]]:2 = affine.delinearize_index %{{.*}} into (%c4, %c8) : index, index
+// CHECK-DIST-AND-PROP: %[[INNER_ID:.+]] = affine.apply #map()[%[[IDS]]#1]
+// CHECK-DIST-AND-PROP: vector.transfer_write %[[W]], %{{.*}}[%[[IDS]]#0, %[[INNER_ID]]] {{.*}} : vector<1x128xf32>
diff --git a/mlir/test/Examples/transform-opt/empty.mlir b/mlir/test/Examples/transform-opt/empty.mlir
new file mode 100644
index 000000000000..b525769db688
--- /dev/null
+++ b/mlir/test/Examples/transform-opt/empty.mlir
@@ -0,0 +1,12 @@
+// RUN: mlir-transform-opt %s --transform=%p/self-contained.mlir | FileCheck %s
+// RUN: mlir-transform-opt %s --transform=%p/external-decl.mlir --verify-diagnostics
+// RUN: mlir-transform-opt %s --transform=%p/external-def.mlir --transform-entry-point=external_def | FileCheck %s --check-prefix=EXTERNAL
+// RUN: mlir-transform-opt %s --transform=%p/external-decl.mlir --transform-library=%p/external-def.mlir | FileCheck %s --check-prefix=EXTERNAL
+// RUN: mlir-transform-opt %s --transform=%p/syntax-error.mlir --verify-diagnostics
+// RUN: mlir-transform-opt %s --transform=%p/self-contained.mlir --transform-library=%p/syntax-error.mlir --verify-diagnostics
+// RUN: mlir-transform-opt %s --transform=%p/self-contained.mlir --transform-library=%p/external-def.mlir --transform-library=%p/syntax-error.mlir --verify-diagnostics
+
+// CHECK: IR printer: in self-contained
+// EXTERNAL: IR printer: external_def
+// CHECK-NOT: @__transform_main
+module {}
diff --git a/mlir/test/Examples/transform-opt/external-decl.mlir b/mlir/test/Examples/transform-opt/external-decl.mlir
new file mode 100644
index 000000000000..5a7373589242
--- /dev/null
+++ b/mlir/test/Examples/transform-opt/external-decl.mlir
@@ -0,0 +1,18 @@
+// This test just needs to parse. Note that the diagnostic message below will
+// be produced in *another* multi-file test, do *not* -verify-diagnostics here.
+// RUN: mlir-opt %s
+
+// RUN: mlir-transform-opt %s --transform-library=%p/external-def.mlir | FileCheck %s
+
+module attributes {transform.with_named_sequence} {
+ // The definition should not be printed here.
+ // CHECK: @external_def
+ // CHECK-NOT: transform.print
+ transform.named_sequence private @external_def(%root: !transform.any_op {transform.readonly})
+
+ transform.named_sequence private @__transform_main(%root: !transform.any_op) {
+ // expected-error @below {{unresolved external named sequence}}
+ transform.include @external_def failures(propagate) (%root) : (!transform.any_op) -> ()
+ transform.yield
+ }
+}
diff --git a/mlir/test/Examples/transform-opt/external-def.mlir b/mlir/test/Examples/transform-opt/external-def.mlir
new file mode 100644
index 000000000000..9dc4fbbdd6b6
--- /dev/null
+++ b/mlir/test/Examples/transform-opt/external-def.mlir
@@ -0,0 +1,8 @@
+// RUN: mlir-opt %s
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @external_def(%root: !transform.any_op {transform.readonly}) {
+ transform.print %root { name = "external_def" } : !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Examples/transform-opt/pass.mlir b/mlir/test/Examples/transform-opt/pass.mlir
new file mode 100644
index 000000000000..5c7c8bf1e256
--- /dev/null
+++ b/mlir/test/Examples/transform-opt/pass.mlir
@@ -0,0 +1,19 @@
+// RUN: mlir-transform-opt %s | FileCheck %s
+
+module attributes {transform.with_named_sequence} {
+ // CHECK-LABEL: @return_42
+ // CHECK: %[[C42:.+]] = arith.constant 42
+ // CHECK: return %[[C42]]
+ func.func @return_42() -> i32 {
+ %0 = arith.constant 21 : i32
+ %1 = arith.constant 2 : i32
+ %2 = arith.muli %0, %1 : i32
+ return %2 : i32
+ }
+
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op) {
+ %arg1 = transform.apply_registered_pass "canonicalize" to %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.print %arg1 : !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Examples/transform-opt/self-contained.mlir b/mlir/test/Examples/transform-opt/self-contained.mlir
new file mode 100644
index 000000000000..b9a93af61b8b
--- /dev/null
+++ b/mlir/test/Examples/transform-opt/self-contained.mlir
@@ -0,0 +1,21 @@
+// RUN: mlir-transform-opt %s | FileCheck %s
+// RUN: mlir-transform-opt %s --transform=%s | FileCheck %s
+// RUN: mlir-transform-opt %s --transform=%p/external-decl.mlir --verify-diagnostics
+// RUN: mlir-transform-opt %s --transform=%p/external-def.mlir --transform-entry-point=external_def | FileCheck %s --check-prefix=EXTERNAL
+// RUN: mlir-transform-opt %s --transform=%p/external-decl.mlir --transform-library=%p/external-def.mlir | FileCheck %s --check-prefix=EXTERNAL
+// RUN: mlir-transform-opt %s --transform=%p/syntax-error.mlir --verify-diagnostics
+
+// CHECK: IR printer: in self-contained
+// EXTERNAL: IR printer: external_def
+
+// The first occurrence comes from the print operation and the second is the
+// roundtrip output. However, we shouldn't have the symbol duplicated because
+// of library merging.
+// CHECK-COUNT-2: @__transform_main
+// CHECK-NOT: @__transform_main
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence private @__transform_main(%root: !transform.any_op) {
+ transform.print %root { name = "in self-contained" } : !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Examples/transform-opt/syntax-error.mlir b/mlir/test/Examples/transform-opt/syntax-error.mlir
new file mode 100644
index 000000000000..89f1d472fe89
--- /dev/null
+++ b/mlir/test/Examples/transform-opt/syntax-error.mlir
@@ -0,0 +1,5 @@
+// RUN: mlir-opt %s --verify-diagnostics
+// This file is used as additional input.
+
+// expected-error @below {{expected operation name in quotes}}
+module {
diff --git a/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir b/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir
index 15bafeda6740..437e49a6b814 100644
--- a/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir
+++ b/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir
@@ -26,7 +26,7 @@ func.func @check_results(%lhs : i16, %rhs : i16, %res0 : i16, %res1 : i16) -> ()
%mismatch = arith.cmpi ne, %res0, %res1 : i16
scf.if %mismatch -> () {
vector.print %res1 : i16
- vector.print str "Mismatch"
+ vector.print str "Mismatch\n"
}
return
}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
index 12f13e8dbc4a..881e2799b5b0 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
@@ -88,7 +88,7 @@ func.func @entry() {
}
// CHECK: SME: END OF TEST OUTPUT
- vector.print str "SME: END OF TEST OUTPUT"
+ vector.print str "SME: END OF TEST OUTPUT\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
index ee3866de303e..588b44a36c29 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
@@ -24,23 +24,23 @@ func.func @use_too_many_tiles(%a: memref<?x?xi16>, %b: memref<?x?xi16>, %c: mem
// CHECK-LABEL: tile_a:
// CHECK-COUNT-8: ( 0, 0, 0, 0, 0, 0, 0, 0
- vector.print str "tile_a:"
+ vector.print str "tile_a:\n"
vector.print %tile_a : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_b:
// CHECK-COUNT-8: ( 1, 1, 1, 1, 1, 1, 1, 1
- vector.print str "tile_b:"
+ vector.print str "tile_b:\n"
vector.print %tile_b : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_c:
// CHECK-COUNT-8: ( 2, 2, 2, 2, 2, 2, 2, 2
- vector.print str "tile_c:"
+ vector.print str "tile_c:\n"
vector.print %tile_c : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_d:
// CHECK-COUNT-8: ( 3, 3, 3, 3, 3, 3, 3, 3
- vector.print str "tile_d:"
+ vector.print str "tile_d:\n"
vector.print %tile_d : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_e:
// CHECK-COUNT-8: ( 4, 4, 4, 4, 4, 4, 4, 4
- vector.print str "tile_e:"
+ vector.print str "tile_e:\n"
vector.print %tile_e : vector<[8]x[8]xi16>
return
}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
index 22cf15d4f640..51a0c8f7c945 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
@@ -36,7 +36,7 @@ func.func @matmul_f32() {
// Print and verify the output
// F32-LABEL: SVE: START OF TEST OUTPUT
- vector.print str "SVE: START OF TEST OUTPUT"
+ vector.print str "SVE: START OF TEST OUTPUT\n"
// F32-NEXT: Unranked Memref {{.*}} rank = 2 offset = 0 sizes = [5, 15] strides = [15, 1] data =
// F32-COUNT-5: [29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788]
@@ -44,7 +44,7 @@ func.func @matmul_f32() {
call @printMemrefF32(%xf) : (tensor<*xf32>) -> ()
// F32-NEXT: SVE: END OF TEST OUTPUT
- vector.print str "SVE: END OF TEST OUTPUT"
+ vector.print str "SVE: END OF TEST OUTPUT\n"
return
}
@@ -73,7 +73,7 @@ func.func @matmul_mixed_ty() {
// Print and verify the output
// MIXED-LABEL: SVE: START OF TEST OUTPUT
- vector.print str "SVE: START OF TEST OUTPUT"
+ vector.print str "SVE: START OF TEST OUTPUT\n"
// MIXED-NEXT: Unranked Memref {{.*}} rank = 2 offset = 0 sizes = [5, 15] strides = [15, 1] data =
// MIXED-COUNT-5: [45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387]
@@ -81,7 +81,7 @@ func.func @matmul_mixed_ty() {
call @printMemrefI32(%xf) : (tensor<*xi32>) -> ()
// MIXED-NEXT: SVE: END OF TEST OUTPUT
- vector.print str "SVE: END OF TEST OUTPUT"
+ vector.print str "SVE: END OF TEST OUTPUT\n"
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
index 6468c4b45d24..1184d407541b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -82,38 +82,39 @@ module {
return %0 : tensor<?x?xf64, #BSR>
}
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f64
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
%A = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #BSR>
- // CHECK: ( 0, 2, 3 )
- // CHECK-NEXT: ( 0, 2, 1 )
- // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
- %pos = sparse_tensor.positions %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
- %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecp : vector<3xindex>
- %crd = sparse_tensor.coordinates %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
- %vecc = vector.transfer_read %crd[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecc : vector<3xindex>
- %val = sparse_tensor.values %A : tensor<?x?xf64, #BSR> to memref<?xf64>
- %vecv = vector.transfer_read %val[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecv : vector<12xf64>
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+ // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %A : tensor<?x?xf64, #BSR>
- // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1
+ // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+ // CHECK-NEXT: ----
%t1 = sparse_tensor.reinterpret_map %A : tensor<?x?xf64, #BSR>
to tensor<?x?x2x2xf64, #DSDD>
- %vdsdd = sparse_tensor.values %t1 : tensor<?x?x2x2xf64, #DSDD> to memref<?xf64>
- %vecdsdd = vector.transfer_read %vdsdd[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecdsdd : vector<12xf64>
+ sparse_tensor.print %t1 : tensor<?x?x2x2xf64, #DSDD>
- // CHECK-NEXT: ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0 )
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+ // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0,
+ // CHECK-NEXT: ----
%As = call @scale(%A) : (tensor<?x?xf64, #BSR>) -> (tensor<?x?xf64, #BSR>)
- %vals = sparse_tensor.values %As : tensor<?x?xf64, #BSR> to memref<?xf64>
- %vecs = vector.transfer_read %vals[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecs : vector<12xf64>
+ sparse_tensor.print %As : tensor<?x?xf64, #BSR>
// Release the resources.
bufferization.dealloc_tensor %A: tensor<?x?xf64, #BSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
index cb06f099dd37..f8e83b501967 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
@@ -102,9 +102,15 @@
//
module {
- // CHECK: ( 0, 1, 2 )
- // CHECK-NEXT: ( 0, 2 )
- // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 2,
+ // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo1() {
// Build.
%c0 = arith.constant 0 : index
@@ -115,23 +121,20 @@ module {
> : tensor<6x16xf64>
%s1 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_rowmajor>
// Test.
- %pos1 = sparse_tensor.positions %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
- %vecp1 = vector.transfer_read %pos1[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecp1 : vector<3xindex>
- %crd1 = sparse_tensor.coordinates %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
- %vecc1 = vector.transfer_read %crd1[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc1 : vector<2xindex>
- %val1 = sparse_tensor.values %s1 : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xf64>
- %vecv1 = vector.transfer_read %val1[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv1 : vector<24xf64>
+ sparse_tensor.print %s1 : tensor<?x?xf64, #BSR_row_rowmajor>
// Release.
bufferization.dealloc_tensor %s1: tensor<?x?xf64, #BSR_row_rowmajor>
return
}
- // CHECK-NEXT: ( 0, 1, 2 )
- // CHECK-NEXT: ( 0, 2 )
- // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 2,
+ // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo2() {
// Build.
%c0 = arith.constant 0 : index
@@ -142,23 +145,20 @@ module {
> : tensor<6x16xf64>
%s2 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_colmajor>
// Test.
- %pos2 = sparse_tensor.positions %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
- %vecp2 = vector.transfer_read %pos2[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecp2 : vector<3xindex>
- %crd2 = sparse_tensor.coordinates %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
- %vecc2 = vector.transfer_read %crd2[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc2 : vector<2xindex>
- %val2 = sparse_tensor.values %s2 : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xf64>
- %vecv2 = vector.transfer_read %val2[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv2 : vector<24xf64>
+ sparse_tensor.print %s2 : tensor<?x?xf64, #BSR_row_colmajor>
// Release.
bufferization.dealloc_tensor %s2: tensor<?x?xf64, #BSR_row_colmajor>
return
}
- // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
- // CHECK-NEXT: ( 0, 1 )
- // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 1,
+ // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo3() {
// Build.
%c0 = arith.constant 0 : index
@@ -169,23 +169,20 @@ module {
> : tensor<6x16xf64>
%s3 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_rowmajor>
// Test.
- %pos3 = sparse_tensor.positions %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
- %vecp3 = vector.transfer_read %pos3[%c0], %c0 : memref<?xindex>, vector<5xindex>
- vector.print %vecp3 : vector<5xindex>
- %crd3 = sparse_tensor.coordinates %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
- %vecc3 = vector.transfer_read %crd3[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc3 : vector<2xindex>
- %val3 = sparse_tensor.values %s3 : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xf64>
- %vecv3 = vector.transfer_read %val3[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv3 : vector<24xf64>
+ sparse_tensor.print %s3 : tensor<?x?xf64, #BSR_col_rowmajor>
// Release.
bufferization.dealloc_tensor %s3: tensor<?x?xf64, #BSR_col_rowmajor>
return
}
- // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
- // CHECK-NEXT: ( 0, 1 )
- // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo4() {
// Build.
%c0 = arith.constant 0 : index
@@ -196,15 +193,7 @@ module {
> : tensor<6x16xf64>
%s4 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_colmajor>
// Test.
- %pos4 = sparse_tensor.positions %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
- %vecp4 = vector.transfer_read %pos4[%c0], %c0 : memref<?xindex>, vector<5xindex>
- vector.print %vecp4 : vector<5xindex>
- %crd4 = sparse_tensor.coordinates %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
- %vecc4 = vector.transfer_read %crd4[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc4 : vector<2xindex>
- %val4 = sparse_tensor.values %s4 : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xf64>
- %vecv4 = vector.transfer_read %val4[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv4 : vector<24xf64>
+ sparse_tensor.print %s4 : tensor<?x?xf64, #BSR_col_colmajor>
// Release.
bufferization.dealloc_tensor %s4: tensor<?x?xf64, #BSR_col_colmajor>
return
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 5f6524a4b7af..c6ee0ce07050 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -83,12 +83,11 @@ module {
}
func.func private @getTensorFilename(index) -> (!Filename)
- func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
//
// Main driver that reads matrix from file and calls the kernel.
//
- func.func @entry() {
+ func.func @main() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -104,14 +103,13 @@ module {
//
// Print the linearized 5x5 result for verification.
- // CHECK: 25
- // CHECK: [2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10
//
- %n = sparse_tensor.number_of_entries %0 : tensor<?x?xf64, #DenseMatrix>
- vector.print %n : index
- %m = sparse_tensor.values %0
- : tensor<?x?xf64, #DenseMatrix> to memref<?xf64>
- call @printMemref1dF64(%m) : (memref<?xf64>) -> ()
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 25
+ // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10,
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>
// Release the resources.
bufferization.dealloc_tensor %a : tensor<?x?xf64, #SparseMatrix>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
index 81cd2d81cbbc..0b34ff581016 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -67,20 +67,8 @@ module {
return %0 : tensor<?xbf16, #DenseVector>
}
- // Dumps a dense vector of type bf16.
- func.func @dump_vec(%arg0: tensor<?xbf16, #DenseVector>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant -1.0 : bf16
- %0 = sparse_tensor.values %arg0 : tensor<?xbf16, #DenseVector> to memref<?xbf16>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xbf16>, vector<32xbf16>
- %f1 = arith.extf %1: vector<32xbf16> to vector<32xf32>
- vector.print %f1 : vector<32xf32>
- return
- }
-
// Driver method to call and verify the kernel.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
// Setup sparse vectors.
@@ -103,8 +91,12 @@ module {
//
// Verify the result.
//
- // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
- call @dump_vec(%0) : (tensor<?xbf16, #DenseVector>) -> ()
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %0 : tensor<?xbf16, #DenseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xbf16, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
index b320afdb8858..495682169c29 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -68,20 +68,8 @@ module {
return %0 : tensor<?xf16, #DenseVector>
}
- // Dumps a dense vector of type f16.
- func.func @dump_vec(%arg0: tensor<?xf16, #DenseVector>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant -1.0 : f16
- %0 = sparse_tensor.values %arg0 : tensor<?xf16, #DenseVector> to memref<?xf16>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xf16>, vector<32xf16>
- %f1 = arith.extf %1: vector<32xf16> to vector<32xf32>
- vector.print %f1 : vector<32xf32>
- return
- }
-
// Driver method to call and verify the kernel.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
// Setup sparse vectors.
@@ -104,8 +92,12 @@ module {
//
// Verify the result.
//
- // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
- call @dump_vec(%0) : (tensor<?xf16, #DenseVector>) -> ()
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %0 : tensor<?xf16, #DenseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xf16, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
index c141df64c22e..3a32ff285270 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -45,91 +45,6 @@
module {
-
- func.func @dump(%arg0: tensor<5x4x3xf64, #TensorCSR>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %p2 = sparse_tensor.positions %arg0 { level = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #TensorCSR> to memref<?xf64>
- %vp0 = vector.transfer_read %p0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vp0 : vector<2xindex>
- %vi0 = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vi0 : vector<2xindex>
- %vp2 = vector.transfer_read %p2[%c0], %c0: memref<?xindex>, vector<9xindex>
- vector.print %vp2 : vector<9xindex>
- %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi2 : vector<5xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
- vector.print %vv : vector<5xf64>
- return
- }
-
- func.func @dump_row(%arg0: tensor<5x4x3xf64, #TensorRow>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #TensorRow> to memref<?xf64>
- %vp0 = vector.transfer_read %p0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vp0 : vector<2xindex>
- %vi0 = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vi0 : vector<2xindex>
- %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<3xindex>
- vector.print %vp1 : vector<3xindex>
- %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<4xindex>
- vector.print %vi1 : vector<4xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<12xf64>
- vector.print %vv : vector<12xf64>
- return
- }
-
-func.func @dump_ccoo(%arg0: tensor<5x4x3xf64, #CCoo>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #CCoo> to memref<?xf64>
- %vp0 = vector.transfer_read %p0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vp0 : vector<2xindex>
- %vi0 = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vi0 : vector<2xindex>
- %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<3xindex>
- vector.print %vp1 : vector<3xindex>
- %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi1 : vector<5xindex>
- %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi2 : vector<5xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
- vector.print %vv : vector<5xf64>
- return
- }
-
-func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
- %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
- %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #DCoo> to memref<?xf64>
- %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<6xindex>
- vector.print %vp1 : vector<6xindex>
- %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi1 : vector<5xindex>
- %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi2 : vector<5xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
- vector.print %vv : vector<5xf64>
- return
-}
-
//
// Main driver.
//
@@ -145,13 +60,14 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%f4 = arith.constant 4.4 : f64
%f5 = arith.constant 5.5 : f64
- //
- // CHECK: ( 0, 2 )
- // CHECK-NEXT: ( 3, 4 )
- // CHECK-NEXT: ( 0, 2, 2, 2, 3, 3, 3, 4, 5 )
- // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
- // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
- //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 3, 4
+ // CHECK-NEXT: pos[2] : ( 0, 2, 2, 2, 3, 3, 3, 4, 5
+ // CHECK-NEXT: crd[2] : ( 1, 2, 1, 2, 2
+ // CHECK-NEXT: values : ( 1.1, 2.2, 3.3, 4.4, 5.5
+ // CHECK-NEXT: ----
%tensora = tensor.empty() : tensor<5x4x3xf64, #TensorCSR>
%tensor1 = sparse_tensor.insert %f1 into %tensora[%c3, %c0, %c1] : tensor<5x4x3xf64, #TensorCSR>
%tensor2 = sparse_tensor.insert %f2 into %tensor1[%c3, %c0, %c2] : tensor<5x4x3xf64, #TensorCSR>
@@ -159,15 +75,16 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%tensor4 = sparse_tensor.insert %f4 into %tensor3[%c4, %c2, %c2] : tensor<5x4x3xf64, #TensorCSR>
%tensor5 = sparse_tensor.insert %f5 into %tensor4[%c4, %c3, %c2] : tensor<5x4x3xf64, #TensorCSR>
%tensorm = sparse_tensor.load %tensor5 hasInserts : tensor<5x4x3xf64, #TensorCSR>
- call @dump(%tensorm) : (tensor<5x4x3xf64, #TensorCSR>) -> ()
-
- //
- // CHECK-NEXT: ( 0, 2 )
- // CHECK-NEXT: ( 3, 4 )
- // CHECK-NEXT: ( 0, 2, 4 )
- // CHECK-NEXT: ( 0, 3, 2, 3 )
- // CHECK-NEXT: ( 0, 1.1, 2.2, 0, 3.3, 0, 0, 0, 4.4, 0, 0, 5.5 )
- //
+ sparse_tensor.print %tensorm : tensor<5x4x3xf64, #TensorCSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 3, 4
+ // CHECK-NEXT: pos[1] : ( 0, 2, 4
+ // CHECK-NEXT: crd[1] : ( 0, 3, 2, 3
+ // CHECK-NEXT: values : ( 0, 1.1, 2.2, 0, 3.3, 0, 0, 0, 4.4, 0, 0, 5.5
+ // CHECK-NEXT: ----
%rowa = tensor.empty() : tensor<5x4x3xf64, #TensorRow>
%row1 = sparse_tensor.insert %f1 into %rowa[%c3, %c0, %c1] : tensor<5x4x3xf64, #TensorRow>
%row2 = sparse_tensor.insert %f2 into %row1[%c3, %c0, %c2] : tensor<5x4x3xf64, #TensorRow>
@@ -175,15 +92,16 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%row4 = sparse_tensor.insert %f4 into %row3[%c4, %c2, %c2] : tensor<5x4x3xf64, #TensorRow>
%row5 = sparse_tensor.insert %f5 into %row4[%c4, %c3, %c2] : tensor<5x4x3xf64, #TensorRow>
%rowm = sparse_tensor.load %row5 hasInserts : tensor<5x4x3xf64, #TensorRow>
- call @dump_row(%rowm) : (tensor<5x4x3xf64, #TensorRow>) -> ()
-
- //
- // CHECK: ( 0, 2 )
- // CHECK-NEXT: ( 3, 4 )
- // CHECK-NEXT: ( 0, 3, 5 )
- // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
- // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
- // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
+ sparse_tensor.print %rowm : tensor<5x4x3xf64, #TensorRow>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 3, 4
+ // CHECK-NEXT: pos[1] : ( 0, 3, 5
+ // CHECK-NEXT: crd[1] : ( 0, 1, 0, 2, 3, 1, 2, 2, 3, 2
+ // CHECK-NEXT: values : ( 1.1, 2.2, 3.3, 4.4, 5.5
+ // CHECK-NEXT: ----
%ccoo = tensor.empty() : tensor<5x4x3xf64, #CCoo>
%ccoo1 = sparse_tensor.insert %f1 into %ccoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #CCoo>
%ccoo2 = sparse_tensor.insert %f2 into %ccoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #CCoo>
@@ -191,13 +109,14 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%ccoo4 = sparse_tensor.insert %f4 into %ccoo3[%c4, %c2, %c2] : tensor<5x4x3xf64, #CCoo>
%ccoo5 = sparse_tensor.insert %f5 into %ccoo4[%c4, %c3, %c2] : tensor<5x4x3xf64, #CCoo>
%ccoom = sparse_tensor.load %ccoo5 hasInserts : tensor<5x4x3xf64, #CCoo>
- call @dump_ccoo(%ccoom) : (tensor<5x4x3xf64, #CCoo>) -> ()
-
- //
- // CHECK-NEXT: ( 0, 0, 0, 0, 3, 5 )
- // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
- // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
- // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
+ sparse_tensor.print %ccoom : tensor<5x4x3xf64, #CCoo>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 0, 0, 0, 3, 5
+ // CHECK-NEXT: crd[1] : ( 0, 1, 0, 2, 3, 1, 2, 2, 3, 2
+ // CHECK-NEXT: values : ( 1.1, 2.2, 3.3, 4.4, 5.5
+ // CHECK-NEXT: ----
%dcoo = tensor.empty() : tensor<5x4x3xf64, #DCoo>
%dcoo1 = sparse_tensor.insert %f1 into %dcoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #DCoo>
%dcoo2 = sparse_tensor.insert %f2 into %dcoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #DCoo>
@@ -205,23 +124,7 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%dcoo4 = sparse_tensor.insert %f4 into %dcoo3[%c4, %c2, %c2] : tensor<5x4x3xf64, #DCoo>
%dcoo5 = sparse_tensor.insert %f5 into %dcoo4[%c4, %c3, %c2] : tensor<5x4x3xf64, #DCoo>
%dcoom = sparse_tensor.load %dcoo5 hasInserts : tensor<5x4x3xf64, #DCoo>
- call @dump_dcoo(%dcoom) : (tensor<5x4x3xf64, #DCoo>) -> ()
-
- // NOE sanity check.
- //
- // CHECK-NEXT: 5
- // CHECK-NEXT: 12
- // CHECK-NEXT: 5
- // CHECK-NEXT: 5
- //
- %noe1 = sparse_tensor.number_of_entries %tensorm : tensor<5x4x3xf64, #TensorCSR>
- vector.print %noe1 : index
- %noe2 = sparse_tensor.number_of_entries %rowm : tensor<5x4x3xf64, #TensorRow>
- vector.print %noe2 : index
- %noe3 = sparse_tensor.number_of_entries %ccoom : tensor<5x4x3xf64, #CCoo>
- vector.print %noe3 : index
- %noe4 = sparse_tensor.number_of_entries %dcoom : tensor<5x4x3xf64, #DCoo>
- vector.print %noe4 : index
+ sparse_tensor.print %dcoom : tensor<5x4x3xf64, #DCoo>
// Release resources.
bufferization.dealloc_tensor %tensorm : tensor<5x4x3xf64, #TensorCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir
index 228d4e5f6f8a..e1f062121b12 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -28,7 +28,7 @@
}>
module {
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f64
%d = arith.constant dense<[[ 1.0, 2.0, 3.0, 4.0 ],
@@ -39,19 +39,14 @@ module {
%s = sparse_tensor.convert %d : tensor<5x4xf64> to tensor<5x4xf64, #CSR_hi>
//
- // CHECK: ( 0, 4, 4, 8, 8, 9, 9, 13 )
- // CHECK-NEXT: ( 0, 1, 2, 3, 0, 1, 2, 3, 2, 0, 1, 2, 3, 0, 1, 2, 3 )
- // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 5.5, 9, 10, 11, 12, 13, 14, 15, 16 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 17
+ // CHECK-NEXT: pos[1] : ( 0, 4, 4, 8, 8, 9, 9, 13
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 2, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 5.5, 9, 10, 11, 12, 13, 14, 15, 16
+ // CHECK-NEXT: ----
//
- %pos = sparse_tensor.positions %s {level = 1 : index } : tensor<5x4xf64, #CSR_hi> to memref<?xindex>
- %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<8xindex>
- vector.print %vecp : vector<8xindex>
- %crd = sparse_tensor.coordinates %s {level = 1 : index } : tensor<5x4xf64, #CSR_hi> to memref<?xindex>
- %vecc = vector.transfer_read %crd[%c0], %c0 : memref<?xindex>, vector<17xindex>
- vector.print %vecc : vector<17xindex>
- %val = sparse_tensor.values %s : tensor<5x4xf64, #CSR_hi> to memref<?xf64>
- %vecv = vector.transfer_read %val[%c0], %f0 : memref<?xf64>, vector<17xf64>
- vector.print %vecv : vector<17xf64>
+ sparse_tensor.print %s : tensor<5x4xf64, #CSR_hi>
// Release the resources.
bufferization.dealloc_tensor %s: tensor<5x4xf64, #CSR_hi>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
index fa0dbac269b9..863e1c62370e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -90,7 +90,7 @@ module {
//
// Main driver.
//
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
// Initialize various matrices, dense for stress testing,
@@ -140,33 +140,94 @@ module {
%b4 = sparse_tensor.convert %sb : tensor<8x4xf64> to tensor<8x4xf64, #DCSR>
//
- // Sanity check on stored entries before going into the computations.
- //
- // CHECK: 32
- // CHECK-NEXT: 32
- // CHECK-NEXT: 4
- // CHECK-NEXT: 4
- // CHECK-NEXT: 32
- // CHECK-NEXT: 32
- // CHECK-NEXT: 8
- // CHECK-NEXT: 8
- //
- %noea1 = sparse_tensor.number_of_entries %a1 : tensor<4x8xf64, #CSR>
- %noea2 = sparse_tensor.number_of_entries %a2 : tensor<4x8xf64, #DCSR>
- %noea3 = sparse_tensor.number_of_entries %a3 : tensor<4x8xf64, #CSR>
- %noea4 = sparse_tensor.number_of_entries %a4 : tensor<4x8xf64, #DCSR>
- %noeb1 = sparse_tensor.number_of_entries %b1 : tensor<8x4xf64, #CSR>
- %noeb2 = sparse_tensor.number_of_entries %b2 : tensor<8x4xf64, #DCSR>
- %noeb3 = sparse_tensor.number_of_entries %b3 : tensor<8x4xf64, #CSR>
- %noeb4 = sparse_tensor.number_of_entries %b4 : tensor<8x4xf64, #DCSR>
- vector.print %noea1 : index
- vector.print %noea2 : index
- vector.print %noea3 : index
- vector.print %noea4 : index
- vector.print %noeb1 : index
- vector.print %noeb2 : index
- vector.print %noeb3 : index
- vector.print %noeb4 : index
+ // Sanity check before going into the computations.
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: pos[1] : ( 0, 8, 16, 24, 32
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7
+ // CHECK-NEXT: values : ( 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3, 1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 8.4
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %a1 : tensor<4x8xf64, #CSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: pos[0] : ( 0, 4
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 8, 16, 24, 32
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7
+ // CHECK-NEXT: values : ( 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 8.2, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3, 1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 8.4
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %a2 : tensor<4x8xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 4
+ // CHECK-NEXT: pos[1] : ( 0, 2, 2, 3, 4
+ // CHECK-NEXT: crd[1] : ( 1, 5, 1, 7
+ // CHECK-NEXT: values : ( 2.1, 6.1, 2.3, 1
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %a3 : tensor<4x8xf64, #CSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 4
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 0, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 4
+ // CHECK-NEXT: crd[1] : ( 1, 5, 1, 7
+ // CHECK-NEXT: values : ( 2.1, 6.1, 2.3, 1
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %a4 : tensor<4x8xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12, 16, 20, 24, 28, 32
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 10.1, 11.1, 12.1, 13.1, 10.2, 11.2, 12.2, 13.2, 10.3, 11.3, 12.3, 13.3, 10.4, 11.4, 12.4, 13.4, 10.5, 11.5, 12.5, 13.5, 10.6, 11.6, 12.6, 13.6, 10.7, 11.7, 12.7, 13.7, 10.8, 11.8, 12.8, 13.8
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %b1 : tensor<8x4xf64, #CSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: pos[0] : ( 0, 8
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7
+ // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12, 16, 20, 24, 28, 32
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 10.1, 11.1, 12.1, 13.1, 10.2, 11.2, 12.2, 13.2, 10.3, 11.3, 12.3, 13.3, 10.4, 11.4, 12.4, 13.4, 10.5, 11.5, 12.5, 13.5, 10.6, 11.6, 12.6, 13.6, 10.7, 11.7, 12.7, 13.7, 10.8, 11.8, 12.8, 13.8
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %b2 : tensor<8x4xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 8
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2, 3, 4, 4, 5, 6, 8
+ // CHECK-NEXT: crd[1] : ( 3, 2, 1, 0, 1, 2, 2, 3
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %b3 : tensor<8x4xf64, #CSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 8
+ // CHECK-NEXT: pos[0] : ( 0, 7
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 5, 6, 7
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2, 3, 4, 5, 6, 8
+ // CHECK-NEXT: crd[1] : ( 3, 2, 1, 0, 1, 2, 2, 3
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %b4 : tensor<8x4xf64, #DCSR>
// Call kernels with dense.
%0 = call @matmul1(%da, %db, %zero)
@@ -208,24 +269,26 @@ module {
call @printMemrefF64(%u0) : (tensor<*xf64>) -> ()
//
- // CHECK: {{\[}}[388.76, 425.56, 462.36, 499.16],
- // CHECK-NEXT: [397.12, 434.72, 472.32, 509.92],
- // CHECK-NEXT: [405.48, 443.88, 482.28, 520.68],
- // CHECK-NEXT: [413.84, 453.04, 492.24, 531.44]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 16
+ // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12, 16
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 388.76, 425.56, 462.36, 499.16, 397.12, 434.72, 472.32, 509.92, 405.48, 443.88, 482.28, 520.68, 413.84, 453.04, 492.24, 531.44
+ // CHECK-NEXT: ----
//
- %c1 = sparse_tensor.convert %1 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
- %c1u = tensor.cast %c1 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c1u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %1 : tensor<4x4xf64, #CSR>
//
- // CHECK: {{\[}}[388.76, 425.56, 462.36, 499.16],
- // CHECK-NEXT: [397.12, 434.72, 472.32, 509.92],
- // CHECK-NEXT: [405.48, 443.88, 482.28, 520.68],
- // CHECK-NEXT: [413.84, 453.04, 492.24, 531.44]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 16
+ // CHECK-NEXT: pos[0] : ( 0, 4
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12, 16
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 388.76, 425.56, 462.36, 499.16, 397.12, 434.72, 472.32, 509.92, 405.48, 443.88, 482.28, 520.68, 413.84, 453.04, 492.24, 531.44
+ // CHECK-NEXT: ----
//
- %c2 = sparse_tensor.convert %2 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
- %c2u = tensor.cast %c2 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c2u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %2 : tensor<4x4xf64, #DCSR>
//
// CHECK: {{\[}}[86.08, 94.28, 102.48, 110.68],
@@ -237,24 +300,26 @@ module {
call @printMemrefF64(%u3) : (tensor<*xf64>) -> ()
//
- // CHECK: {{\[}}[86.08, 94.28, 102.48, 110.68],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [23.46, 25.76, 28.06, 30.36],
- // CHECK-NEXT: [10.8, 11.8, 12.8, 13.8]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[1] : ( 0, 4, 4, 8, 12
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 86.08, 94.28, 102.48, 110.68, 23.46, 25.76, 28.06, 30.36, 10.8, 11.8, 12.8, 13.8
+ // CHECK-NEXT: ----
//
- %c4 = sparse_tensor.convert %4 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
- %c4u = tensor.cast %c4 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c4u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %4 : tensor<4x4xf64, #CSR>
//
- // CHECK: {{\[}}[86.08, 94.28, 102.48, 110.68],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [23.46, 25.76, 28.06, 30.36],
- // CHECK-NEXT: [10.8, 11.8, 12.8, 13.8]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 0, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 86.08, 94.28, 102.48, 110.68, 23.46, 25.76, 28.06, 30.36, 10.8, 11.8, 12.8, 13.8
+ // CHECK-NEXT: ----
//
- %c5 = sparse_tensor.convert %5 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
- %c5u = tensor.cast %c5 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c5u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %5 : tensor<4x4xf64, #DCSR>
//
// CHECK: {{\[}}[0, 30.5, 4.2, 0],
@@ -266,46 +331,26 @@ module {
call @printMemrefF64(%u6) : (tensor<*xf64>) -> ()
//
- // CHECK: {{\[}}[0, 30.5, 4.2, 0],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 4.6, 0],
- // CHECK-NEXT: [0, 0, 7, 8]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 2, 2, 3, 5
+ // CHECK-NEXT: crd[1] : ( 1, 2, 2, 2, 3
+ // CHECK-NEXT: values : ( 30.5, 4.2, 4.6, 7, 8
+ // CHECK-NEXT: ----
//
- %c7 = sparse_tensor.convert %7 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
- %c7u = tensor.cast %c7 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c7u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %7 : tensor<4x4xf64, #CSR>
//
- // CHECK: {{\[}}[0, 30.5, 4.2, 0],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 4.6, 0],
- // CHECK-NEXT: [0, 0, 7, 8]]
- //
- %c8 = sparse_tensor.convert %8 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
- %c8u = tensor.cast %c8 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c8u) : (tensor<*xf64>) -> ()
-
- //
- // Sanity check on nonzeros.
- //
- // CHECK: [30.5, 4.2, 4.6, 7, 8{{.*}}]
- // CHECK: [30.5, 4.2, 4.6, 7, 8{{.*}}]
- //
- %val7 = sparse_tensor.values %7 : tensor<4x4xf64, #CSR> to memref<?xf64>
- %val8 = sparse_tensor.values %8 : tensor<4x4xf64, #DCSR> to memref<?xf64>
- call @printMemref1dF64(%val7) : (memref<?xf64>) -> ()
- call @printMemref1dF64(%val8) : (memref<?xf64>) -> ()
-
- //
- // Sanity check on stored entries after the computations.
- //
- // CHECK-NEXT: 5
- // CHECK-NEXT: 5
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 0, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 5
+ // CHECK-NEXT: crd[1] : ( 1, 2, 2, 2, 3
+ // CHECK-NEXT: values : ( 30.5, 4.2, 4.6, 7, 8
+ // CHECK-NEXT: ----
//
- %noe7 = sparse_tensor.number_of_entries %7 : tensor<4x4xf64, #CSR>
- %noe8 = sparse_tensor.number_of_entries %8 : tensor<4x4xf64, #DCSR>
- vector.print %noe7 : index
- vector.print %noe8 : index
+ sparse_tensor.print %8 : tensor<4x4xf64, #DCSR>
// Release the resources.
bufferization.dealloc_tensor %a1 : tensor<4x8xf64, #CSR>
@@ -316,12 +361,6 @@ module {
bufferization.dealloc_tensor %b2 : tensor<8x4xf64, #DCSR>
bufferization.dealloc_tensor %b3 : tensor<8x4xf64, #CSR>
bufferization.dealloc_tensor %b4 : tensor<8x4xf64, #DCSR>
- bufferization.dealloc_tensor %c1 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c2 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c4 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c5 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c7 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c8 : tensor<4x4xf64>
bufferization.dealloc_tensor %0 : tensor<4x4xf64>
bufferization.dealloc_tensor %1 : tensor<4x4xf64, #CSR>
bufferization.dealloc_tensor %2 : tensor<4x4xf64, #DCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
index 96c8a30ade8e..b95f72e1a479 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -132,7 +132,7 @@ module {
//
// Main driver.
//
- func.func @entry() {
+ func.func @main() {
%c_0 = arith.constant 0 : index
%c_1 = arith.constant 1 : index
%c_2 = arith.constant 2 : index
@@ -170,14 +170,16 @@ module {
// DCSR test
//
- // CHECK: [0, 30.5, 4.2, 0],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 4.6, 0],
- // CHECK-NEXT: [0, 0, 7, 8]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 0, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 5
+ // CHECK-NEXT: crd[1] : ( 1, 2, 2, 2, 3
+ // CHECK-NEXT: values : ( 30.5, 4.2, 4.6, 7, 8
+ // CHECK-NEXT: ----
//
- %c2 = sparse_tensor.convert %2 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
- %c2u = tensor.cast %c2 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c2u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %2 : tensor<4x4xf64, #DCSR>
%t1 = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #CSR>
%a1 = tensor.extract_slice %t1[0, 0][4, 8][1, 1] : tensor<8x8xf64, #CSR> to tensor<4x8xf64, #CSR_SLICE>
@@ -188,63 +190,63 @@ module {
// CSR test
//
- // CHECK: [0, 30.5, 4.2, 0],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 4.6, 0],
- // CHECK-NEXT: [0, 0, 7, 8]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 2, 2, 3, 5
+ // CHECK-NEXT: crd[1] : ( 1, 2, 2, 2, 3
+ // CHECK-NEXT: values : ( 30.5, 4.2, 4.6, 7, 8
+ // CHECK-NEXT: ----
//
- %c3 = sparse_tensor.convert %3 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
- %c3u = tensor.cast %c3 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c3u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %3 : tensor<4x4xf64, #CSR>
+
// slice x slice
//
- // CHECK: [2.3, 0, 0, 0],
- // CHECK-NEXT: [6.9, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [12.6, 0, 0, 0]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2, 2, 3
+ // CHECK-NEXT: crd[1] : ( 0, 0, 0
+ // CHECK-NEXT: values : ( 2.3, 6.9, 12.6
+ // CHECK-NEXT: ----
//
%s1 = tensor.extract_slice %tmp[0, 1][4, 4][2, 1] : tensor<8x8xf64, #DCSR> to tensor<4x4xf64, #DCSR_SLICE_1>
%s2 = tensor.extract_slice %b1[0, 0][4, 4][2, 1] : tensor<8x4xf64, #CSR> to tensor<4x4xf64, #CSR_SLICE_1>
%4 = call @matmul1(%s2, %s1)
: (tensor<4x4xf64, #CSR_SLICE_1>,
tensor<4x4xf64, #DCSR_SLICE_1>) -> tensor<4x4xf64, #CSR>
- %c4 = sparse_tensor.convert %4 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
- %c4u = tensor.cast %c4 : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c4u) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %4 : tensor<4x4xf64, #CSR>
// slice coo x slice coo
//
- // CHECK: [2.3, 0, 0, 0],
- // CHECK-NEXT: [6.9, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [12.6, 0, 0, 0]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 0, 0, 1, 0, 3, 0
+ // CHECK-NEXT: values : ( 2.3, 6.9, 12.6
+ // CHECK-NEXT: ----
//
%t1_coo = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #COO>
%b1_coo = sparse_tensor.convert %sb : tensor<8x4xf64> to tensor<8x4xf64, #COO>
%s2_coo = tensor.extract_slice %b1_coo[0, 0][4, 4][2, 1] : tensor<8x4xf64, #COO> to tensor<4x4xf64, #COO_SLICE_1>
%s1_coo = tensor.extract_slice %t1_coo[0, 1][4, 4][2, 1] : tensor<8x8xf64, #COO> to tensor<4x4xf64, #COO_SLICE_2>
%o_coo = call @matmul5(%s2_coo, %s1_coo) : (tensor<4x4xf64, #COO_SLICE_1>, tensor<4x4xf64, #COO_SLICE_2>) -> tensor<4x4xf64, #COO>
-
- %c4_coo = sparse_tensor.convert %o_coo : tensor<4x4xf64, #COO> to tensor<4x4xf64>
- %c4u_coo = tensor.cast %c4_coo : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c4u_coo) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %o_coo : tensor<4x4xf64, #COO>
// slice x slice (same as above, but with dynamic stride information)
//
- // CHECK: [2.3, 0, 0, 0],
- // CHECK-NEXT: [6.9, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0],
- // CHECK-NEXT: [12.6, 0, 0, 0]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2, 2, 3
+ // CHECK-NEXT: crd[1] : ( 0, 0, 0
+ // CHECK-NEXT: values : ( 2.3, 6.9, 12.6
+ // CHECK-NEXT: ----
//
%s1_dyn = tensor.extract_slice %tmp[%c_0, %c_1][4, 4][%c_2, %c_1] : tensor<8x8xf64, #DCSR> to tensor<4x4xf64, #DCSR_SLICE_dyn>
%s2_dyn = tensor.extract_slice %b1[%c_0, %c_0][4, 4][%c_2, %c_1] : tensor<8x4xf64, #CSR> to tensor<4x4xf64, #CSR_SLICE_dyn>
%dyn_4 = call @matmul_dyn(%s2_dyn, %s1_dyn)
: (tensor<4x4xf64, #CSR_SLICE_dyn>,
tensor<4x4xf64, #DCSR_SLICE_dyn>) -> tensor<4x4xf64, #CSR>
- %c4_dyn = sparse_tensor.convert %dyn_4 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
- %c4u_dyn = tensor.cast %c4_dyn : tensor<4x4xf64> to tensor<*xf64>
- call @printMemrefF64(%c4u_dyn) : (tensor<*xf64>) -> ()
+ sparse_tensor.print %dyn_4 : tensor<4x4xf64, #CSR>
// sparse slices should generate the same result as dense slices
//
@@ -265,11 +267,6 @@ module {
call @printMemrefF64(%du) : (tensor<*xf64>) -> ()
// Releases resources.
- bufferization.dealloc_tensor %c2 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c3 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c4 : tensor<4x4xf64>
- bufferization.dealloc_tensor %c4_coo : tensor<4x4xf64>
- bufferization.dealloc_tensor %c4_dyn : tensor<4x4xf64>
bufferization.dealloc_tensor %d : tensor<4x4xf64>
bufferization.dealloc_tensor %b1 : tensor<8x4xf64, #CSR>
bufferization.dealloc_tensor %t1 : tensor<8x8xf64, #CSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
index 2cecc2420343..2cef46f4cb15 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -61,8 +61,6 @@
}
module {
- func.func private @printMemrefF64(%ptr : tensor<*xf64>)
-
// Scales a sparse matrix into a new sparse matrix.
func.func @matrix_scale(%arga: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
%s = arith.constant 2.0 : f64
@@ -129,17 +127,8 @@ module {
return %0 : tensor<?x?xf64, #DCSR>
}
- // Dump a sparse matrix.
- func.func @dump(%arg0: tensor<?x?xf64, #DCSR>) {
- %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64>
- %u = tensor.cast %dm : tensor<?x?xf64> to tensor<*xf64>
- call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
- bufferization.dealloc_tensor %dm : tensor<?x?xf64>
- return
- }
-
// Driver method to call and verify matrix kernels.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%d1 = arith.constant 1.1 : f64
@@ -170,37 +159,76 @@ module {
//
// Verify the results.
//
- // CHECK: {{\[}}[1, 2, 0, 0, 0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 3],
- // CHECK-NEXT: [0, 0, 4, 0, 5, 0, 0, 6],
- // CHECK-NEXT: [7, 0, 8, 9, 0, 0, 0, 0]]
- // CHECK: {{\[}}[6, 0, 0, 0, 0, 0, 0, 5],
- // CHECK-NEXT: [4, 0, 0, 0, 0, 0, 3, 0],
- // CHECK-NEXT: [0, 2, 0, 0, 0, 0, 0, 1],
- // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 0]]
- // CHECK: {{\[}}[2, 4, 0, 0, 0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 6],
- // CHECK-NEXT: [0, 0, 8, 0, 10, 0, 0, 12],
- // CHECK-NEXT: [14, 0, 16, 18, 0, 0, 0, 0]]
- // CHECK: {{\[}}[2, 4, 0, 0, 0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 6],
- // CHECK-NEXT: [0, 0, 8, 0, 10, 0, 0, 12],
- // CHECK-NEXT: [14, 0, 16, 18, 0, 0, 0, 0]]
- // CHECK: {{\[}}[8, 4, 0, 0, 0, 0, 0, 5],
- // CHECK-NEXT: [4, 0, 0, 0, 0, 0, 3, 6],
- // CHECK-NEXT: [0, 2, 8, 0, 10, 0, 0, 13],
- // CHECK-NEXT: [14, 0, 16, 18, 0, 0, 0, 0]]
- // CHECK: {{\[}}[12, 0, 0, 0, 0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 0],
- // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 12],
- // CHECK-NEXT: [0, 0, 0, 0, 0, 0, 0, 0]]
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 9
+ // CHECK-NEXT: pos[0] : ( 0, 4
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+ // CHECK-NEXT: crd[1] : ( 0, 1, 7, 2, 4, 7, 0, 2, 3
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %sm1 : tensor<?x?xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 6
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2
+ // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6
+ // CHECK-NEXT: crd[1] : ( 0, 7, 0, 6, 1, 7
+ // CHECK-NEXT: values : ( 6, 5, 4, 3, 2, 1
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %sm2 : tensor<?x?xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 9
+ // CHECK-NEXT: pos[0] : ( 0, 4
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+ // CHECK-NEXT: crd[1] : ( 0, 1, 7, 2, 4, 7, 0, 2, 3
+ // CHECK-NEXT: values : ( 2, 4, 6, 8, 10, 12, 14, 16, 18
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %0 : tensor<?x?xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 9
+ // CHECK-NEXT: pos[0] : ( 0, 4
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9
+ // CHECK-NEXT: crd[1] : ( 0, 1, 7, 2, 4, 7, 0, 2, 3
+ // CHECK-NEXT: values : ( 2, 4, 6, 8, 10, 12, 14, 16, 18
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %1 : tensor<?x?xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 13
+ // CHECK-NEXT: pos[0] : ( 0, 4
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 3, 6, 10, 13
+ // CHECK-NEXT: crd[1] : ( 0, 1, 7, 0, 6, 7, 1, 2, 4, 7, 0, 2, 3
+ // CHECK-NEXT: values : ( 8, 4, 5, 4, 3, 6, 2, 8, 10, 13, 14, 16, 18
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %2 : tensor<?x?xf64, #DCSR>
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 2
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 0, 2
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2
+ // CHECK-NEXT: crd[1] : ( 0, 7
+ // CHECK-NEXT: values : ( 12, 12
+ // CHECK-NEXT: ----
//
- call @dump(%sm1) : (tensor<?x?xf64, #DCSR>) -> ()
- call @dump(%sm2) : (tensor<?x?xf64, #DCSR>) -> ()
- call @dump(%0) : (tensor<?x?xf64, #DCSR>) -> ()
- call @dump(%1) : (tensor<?x?xf64, #DCSR>) -> ()
- call @dump(%2) : (tensor<?x?xf64, #DCSR>) -> ()
- call @dump(%3) : (tensor<?x?xf64, #DCSR>) -> ()
+ sparse_tensor.print %3 : tensor<?x?xf64, #DCSR>
// Release the resources.
bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #DCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
new file mode 100755
index 000000000000..79728fdb0f8c
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
@@ -0,0 +1,269 @@
+//--------------------------------------------------------------------------------------------------
+// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
+//
+// Set-up that's shared across all tests in this directory. In principle, this
+// config could be moved to lit.local.cfg. However, there are downstream users that
+// do not use these LIT config files. Hence why this is kept inline.
+//
+// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
+// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
+// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
+// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
+// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
+// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
+// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
+//
+// DEFINE: %{env} =
+//--------------------------------------------------------------------------------------------------
+
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation.
+// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+
+#AllDense = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i : dense,
+ j : dense
+ )
+}>
+
+#AllDenseT = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j : dense,
+ i : dense
+ )
+}>
+
+#CSR = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i : dense,
+ j : compressed
+ )
+}>
+
+#DCSR = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i : compressed,
+ j : compressed
+ )
+}>
+
+#CSC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j : dense,
+ i : compressed
+ )
+}>
+
+#DCSC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j : compressed,
+ i : compressed
+ )
+}>
+
+#BSR = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i floordiv 2 : compressed,
+ j floordiv 4 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+#BSRC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i floordiv 2 : compressed,
+ j floordiv 4 : compressed,
+ j mod 4 : dense,
+ i mod 2 : dense
+ )
+}>
+
+#BSC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j floordiv 4 : compressed,
+ i floordiv 2 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+#BSCC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j floordiv 4 : compressed,
+ i floordiv 2 : compressed,
+ j mod 4 : dense,
+ i mod 2 : dense
+ )
+}>
+
+#BSR0 = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i floordiv 2 : dense,
+ j floordiv 4 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+#BSC0 = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j floordiv 4 : dense,
+ i floordiv 2 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+module {
+
+ //
+ // Main driver that tests sparse tensor storage.
+ //
+ func.func @main() {
+ %x = arith.constant dense <[
+ [ 1, 0, 2, 0, 0, 0, 0, 0 ],
+ [ 0, 0, 0, 0, 0, 0, 0, 0 ],
+ [ 0, 0, 0, 0, 0, 0, 0, 0 ],
+ [ 0, 0, 3, 4, 0, 5, 0, 0 ] ]> : tensor<4x8xi32>
+
+ %XO = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #AllDense>
+ %XT = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #AllDenseT>
+
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %XO : tensor<4x8xi32, #AllDense>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %XT : tensor<4x8xi32, #AllDenseT>
+
+ %a = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #CSR>
+ %b = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #DCSR>
+ %c = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #CSC>
+ %d = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #DCSC>
+ %e = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSR>
+ %f = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSRC>
+ %g = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSC>
+ %h = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSCC>
+ %i = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSR0>
+ %j = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSC0>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 2, 2, 2, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 2, 3, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %a : tensor<4x8xi32, #CSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 3,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 2, 3, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %b : tensor<4x8xi32, #DCSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 1, 1, 3, 4, 4, 5, 5, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 3, 3, 3,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %c : tensor<4x8xi32, #CSC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 4,
+ // CHECK-NEXT: crd[0] : ( 0, 2, 3, 5,
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3, 4, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 3, 3, 3,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %d : tensor<4x8xi32, #DCSC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %e : tensor<4x8xi32, #BSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 4, 0, 0, 0, 5, 0, 0, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %f : tensor<4x8xi32, #BSRC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %g : tensor<4x8xi32, #BSC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1,
+ // CHECK-NEXT: values : ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 4, 0, 0, 0, 5, 0, 0, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %h : tensor<4x8xi32, #BSCC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %i : tensor<4x8xi32, #BSR0>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %j : tensor<4x8xi32, #BSC0>
+
+ // Release the resources.
+ bufferization.dealloc_tensor %XO : tensor<4x8xi32, #AllDense>
+ bufferization.dealloc_tensor %XT : tensor<4x8xi32, #AllDenseT>
+ bufferization.dealloc_tensor %a : tensor<4x8xi32, #CSR>
+ bufferization.dealloc_tensor %b : tensor<4x8xi32, #DCSR>
+ bufferization.dealloc_tensor %c : tensor<4x8xi32, #CSC>
+ bufferization.dealloc_tensor %d : tensor<4x8xi32, #DCSC>
+ bufferization.dealloc_tensor %e : tensor<4x8xi32, #BSR>
+ bufferization.dealloc_tensor %f : tensor<4x8xi32, #BSRC>
+ bufferization.dealloc_tensor %g : tensor<4x8xi32, #BSC>
+ bufferization.dealloc_tensor %h : tensor<4x8xi32, #BSCC>
+ bufferization.dealloc_tensor %i : tensor<4x8xi32, #BSR0>
+ bufferization.dealloc_tensor %j : tensor<4x8xi32, #BSC0>
+
+ return
+ }
+}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
index b44ffc30c3b1..1860fc1c7027 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -72,22 +72,7 @@ module {
return %0 : tensor<?xf32, #SparseVector>
}
- func.func @dump(%arg0: tensor<?xf32, #SparseVector>) {
- %c0 = arith.constant 0 : index
- %d0 = arith.constant -1.0 : f32
- %n = sparse_tensor.number_of_entries %arg0 : tensor<?xf32, #SparseVector>
- vector.print %n : index
- %values = sparse_tensor.values %arg0 : tensor<?xf32, #SparseVector> to memref<?xf32>
- %0 = vector.transfer_read %values[%c0], %d0: memref<?xf32>, vector<3xf32>
- vector.print %0 : vector<3xf32>
- %coordinates = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<?xf32, #SparseVector> to memref<?xindex>
- %1 = vector.transfer_read %coordinates[%c0], %c0: memref<?xindex>, vector<3xindex>
- vector.print %1 : vector<3xindex>
- return
- }
-
- // Driver method to call and verify functions cim and cre.
- func.func @entry() {
+ func.func @main() {
// Setup sparse vectors.
%v1 = arith.constant sparse<
[ [0], [20], [31] ],
@@ -104,20 +89,27 @@ module {
//
// Verify the results.
//
- // CHECK: 3
- // CHECK-NEXT: ( 5.13, 3, 5 )
- // CHECK-NEXT: ( 0, 20, 31 )
- // CHECK-NEXT: 3
- // CHECK-NEXT: ( 2, 4, 6 )
- // CHECK-NEXT: ( 0, 20, 31 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: pos[0] : ( 0, 3,
+ // CHECK-NEXT: crd[0] : ( 0, 20, 31,
+ // CHECK-NEXT: values : ( 5.13, 3, 5,
+ // CHECK-NEXT: ----
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: pos[0] : ( 0, 3,
+ // CHECK-NEXT: crd[0] : ( 0, 20, 31,
+ // CHECK-NEXT: values : ( 2, 4, 6,
+ // CHECK-NEXT: ----
//
- call @dump(%0) : (tensor<?xf32, #SparseVector>) -> ()
- call @dump(%1) : (tensor<?xf32, #SparseVector>) -> ()
+ sparse_tensor.print %0 : tensor<?xf32, #SparseVector>
+ sparse_tensor.print %1 : tensor<?xf32, #SparseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f32>, #SparseVector>
- bufferization.dealloc_tensor %0 : tensor<?xf32, #SparseVector>
- bufferization.dealloc_tensor %1 : tensor<?xf32, #SparseVector>
+ bufferization.dealloc_tensor %0 : tensor<?xf32, #SparseVector>
+ bufferization.dealloc_tensor %1 : tensor<?xf32, #SparseVector>
return
}
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir
index 415181171e27..1794564a6a72 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir
@@ -12,13 +12,13 @@ func.func @checkSVL() {
%svl_h = arm_sme.streaming_vl <half>
%svl_w = arm_sme.streaming_vl <word>
%svl_d = arm_sme.streaming_vl <double>
- vector.print str "SVL.b"
+ vector.print str "SVL.b\n"
vector.print %svl_b : index
- vector.print str "SVL.h"
+ vector.print str "SVL.h\n"
vector.print %svl_h : index
- vector.print str "SVL.w"
+ vector.print str "SVL.w\n"
vector.print %svl_w : index
- vector.print str "SVL.d"
+ vector.print str "SVL.d\n"
vector.print %svl_d : index
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
index 2b8899b6c6fc..41e724844fe4 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
@@ -53,13 +53,13 @@ func.func @test_load_store_zaq0() {
// CHECK-LABEL: INITIAL TILE A:
// CHECK: ( 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 )
- vector.print str "INITIAL TILE A:"
+ vector.print str "INITIAL TILE A:\n"
func.call @print_i8s(%tile_a_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
vector.print punctuation <newline>
// CHECK-LABEL: INITIAL TILE B:
// CHECK: ( 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 )
- vector.print str "INITIAL TILE B:"
+ vector.print str "INITIAL TILE B:\n"
func.call @print_i8s(%tile_b_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
vector.print punctuation <newline>
@@ -68,13 +68,13 @@ func.func @test_load_store_zaq0() {
// CHECK-LABEL: FINAL TILE A:
// CHECK: ( 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 )
- vector.print str "FINAL TILE A:"
+ vector.print str "FINAL TILE A:\n"
func.call @print_i8s(%tile_a_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
vector.print punctuation <newline>
// CHECK-LABEL: FINAL TILE B:
// CHECK: ( 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 )
- vector.print str "FINAL TILE B:"
+ vector.print str "FINAL TILE B:\n"
func.call @print_i8s(%tile_b_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
index 27be801252b8..68c31ac1dd8e 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
@@ -49,12 +49,12 @@ func.func @entry() {
// CHECK-NEXT: ( 2, 2, 2, 2
// CHECK-NEXT: ( 3, 3, 3, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
scf.for %i = %c0 to %za_s_size step %svl_s {
%tileslice = vector.load %mem1[%i] : memref<?xi32>, vector<[4]xi32>
vector.print %tileslice : vector<[4]xi32>
}
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
// 2. VERTICAL LAYOUT
// Dump "mem2". The smallest SVL is 128-bits so the tile will be at least
@@ -66,9 +66,9 @@ func.func @entry() {
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %0 : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
index 9d836d93c85b..cd48f2a9ebfd 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
@@ -46,12 +46,12 @@ func.func @testTransposedReadWithMask(%maskRows: index, %maskCols: index) {
vector.transfer_write %readTransposed, %outDyn[%c0, %c0] {in_bounds = [true, true]} : vector<[16]x[4]xf32>, memref<?x?xf32>
/// Print the input memref.
- vector.print str "Input memref:"
+ vector.print str "Input memref:\n"
%inUnranked = memref.cast %inDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%inUnranked) : (memref<*xf32>) -> ()
/// Print the result memref.
- vector.print str "Masked transposed result:"
+ vector.print str "Masked transposed result:\n"
%outUnranked = memref.cast %outDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%outUnranked) : (memref<*xf32>) -> ()
@@ -84,12 +84,12 @@ func.func @testTransposedWriteWithMask(%maskRows: index, %maskCols: index) {
: vector<[16]x[4]xf32>, memref<?x?xf32>
/// Print the input memref.
- vector.print str "Input memref:"
+ vector.print str "Input memref:\n"
%inUnranked = memref.cast %inDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%inUnranked) : (memref<*xf32>) -> ()
/// Print the result memref.
- vector.print str "Masked transposed result:"
+ vector.print str "Masked transposed result:\n"
%outUnranked = memref.cast %outDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%outUnranked) : (memref<*xf32>) -> ()
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
index 7e7869d1c957..fb6c06cfd699 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
@@ -35,9 +35,9 @@ func.func @test_outerproduct_no_accumulator_4x4xf32() {
// WITHOUT-ACC-NEXT: ( 0, 2, 4, 6
// WITHOUT-ACC-NEXT: ( 0, 3, 6, 9
// WITHOUT-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -60,9 +60,9 @@ func.func @test_outerproduct_with_accumulator_4x4xf32() {
// WITH-ACC-NEXT: ( 10, 12, 14, 16
// WITH-ACC-NEXT: ( 10, 13, 16, 19
// WITH-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -91,9 +91,9 @@ func.func @test_masked_outerproduct_no_accumulator_4x4xf32() {
// WITH-MASK-NEXT: ( 3, 6, 0, 0
// WITH-MASK-NEXT: ( 0, 0, 0, 0
// WITH-MASK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -124,9 +124,9 @@ func.func @test_masked_outerproduct_with_accumulator_4x4xf32() {
// WITH-MASK-AND-ACC-NEXT: ( 10, 10, 10, 10
// WITH-MASK-AND-ACC-NEXT: ( 10, 10, 10, 10
// WITH-MASK-AND-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
index 46bf799232ae..b8458606d3f3 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
@@ -40,9 +40,9 @@ func.func @test_outerproduct_no_accumulator_2x2xf64() {
// CHECK-NEXT: ( 1, 2
// CHECK-NEXT: ( 2, 4
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -66,9 +66,9 @@ func.func @test_outerproduct_with_accumulator_2x2xf64() {
// WITH-ACC-NEXT: ( 11, 12
// WITH-ACC-NEXT: ( 12, 14
// WITH-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -96,9 +96,9 @@ func.func @test_masked_outerproduct_no_accumulator_2x2xf64() {
// WITH-MASK-NEXT: ( 1, 0
// WITH-MASK-NEXT: ( 2, 0
// WITH-MASK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -127,9 +127,9 @@ func.func @test_masked_outerproduct_with_accumulator_2x2xf64() {
// WITH-MASK-AND-ACC-NEXT: ( 11, 12
// WITH-MASK-AND-ACC-NEXT: ( 10, 10
// WITH-MASK-AND-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
index 52f56883cad9..7421521b96bf 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
@@ -14,7 +14,7 @@ func.func @transfer_read_2d(%A : memref<?x?xf32>, %base1: index, %base2: index)
%0 = vector.transfer_read %A[%base1, %base2], %pad {in_bounds=[true, true]} :
memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -27,7 +27,7 @@ func.func @transfer_read_2d_transposed(%A : memref<?x?xf32>, %base1: index, %bas
{permutation_map = affine_map<(d0, d1) -> (d1, d0)>, in_bounds=[true, true]}
: memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0 : vector<[4]x[4]xf32>
return
@@ -42,7 +42,7 @@ func.func @transfer_read_2d_mask(%A : memref<?x?xf32>, %base1: index, %base2: in
%0 = vector.transfer_read %A[%base1, %base2], %pad, %mask
{in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -58,7 +58,7 @@ func.func @transfer_read_2d_mask_transposed(%A : memref<?x?xf32>, %base1: index,
{permutation_map = affine_map<(d0, d1) -> (d1, d0)>, in_bounds=[true, true]}
: memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -73,7 +73,7 @@ func.func @transfer_read_2d_mask_non_zero_pad(%A : memref<?x?xf32>, %base1: inde
%0 = vector.transfer_read %A[%base1, %base2], %pad, %mask
{in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -89,7 +89,7 @@ func.func @transfer_read_2d_mask_non_zero_pad_transposed(%A : memref<?x?xf32>, %
{permutation_map = affine_map<(d0, d1) -> (d1, d0)>, in_bounds=[true, true]}
: memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
index 710cc6672f00..2fef705861f2 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
@@ -51,7 +51,7 @@ func.func @transfer_write_2d_mask_transposed(%A : memref<?x?xf32>, %base1: index
func.func @load_and_print(%A : memref<?x?xf32>, %base1: index, %base2: index) {
%0 = vector.load %A[%base1, %base2] : memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
index 88bc0d0709d4..177c96f1d8aa 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
@@ -51,9 +51,9 @@ func.func @entry() {
// CHECK-NEXT: ( 2, 2, 2, 2
// CHECK-NEXT: ( 3, 3, 3, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
// Dump the transposed tile. The smallest SVL is 128-bits so the tile will be
// at least 4x4xi32.
@@ -64,9 +64,9 @@ func.func @entry() {
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %transposed_tile : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
index e14917486d84..3d74508cd23b 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
@@ -23,9 +23,9 @@ func.func @entry() -> i32 {
// CHECK-NEXT: ( 123, 123, 123, 123
// CHECK-NEXT: ( 123, 123, 123, 123
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
%c0_i32 = arith.constant 0 : i32
return %c0_i32 : i32
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
index b29790db14dd..48080fd0a26a 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
@@ -255,7 +255,7 @@ func.func @load_store_two_za_s_tiles() -> i32 {
// CHECK-NEXT: ( 1, 1, 1, 1
// CHECK-NEXT: ( 1, 1, 1, 1
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
scf.for %i = %c0 to %size_of_two_tiles step %svl_s {
%av = vector.load %mem2[%i] : memref<?xi32>, vector<[4]xi32>
vector.print %av : vector<[4]xi32>
@@ -263,11 +263,11 @@ func.func @load_store_two_za_s_tiles() -> i32 {
%tileSizeMinusStep = arith.subi %size_of_tile, %svl_s : index
%isNextTile = arith.cmpi eq, %i, %tileSizeMinusStep : index
scf.if %isNextTile {
- vector.print str "TILE END"
- vector.print str "TILE BEGIN"
+ vector.print str "TILE END\n"
+ vector.print str "TILE BEGIN\n"
}
}
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return %c0_i32 : i32
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir
index 4f46c6e1ebf6..aa8d0e4d5104 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir
@@ -8,7 +8,7 @@
func.func @checkVScale() {
%vscale = vector.vscale
- vector.print str "vscale"
+ vector.print str "vscale = "
vector.print %vscale : index
return
}
@@ -20,28 +20,23 @@ func.func @setAndCheckVL(%bits: i32) {
}
func.func @main() {
- // CHECK: vscale
- // CHECK-NEXT: 1
+ // CHECK: vscale = 1
%c128 = arith.constant 128 : i32
func.call @setAndCheckVL(%c128) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 2
+ // CHECK: vscale = 2
%c256 = arith.constant 256 : i32
func.call @setAndCheckVL(%c256) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 4
+ // CHECK: vscale = 4
%c512 = arith.constant 512 : i32
func.call @setAndCheckVL(%c512) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 8
+ // CHECK: vscale = 8
%c1024 = arith.constant 1024 : i32
func.call @setAndCheckVL(%c1024) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 16
+ // CHECK: vscale = 16
%c2048 = arith.constant 2048 : i32
func.call @setAndCheckVL(%c2048) : (i32) -> ()
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir
index c486bf0de5d3..afb23e8e5206 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir
@@ -24,7 +24,7 @@ func.func @read_and_print_2d_vector(%memref: memref<3x?xf32>) {
/// Print each of the vectors.
/// vscale is >= 1, so at least 8 elements will be printed.
- vector.print str "read_and_print_2d_vector()"
+ vector.print str "read_and_print_2d_vector()\n"
// CHECK-LABEL: read_and_print_2d_vector()
// CHECK: ( 8, 8, 8, 8, 8, 8, 8, 8
vector.print %row0 : vector<[8]xf32>
@@ -62,21 +62,21 @@ func.func @add_arrays_of_scalable_vectors(%a: memref<1x2x?xf32>, %b: memref<1x2x
// CHECK-LABEL: Vector A
// CHECK-NEXT: ( 5, 5, 5, 5
// CHECK-NEXT: ( 5, 5, 5, 5
- vector.print str "\nVector A"
+ vector.print str "\nVector A\n"
%vector_a = vector.transfer_read %a[%c0, %c0, %c0], %cst, %mask_a {in_bounds = [true, true, true]} : memref<1x2x?xf32>, vector<1x2x[4]xf32>
func.call @print_1x2xVSCALExf32(%vector_a) : (vector<1x2x[4]xf32>) -> ()
// CHECK-LABEL: Vector B
// CHECK-NEXT: ( 4, 4, 4, 4
// CHECK-NEXT: ( 4, 4, 4, 4
- vector.print str "\nVector B"
+ vector.print str "\nVector B\n"
%vector_b = vector.transfer_read %b[%c0, %c0, %c0], %cst, %mask_b {in_bounds = [true, true, true]} : memref<1x2x?xf32>, vector<1x2x[4]xf32>
func.call @print_1x2xVSCALExf32(%vector_b) : (vector<1x2x[4]xf32>) -> ()
// CHECK-LABEL: Sum
// CHECK-NEXT: ( 9, 9, 9, 9
// CHECK-NEXT: ( 9, 9, 9, 9
- vector.print str "\nSum"
+ vector.print str "\nSum\n"
%sum = arith.addf %vector_a, %vector_b : vector<1x2x[4]xf32>
func.call @print_1x2xVSCALExf32(%sum) : (vector<1x2x[4]xf32>) -> ()
@@ -97,7 +97,7 @@ func.func @entry() {
linalg.fill ins(%f32_8 : f32) outs(%test_1_memref :memref<3x?xf32>)
- vector.print str "=> Print and read 2D arrays of scalable vectors:"
+ vector.print str "=> Print and read 2D arrays of scalable vectors:\n"
func.call @read_and_print_2d_vector(%test_1_memref) : (memref<3x?xf32>) -> ()
vector.print str "\n====================\n"
@@ -109,7 +109,7 @@ func.func @entry() {
linalg.fill ins(%f32_5 : f32) outs(%test_2_memref_a :memref<1x2x?xf32>)
linalg.fill ins(%f32_4 : f32) outs(%test_2_memref_b :memref<1x2x?xf32>)
- vector.print str "=> Reading and adding two 3D arrays of scalable vectors:"
+ vector.print str "=> Reading and adding two 3D arrays of scalable vectors:\n"
func.call @add_arrays_of_scalable_vectors(
%test_2_memref_a, %test_2_memref_b) : (memref<1x2x?xf32>, memref<1x2x?xf32>) -> ()
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-contraction.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-contraction.mlir
index d86ff56d79e3..79121bf31c26 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-contraction.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-contraction.mlir
@@ -1,4 +1,4 @@
-// DEFINE: %{compile} = mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule\
+// DEFINE: %{compile} = mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule\
// DEFINE: -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage\
// DEFINE: -convert-vector-to-llvm="enable-arm-sve" -test-lower-to-llvm -o %t
// DEFINE: %{entry} =
@@ -188,12 +188,14 @@ func.func @matmul_f32() {
return
}
-transform.sequence failures(propagate) {
-^bb1(%module_op: !transform.any_op):
- %f = transform.structured.match ops{["func.func"]} in %module_op
- : (!transform.any_op) -> !transform.any_op
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%module_op: !transform.any_op) {
+ %f = transform.structured.match ops{["func.func"]} in %module_op
+ : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %f {
- transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
- } : !transform.any_op
+ transform.apply_patterns to %f {
+ transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
+ } : !transform.any_op
+ transform.yield
+ }
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir
index 78d6609ccaf9..25a44f22c2dc 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir
@@ -7,8 +7,8 @@
func.func @entry() {
// CHECK: Hello, World!
- vector.print str "Hello, World!"
+ vector.print str "Hello, World!\n"
// CHECK-NEXT: Bye!
- vector.print str "Bye!"
+ vector.print str "Bye!\n"
return
}
diff --git a/mlir/test/Target/Cpp/bitwise_operators.mlir b/mlir/test/Target/Cpp/bitwise_operators.mlir
new file mode 100644
index 000000000000..e666359fc82c
--- /dev/null
+++ b/mlir/test/Target/Cpp/bitwise_operators.mlir
@@ -0,0 +1,20 @@
+// RUN: mlir-translate -mlir-to-cpp %s | FileCheck %s
+
+func.func @bitwise(%arg0: i32, %arg1: i32) -> () {
+ %0 = emitc.bitwise_and %arg0, %arg1 : (i32, i32) -> i32
+ %1 = emitc.bitwise_left_shift %arg0, %arg1 : (i32, i32) -> i32
+ %2 = emitc.bitwise_not %arg0 : (i32) -> i32
+ %3 = emitc.bitwise_or %arg0, %arg1 : (i32, i32) -> i32
+ %4 = emitc.bitwise_right_shift %arg0, %arg1 : (i32, i32) -> i32
+ %5 = emitc.bitwise_xor %arg0, %arg1 : (i32, i32) -> i32
+
+ return
+}
+
+// CHECK-LABEL: void bitwise
+// CHECK-NEXT: int32_t [[V2:[^ ]*]] = [[V0:[^ ]*]] & [[V1:[^ ]*]];
+// CHECK-NEXT: int32_t [[V3:[^ ]*]] = [[V0]] << [[V1]];
+// CHECK-NEXT: int32_t [[V4:[^ ]*]] = ~[[V0]];
+// CHECK-NEXT: int32_t [[V5:[^ ]*]] = [[V0]] | [[V1]];
+// CHECK-NEXT: int32_t [[V6:[^ ]*]] = [[V0]] >> [[V1]];
+// CHECK-NEXT: int32_t [[V7:[^ ]*]] = [[V0]] ^ [[V1]];
diff --git a/mlir/test/Target/Cpp/logical_operators.mlir b/mlir/test/Target/Cpp/logical_operators.mlir
new file mode 100644
index 000000000000..7083dc218fca
--- /dev/null
+++ b/mlir/test/Target/Cpp/logical_operators.mlir
@@ -0,0 +1,14 @@
+// RUN: mlir-translate -mlir-to-cpp %s | FileCheck %s
+
+func.func @logical(%arg0: i32, %arg1: i32) -> () {
+ %0 = emitc.logical_and %arg0, %arg1 : i32, i32
+ %1 = emitc.logical_not %arg0 : i32
+ %2 = emitc.logical_or %arg0, %arg1 : i32, i32
+
+ return
+}
+
+// CHECK-LABEL: void logical
+// CHECK-NEXT: bool [[V2:[^ ]*]] = [[V0:[^ ]*]] && [[V1:[^ ]*]];
+// CHECK-NEXT: bool [[V3:[^ ]*]] = ![[V0]];
+// CHECK-NEXT: bool [[V4:[^ ]*]] = [[V0]] || [[V1]];
diff --git a/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir b/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir
index 4434aea4ec96..fa3395533af2 100644
--- a/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir
+++ b/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir
@@ -59,14 +59,48 @@ llvm.func @alias_scopes(%arg1 : !llvm.ptr) {
#alias_scope1 = #llvm.alias_scope<id = distinct[1]<>, domain = #alias_scope_domain>
// CHECK-LABEL: @noalias_intr_only
-llvm.func @noalias_intr_only(%arg1 : !llvm.ptr) {
- %0 = llvm.mlir.constant(0 : i32) : i32
- // CHECK: call void @llvm.experimental.noalias.scope.decl(metadata ![[SCOPES1:[0-9]+]])
+llvm.func @noalias_intr_only() {
+ // CHECK: call void @llvm.experimental.noalias.scope.decl(metadata ![[SCOPES:[0-9]+]])
llvm.intr.experimental.noalias.scope.decl #alias_scope1
llvm.return
}
// Check the translated metadata.
// CHECK-DAG: ![[DOMAIN:[0-9]+]] = distinct !{![[DOMAIN]], !"The domain"}
-// CHECK-DAG: ![[SCOPE1:[0-9]+]] = distinct !{![[SCOPE1]], ![[DOMAIN]]}
-// CHECK-DAG: ![[SCOPES1]] = !{![[SCOPE1]]}
+// CHECK-DAG: ![[SCOPE:[0-9]+]] = distinct !{![[SCOPE]], ![[DOMAIN]]}
+// CHECK-DAG: ![[SCOPES]] = !{![[SCOPE]]}
+
+// -----
+
+// This test ensures the alias scope translation creates a temporary metadata
+// node as a placeholder for self-references. Without this, the debug info
+// translation of a type list with a null entry could inadvertently reference
+// access group metadata. This occurs when both translations generate a metadata
+// list with a null entry, which are then uniqued to the same metadata node.
+// The access group translation subsequently updates the null entry to a
+// self-reference, which causes the type list to reference the access
+// group node as well. The use of a temporary placeholder node avoids the issue.
+
+#alias_scope_domain = #llvm.alias_scope_domain<id = distinct[0]<>>
+#alias_scope = #llvm.alias_scope<id = distinct[1]<>, domain = #alias_scope_domain>
+
+#di_null_type = #llvm.di_null_type
+#di_subroutine_type = #llvm.di_subroutine_type<types = #di_null_type>
+#di_file = #llvm.di_file<"attribute-alias-scope.mlir" in "">
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[3]<>, sourceLanguage = DW_LANG_C11, file = #di_file, isOptimized = true, emissionKind = Full>
+#di_subprogram = #llvm.di_subprogram<id = distinct[2]<>, compileUnit = #di_compile_unit, scope = #di_file, file = #di_file, subprogramFlags = "Definition", type = #di_subroutine_type>
+
+// CHECK-LABEL: @self_reference
+llvm.func @self_reference() {
+ // CHECK: call void @llvm.experimental.noalias.scope.decl(metadata ![[SCOPES:[0-9]+]])
+ llvm.intr.experimental.noalias.scope.decl #alias_scope
+ llvm.return
+} loc(fused<#di_subprogram>[unknown])
+
+// Check that the translated subroutine types do not reference the access group
+// domain since both of them are created as metadata list with a null entry.
+// CHECK-DAG: ![[DOMAIN:[0-9]+]] = distinct !{![[DOMAIN]]}
+// CHECK-DAG: ![[SCOPE:[0-9]+]] = distinct !{![[SCOPE]], ![[DOMAIN]]}
+// CHECK-DAG: ![[SCOPES]] = !{![[SCOPE]]}
+// CHECK-DAG: = !DISubroutineType(types: ![[TYPES:[0-9]+]])
+// CHECK-DAG: ![[TYPES]] = !{null}
diff --git a/mlir/test/Target/LLVMIR/erase-dangling-constants.mlir b/mlir/test/Target/LLVMIR/erase-dangling-constants.mlir
deleted file mode 100644
index dbb675595600..000000000000
--- a/mlir/test/Target/LLVMIR/erase-dangling-constants.mlir
+++ /dev/null
@@ -1,73 +0,0 @@
-// REQUIRES: asserts
-// RUN: mlir-translate -mlir-to-llvmir %s -debug-only=llvm-dialect-to-llvm-ir 2>&1 | FileCheck %s
-
-// CHECK: Convert initializer for dup_const
-// CHECK: 6 new constants hit
-// CHECK: 3 dangling constants erased
-// CHECK: Convert initializer for unique_const
-// CHECK: 6 new constants hit
-// CHECK: 5 dangling constants erased
-
-
-// CHECK:@dup_const = global { [2 x double], [2 x double], [2 x double] } { [2 x double] [double 3.612250e-02, double 5.119230e-02], [2 x double] [double 3.612250e-02, double 5.119230e-02], [2 x double] [double 3.612250e-02, double 5.119230e-02] }
-
-llvm.mlir.global @dup_const() : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)> {
- %c0 = llvm.mlir.constant(3.612250e-02 : f64) : f64
- %c1 = llvm.mlir.constant(5.119230e-02 : f64) : f64
-
- %empty0 = llvm.mlir.undef : !llvm.array<2 x f64>
- %a00 = llvm.insertvalue %c0, %empty0[0] : !llvm.array<2 x f64>
-
- %empty1 = llvm.mlir.undef : !llvm.array<2 x f64>
- %a10 = llvm.insertvalue %c0, %empty1[0] : !llvm.array<2 x f64>
-
- %empty2 = llvm.mlir.undef : !llvm.array<2 x f64>
- %a20 = llvm.insertvalue %c0, %empty2[0] : !llvm.array<2 x f64>
-
-// NOTE: a00, a10, a20 are all same ConstantAggregate which not used at this point.
-// should not delete it before all of the uses of the ConstantAggregate finished.
-
- %a01 = llvm.insertvalue %c1, %a00[1] : !llvm.array<2 x f64>
- %a11 = llvm.insertvalue %c1, %a10[1] : !llvm.array<2 x f64>
- %a21 = llvm.insertvalue %c1, %a20[1] : !llvm.array<2 x f64>
- %empty_r = llvm.mlir.undef : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
- %r0 = llvm.insertvalue %a01, %empty_r[0] : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
- %r1 = llvm.insertvalue %a11, %r0[1] : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
- %r2 = llvm.insertvalue %a21, %r1[2] : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
-
- llvm.return %r2 : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
- }
-
-// CHECK:@unique_const = global { [2 x double], [2 x double], [2 x double] } { [2 x double] [double 3.612250e-02, double 5.119230e-02], [2 x double] [double 3.312250e-02, double 5.219230e-02], [2 x double] [double 3.412250e-02, double 5.419230e-02] }
-
-llvm.mlir.global @unique_const() : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)> {
- %c0 = llvm.mlir.constant(3.612250e-02 : f64) : f64
- %c1 = llvm.mlir.constant(5.119230e-02 : f64) : f64
-
- %c2 = llvm.mlir.constant(3.312250e-02 : f64) : f64
- %c3 = llvm.mlir.constant(5.219230e-02 : f64) : f64
-
- %c4 = llvm.mlir.constant(3.412250e-02 : f64) : f64
- %c5 = llvm.mlir.constant(5.419230e-02 : f64) : f64
-
- %2 = llvm.mlir.undef : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
-
- %3 = llvm.mlir.undef : !llvm.array<2 x f64>
-
- %4 = llvm.insertvalue %c0, %3[0] : !llvm.array<2 x f64>
- %5 = llvm.insertvalue %c1, %4[1] : !llvm.array<2 x f64>
-
- %6 = llvm.insertvalue %5, %2[0] : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
-
- %7 = llvm.insertvalue %c2, %3[0] : !llvm.array<2 x f64>
- %8 = llvm.insertvalue %c3, %7[1] : !llvm.array<2 x f64>
-
- %9 = llvm.insertvalue %8, %6[1] : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
-
- %10 = llvm.insertvalue %c4, %3[0] : !llvm.array<2 x f64>
- %11 = llvm.insertvalue %c5, %10[1] : !llvm.array<2 x f64>
-
- %12 = llvm.insertvalue %11, %9[2] : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
-
- llvm.return %12 : !llvm.struct<(array<2 x f64>, array<2 x f64>, array<2 x f64>)>
-}
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 39a1e036e85c..12bd108ba86c 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -2186,6 +2186,43 @@ llvm.func @single_nowait(%x: i32, %y: i32, %zaddr: !llvm.ptr) {
// -----
+llvm.func @copy_i32(!llvm.ptr, !llvm.ptr)
+llvm.func @copy_f32(!llvm.ptr, !llvm.ptr)
+
+// CHECK-LABEL: @single_copyprivate
+// CHECK-SAME: (ptr %[[ip:.*]], ptr %[[fp:.*]])
+llvm.func @single_copyprivate(%ip: !llvm.ptr, %fp: !llvm.ptr) {
+ // CHECK: %[[didit_addr:.*]] = alloca i32
+ // CHECK: store i32 0, ptr %[[didit_addr]]
+ // CHECK: call i32 @__kmpc_single
+ omp.single copyprivate(%ip -> @copy_i32 : !llvm.ptr, %fp -> @copy_f32 : !llvm.ptr) {
+ // CHECK: %[[i:.*]] = load i32, ptr %[[ip]]
+ %i = llvm.load %ip : !llvm.ptr -> i32
+ // CHECK: %[[i2:.*]] = add i32 %[[i]], %[[i]]
+ %i2 = llvm.add %i, %i : i32
+ // CHECK: store i32 %[[i2]], ptr %[[ip]]
+ llvm.store %i2, %ip : i32, !llvm.ptr
+ // CHECK: %[[f:.*]] = load float, ptr %[[fp]]
+ %f = llvm.load %fp : !llvm.ptr -> f32
+ // CHECK: %[[f2:.*]] = fadd float %[[f]], %[[f]]
+ %f2 = llvm.fadd %f, %f : f32
+ // CHECK: store float %[[f2]], ptr %[[fp]]
+ llvm.store %f2, %fp : f32, !llvm.ptr
+ // CHECK: store i32 1, ptr %[[didit_addr]]
+ // CHECK: call void @__kmpc_end_single
+ // CHECK: %[[didit:.*]] = load i32, ptr %[[didit_addr]]
+ // CHECK: call void @__kmpc_copyprivate({{.*}}, ptr %[[ip]], ptr @copy_i32, i32 %[[didit]])
+ // CHECK: %[[didit2:.*]] = load i32, ptr %[[didit_addr]]
+ // CHECK: call void @__kmpc_copyprivate({{.*}}, ptr %[[fp]], ptr @copy_f32, i32 %[[didit2]])
+ // CHECK-NOT: call void @__kmpc_barrier
+ omp.terminator
+ }
+ // CHECK: ret void
+ llvm.return
+}
+
+// -----
+
// CHECK: @_QFsubEx = internal global i32 undef
// CHECK: @_QFsubEx.cache = common global ptr null
diff --git a/mlir/test/Target/LLVMIR/openmp-private.mlir b/mlir/test/Target/LLVMIR/openmp-private.mlir
new file mode 100644
index 000000000000..58bda87c3b7b
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/openmp-private.mlir
@@ -0,0 +1,142 @@
+// Test code-gen for `omp.parallel` ops with delayed privatizers (i.e. using
+// `omp.private` ops).
+
+// RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s
+
+llvm.func @parallel_op_1_private(%arg0: !llvm.ptr) {
+ omp.parallel private(@x.privatizer %arg0 -> %arg2 : !llvm.ptr) {
+ %0 = llvm.load %arg2 : !llvm.ptr -> f32
+ omp.terminator
+ }
+ llvm.return
+}
+
+// CHECK-LABEL: @parallel_op_1_private
+// CHECK-SAME: (ptr %[[ORIG:.*]]) {
+// CHECK: %[[OMP_PAR_ARG:.*]] = alloca { ptr }, align 8
+// CHECK: %[[ORIG_GEP:.*]] = getelementptr { ptr }, ptr %[[OMP_PAR_ARG]], i32 0, i32 0
+// CHECK: store ptr %[[ORIG]], ptr %[[ORIG_GEP]], align 8
+// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 1, ptr @parallel_op_1_private..omp_par, ptr %[[OMP_PAR_ARG]])
+// CHECK: }
+
+// CHECK-LABEL: void @parallel_op_1_private..omp_par
+// CHECK-SAME: (ptr noalias %{{.*}}, ptr noalias %{{.*}}, ptr %[[ARG:.*]])
+// CHECK: %[[ORIG_PTR_PTR:.*]] = getelementptr { ptr }, ptr %[[ARG]], i32 0, i32 0
+// CHECK: %[[ORIG_PTR:.*]] = load ptr, ptr %[[ORIG_PTR_PTR]], align 8
+
+// Check that the privatizer alloc region was inlined properly.
+// CHECK: %[[PRIV_ALLOC:.*]] = alloca float, align 4
+// CHECK: %[[ORIG_VAL:.*]] = load float, ptr %[[ORIG_PTR]], align 4
+// CHECK: store float %[[ORIG_VAL]], ptr %[[PRIV_ALLOC]], align 4
+// CHECK-NEXT: br
+
+// Check that the privatized value is used (rather than the original one).
+// CHECK: load float, ptr %[[PRIV_ALLOC]], align 4
+// CHECK: }
+
+llvm.func @parallel_op_2_privates(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
+ omp.parallel private(@x.privatizer %arg0 -> %arg2 : !llvm.ptr, @y.privatizer %arg1 -> %arg3 : !llvm.ptr) {
+ %0 = llvm.load %arg2 : !llvm.ptr -> f32
+ %1 = llvm.load %arg3 : !llvm.ptr -> i32
+ omp.terminator
+ }
+ llvm.return
+}
+
+// CHECK-LABEL: @parallel_op_2_privates
+// CHECK-SAME: (ptr %[[ORIG1:.*]], ptr %[[ORIG2:.*]]) {
+// CHECK: %[[OMP_PAR_ARG:.*]] = alloca { ptr, ptr }, align 8
+// CHECK: %[[ORIG1_GEP:.*]] = getelementptr { ptr, ptr }, ptr %[[OMP_PAR_ARG]], i32 0, i32 0
+// CHECK: store ptr %[[ORIG1]], ptr %[[ORIG1_GEP]], align 8
+// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 1, ptr @parallel_op_2_privates..omp_par, ptr %[[OMP_PAR_ARG]])
+// CHECK: }
+
+// CHECK-LABEL: void @parallel_op_2_privates..omp_par
+// CHECK-SAME: (ptr noalias %{{.*}}, ptr noalias %{{.*}}, ptr %[[ARG:.*]])
+// CHECK: %[[ORIG1_PTR_PTR:.*]] = getelementptr { ptr, ptr }, ptr %[[ARG]], i32 0, i32 0
+// CHECK: %[[ORIG1_PTR:.*]] = load ptr, ptr %[[ORIG1_PTR_PTR]], align 8
+// CHECK: %[[ORIG2_PTR_PTR:.*]] = getelementptr { ptr, ptr }, ptr %[[ARG]], i32 0, i32 1
+// CHECK: %[[ORIG2_PTR:.*]] = load ptr, ptr %[[ORIG2_PTR_PTR]], align 8
+
+// Check that the privatizer alloc region was inlined properly.
+// CHECK: %[[PRIV1_ALLOC:.*]] = alloca float, align 4
+// CHECK: %[[ORIG1_VAL:.*]] = load float, ptr %[[ORIG1_PTR]], align 4
+// CHECK: store float %[[ORIG1_VAL]], ptr %[[PRIV1_ALLOC]], align 4
+// CHECK: %[[PRIV2_ALLOC:.*]] = alloca i32, align 4
+// CHECK: %[[ORIG2_VAL:.*]] = load i32, ptr %[[ORIG2_PTR]], align 4
+// CHECK: store i32 %[[ORIG2_VAL]], ptr %[[PRIV2_ALLOC]], align 4
+// CHECK-NEXT: br
+
+// Check that the privatized value is used (rather than the original one).
+// CHECK: load float, ptr %[[PRIV1_ALLOC]], align 4
+// CHECK: load i32, ptr %[[PRIV2_ALLOC]], align 4
+// CHECK: }
+
+omp.private {type = private} @x.privatizer : !llvm.ptr alloc {
+^bb0(%arg0: !llvm.ptr):
+ %c1 = llvm.mlir.constant(1 : i32) : i32
+ %0 = llvm.alloca %c1 x f32 : (i32) -> !llvm.ptr
+ %1 = llvm.load %arg0 : !llvm.ptr -> f32
+ llvm.store %1, %0 : f32, !llvm.ptr
+ omp.yield(%0 : !llvm.ptr)
+}
+
+omp.private {type = private} @y.privatizer : !llvm.ptr alloc {
+^bb0(%arg0: !llvm.ptr):
+ %c1 = llvm.mlir.constant(1 : i32) : i32
+ %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
+ %1 = llvm.load %arg0 : !llvm.ptr -> i32
+ llvm.store %1, %0 : i32, !llvm.ptr
+ omp.yield(%0 : !llvm.ptr)
+}
+
+// -----
+
+llvm.func @parallel_op_private_multi_block(%arg0: !llvm.ptr) {
+ omp.parallel private(@multi_block.privatizer %arg0 -> %arg2 : !llvm.ptr) {
+ %0 = llvm.load %arg2 : !llvm.ptr -> f32
+ omp.terminator
+ }
+ llvm.return
+}
+
+// CHECK-LABEL: define internal void @parallel_op_private_multi_block..omp_par
+// CHECK: omp.par.entry:
+// CHECK: %[[ORIG_PTR_PTR:.*]] = getelementptr { ptr }, ptr %{{.*}}, i32 0, i32 0
+// CHECK: %[[ORIG_PTR:.*]] = load ptr, ptr %[[ORIG_PTR_PTR]], align 8
+// CHECK: br label %[[PRIV_BB1:.*]]
+
+// Check contents of the first block in the `alloc` region.
+// CHECK: [[PRIV_BB1]]:
+// CHECK-NEXT: %[[PRIV_ALLOC:.*]] = alloca float, align 4
+// CHECK-NEXT: br label %[[PRIV_BB2:.*]]
+
+// Check contents of the second block in the `alloc` region.
+// CHECK: [[PRIV_BB2]]:
+// CHECK-NEXT: %[[ORIG_PTR2:.*]] = phi ptr [ %[[ORIG_PTR]], %[[PRIV_BB1]] ]
+// CHECK-NEXT: %[[PRIV_ALLOC2:.*]] = phi ptr [ %[[PRIV_ALLOC]], %[[PRIV_BB1]] ]
+// CHECK-NEXT: %[[ORIG_VAL:.*]] = load float, ptr %[[ORIG_PTR2]], align 4
+// CHECK-NEXT: store float %[[ORIG_VAL]], ptr %[[PRIV_ALLOC2]], align 4
+// CHECK-NEXT: br label %[[PRIV_CONT:.*]]
+
+// Check that the privatizer's continuation block yileds the private clone's
+// address.
+// CHECK: [[PRIV_CONT]]:
+// CHECK-NEXT: %[[PRIV_ALLOC3:.*]] = phi ptr [ %[[PRIV_ALLOC2]], %[[PRIV_BB2]] ]
+// CHECK-NEXT: br label %[[PAR_REG:.*]]
+
+// Check that the body of the parallel region loads from the private clone.
+// CHECK: [[PAR_REG]]:
+// CHECK: %{{.*}} = load float, ptr %[[PRIV_ALLOC3]], align 4
+
+omp.private {type = private} @multi_block.privatizer : !llvm.ptr alloc {
+^bb0(%arg0: !llvm.ptr):
+ %c1 = llvm.mlir.constant(1 : i32) : i32
+ %0 = llvm.alloca %c1 x f32 : (i32) -> !llvm.ptr
+ llvm.br ^bb1(%arg0, %0 : !llvm.ptr, !llvm.ptr)
+
+^bb1(%arg1: !llvm.ptr, %arg2: !llvm.ptr):
+ %1 = llvm.load %arg1 : !llvm.ptr -> f32
+ llvm.store %1, %arg2 : f32, !llvm.ptr
+ omp.yield(%arg2 : !llvm.ptr)
+}
diff --git a/mlir/test/lib/Dialect/Affine/CMakeLists.txt b/mlir/test/lib/Dialect/Affine/CMakeLists.txt
index af9f312694ab..14960a45d39b 100644
--- a/mlir/test/lib/Dialect/Affine/CMakeLists.txt
+++ b/mlir/test/lib/Dialect/Affine/CMakeLists.txt
@@ -3,6 +3,7 @@ add_mlir_library(MLIRAffineTransformsTestPasses
TestAffineDataCopy.cpp
TestAffineLoopUnswitching.cpp
TestAffineLoopParametricTiling.cpp
+ TestAccessAnalysis.cpp
TestDecomposeAffineOps.cpp
TestReifyValueBounds.cpp
TestLoopFusion.cpp
@@ -21,6 +22,7 @@ add_mlir_library(MLIRAffineTransformsTestPasses
LINK_LIBS PUBLIC
MLIRArithTransforms
+ MLIRAffineAnalysis
MLIRAffineTransforms
MLIRAffineUtils
MLIRIR
diff --git a/mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp b/mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp
new file mode 100644
index 000000000000..b38046299d50
--- /dev/null
+++ b/mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp
@@ -0,0 +1,83 @@
+//===- TestAccessAnalysis.cpp - Test affine access analysis utility -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass to test affine access analysis utilities.
+//
+//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
+#include "mlir/Dialect/Affine/Analysis/Utils.h"
+#include "mlir/Dialect/Affine/LoopFusionUtils.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Pass/Pass.h"
+
+#define PASS_NAME "test-affine-access-analysis"
+
+using namespace mlir;
+using namespace mlir::affine;
+
+namespace {
+
+struct TestAccessAnalysis
+ : public PassWrapper<TestAccessAnalysis, OperationPass<func::FuncOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAccessAnalysis)
+
+ StringRef getArgument() const final { return PASS_NAME; }
+ StringRef getDescription() const final {
+ return "Tests affine memory access analysis utility";
+ }
+
+ void runOnOperation() override;
+};
+
+} // namespace
+
+/// Gathers all affine load/store ops in loop nest rooted at 'forOp' into
+/// 'loadAndStoreOps'.
+static void
+gatherLoadsAndStores(AffineForOp forOp,
+ SmallVectorImpl<Operation *> &loadAndStoreOps) {
+ forOp.walk([&](Operation *op) {
+ if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
+ loadAndStoreOps.push_back(op);
+ });
+}
+
+void TestAccessAnalysis::runOnOperation() {
+ SmallVector<Operation *> loadStores;
+ SmallVector<AffineForOp> enclosingOps;
+ // Go over all top-level affine.for ops and test each contained affine
+ // access's contiguity along every surrounding loop IV.
+ for (auto forOp : getOperation().getOps<AffineForOp>()) {
+ loadStores.clear();
+ gatherLoadsAndStores(forOp, loadStores);
+ for (Operation *memOp : loadStores) {
+ enclosingOps.clear();
+ getAffineForIVs(*memOp, &enclosingOps);
+ for (unsigned d = 0, e = enclosingOps.size(); d < e; d++) {
+ int memRefDim;
+ bool isContiguous;
+ if (auto read = dyn_cast<AffineReadOpInterface>(memOp)) {
+ isContiguous = isContiguousAccess(enclosingOps[d].getInductionVar(),
+ read, &memRefDim);
+ } else {
+ isContiguous = isContiguousAccess(enclosingOps[d].getInductionVar(),
+ cast<AffineWriteOpInterface>(memOp),
+ &memRefDim);
+ }
+ if (isContiguous && memRefDim == 0)
+ memOp->emitRemark("contiguous along loop ") << d << '\n';
+ }
+ }
+ }
+}
+
+namespace mlir {
+void registerTestAffineAccessAnalysisPass() {
+ PassRegistration<TestAccessAnalysis>();
+}
+} // namespace mlir
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index bde4255ee4b3..abc0e43c7b7f 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -1768,7 +1768,6 @@ struct TestMergeSingleBlockOps
rewriter.inlineBlockBefore(&innerBlock, op);
rewriter.eraseOp(innerTerminator);
rewriter.eraseOp(op);
- rewriter.modifyOpInPlace(op, [] {});
return success();
}
};
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index 178a58e796b2..915f713f7047 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -630,15 +630,13 @@ struct TestVectorDistribution
});
MLIRContext *ctx = &getContext();
auto distributionFn = [](Value val) {
- // Create a map (d0, d1) -> (d1) to distribute along the inner
- // dimension. Once we support n-d distribution we can add more
- // complex cases.
+ // Create an identity dim map of the same rank as the vector.
VectorType vecType = dyn_cast<VectorType>(val.getType());
int64_t vecRank = vecType ? vecType.getRank() : 0;
OpBuilder builder(val.getContext());
if (vecRank == 0)
return AffineMap::get(val.getContext());
- return AffineMap::get(vecRank, 0, builder.getAffineDimExpr(vecRank - 1));
+ return AffineMap::getMultiDimIdentityMap(vecRank, val.getContext());
};
auto shuffleFn = [](Location loc, OpBuilder &builder, Value val,
Value srcIdx, int64_t warpSz) {
diff --git a/mlir/test/lit.cfg.py b/mlir/test/lit.cfg.py
index 904dfb680a04..7636ef30c2d3 100644
--- a/mlir/test/lit.cfg.py
+++ b/mlir/test/lit.cfg.py
@@ -161,6 +161,7 @@ tools.extend(
ToolSubst("transform-opt-ch2", unresolved="ignore"),
ToolSubst("transform-opt-ch3", unresolved="ignore"),
ToolSubst("transform-opt-ch4", unresolved="ignore"),
+ ToolSubst("mlir-transform-opt", unresolved="ignore"),
ToolSubst("%mlir_lib_dir", config.mlir_lib_dir, unresolved="ignore"),
ToolSubst("%mlir_src_dir", config.mlir_src_root, unresolved="ignore"),
]
diff --git a/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir b/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir
index 44141cc4eeaf..2b62b8c0bb14 100644
--- a/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir
+++ b/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir
@@ -13,10 +13,21 @@ func.func @trunc_bf16(%a : f32) {
}
func.func @main() {
- // CHECK: 1.00781
- %roundOneI = arith.constant 0x3f808000 : i32
- %roundOneF = arith.bitcast %roundOneI : i32 to f32
- call @trunc_bf16(%roundOneF): (f32) -> ()
+ // Note: this is a tie (low 16 bits are 0x8000). We expect the rounding behavior
+ // to break ties "to nearest-even", which in this case means downwards,
+ // since bit 16 is not set.
+ // CHECK: 1
+ %value_1_00391_I = arith.constant 0x3f808000 : i32
+ %value_1_00391_F = arith.bitcast %value_1_00391_I : i32 to f32
+ call @trunc_bf16(%value_1_00391_F): (f32) -> ()
+
+ // Note: this is a tie (low 16 bits are 0x8000). We expect the rounding behavior
+ // to break ties "to nearest-even", which in this case means upwards,
+ // since bit 16 is set.
+ // CHECK-NEXT: 1.0156
+ %value_1_01172_I = arith.constant 0x3f818000 : i32
+ %value_1_01172_F = arith.bitcast %value_1_01172_I : i32 to f32
+ call @trunc_bf16(%value_1_01172_F): (f32) -> ()
// CHECK-NEXT: -1
%noRoundNegOneI = arith.constant 0xbf808000 : i32
@@ -38,15 +49,27 @@ func.func @main() {
%neginff = arith.bitcast %neginfi : i32 to f32
call @trunc_bf16(%neginff): (f32) -> ()
+ // Note: this rounds upwards. As the mantissa was already saturated, this rounding
+ // causes the exponent to be incremented. As the exponent was already the
+ // maximum exponent value for finite values, this increment of the exponent
+ // causes this to overflow to +inf.
+ // CHECK-NEXT: inf
+ %big_overflowing_i = arith.constant 0x7f7fffff : i32
+ %big_overflowing_f = arith.bitcast %big_overflowing_i : i32 to f32
+ call @trunc_bf16(%big_overflowing_f): (f32) -> ()
+
+ // Same as the previous testcase but negative.
+ // CHECK-NEXT: -inf
+ %negbig_overflowing_i = arith.constant 0xff7fffff : i32
+ %negbig_overflowing_f = arith.bitcast %negbig_overflowing_i : i32 to f32
+ call @trunc_bf16(%negbig_overflowing_f): (f32) -> ()
+
+ // In contrast to the previous two testcases, the upwards-rounding here
+ // does not cause overflow.
// CHECK-NEXT: 3.38953e+38
- %bigi = arith.constant 0x7f7fffff : i32
- %bigf = arith.bitcast %bigi : i32 to f32
- call @trunc_bf16(%bigf): (f32) -> ()
-
- // CHECK-NEXT: -3.38953e+38
- %negbigi = arith.constant 0xff7fffff : i32
- %negbigf = arith.bitcast %negbigi : i32 to f32
- call @trunc_bf16(%negbigf): (f32) -> ()
+ %big_nonoverflowing_i = arith.constant 0x7f7effff : i32
+ %big_nonoverflowing_f = arith.bitcast %big_nonoverflowing_i : i32 to f32
+ call @trunc_bf16(%big_nonoverflowing_f): (f32) -> ()
// CHECK-NEXT: 1.625
%exprolli = arith.constant 0x3fcfffff : i32
diff --git a/mlir/test/python/dialects/gpu/dialect.py b/mlir/test/python/dialects/gpu/dialect.py
index 0293e8f276be..2f49e2e05399 100644
--- a/mlir/test/python/dialects/gpu/dialect.py
+++ b/mlir/test/python/dialects/gpu/dialect.py
@@ -27,6 +27,6 @@ def testMMAElementWiseAttr():
module = Module.create()
with InsertionPoint(module.body):
gpu.BlockDimOp(gpu.Dimension.y)
- # CHECK: %0 = gpu.block_dim y
+ # CHECK: %block_dim_y = gpu.block_dim y
print(module)
pass
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
index 4dfa05cc8ca8..0ba1a3a534e3 100644
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -43,6 +43,7 @@ void registerSliceAnalysisTestPass();
void registerSymbolTestPasses();
void registerRegionTestPasses();
void registerTestAffineDataCopyPass();
+void registerTestAffineAccessAnalysisPass();
void registerTestAffineReifyValueBoundsPass();
void registerTestAffineLoopUnswitchingPass();
void registerTestAffineWalk();
@@ -169,6 +170,7 @@ void registerTestPasses() {
registerSymbolTestPasses();
registerRegionTestPasses();
registerTestAffineDataCopyPass();
+ registerTestAffineAccessAnalysisPass();
registerTestAffineLoopUnswitchingPass();
registerTestAffineReifyValueBoundsPass();
registerTestAffineWalk();
diff --git a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
index 26bfbd5c11e8..cea49356538f 100644
--- a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
+++ b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Config/mlir-config.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/IR/MLIRContext.h"
@@ -29,10 +30,10 @@
using namespace mlir;
// Skip the test if the NVPTX target was not built.
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 0
-#define SKIP_WITHOUT_NVPTX(x) DISABLED_##x
-#else
+#if MLIR_ENABLE_CUDA_CONVERSIONS
#define SKIP_WITHOUT_NVPTX(x) x
+#else
+#define SKIP_WITHOUT_NVPTX(x) DISABLED_##x
#endif
class MLIRTargetLLVMNVVM : public ::testing::Test {
diff --git a/openmp/CMakeLists.txt b/openmp/CMakeLists.txt
index 03068af22629..3c4ff76ad6d1 100644
--- a/openmp/CMakeLists.txt
+++ b/openmp/CMakeLists.txt
@@ -46,9 +46,15 @@ if (OPENMP_STANDALONE_BUILD)
set(CMAKE_CXX_EXTENSIONS NO)
else()
set(OPENMP_ENABLE_WERROR ${LLVM_ENABLE_WERROR})
- # If building in tree, we honor the same install suffix LLVM uses.
- set(OPENMP_INSTALL_LIBDIR "lib${LLVM_LIBDIR_SUFFIX}" CACHE STRING
- "Path where built OpenMP libraries should be installed.")
+
+ # When building in tree we install the runtime according to the LLVM settings.
+ if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)
+ set(OPENMP_INSTALL_LIBDIR lib${LLVM_LIBDIR_SUFFIX}/${LLVM_DEFAULT_TARGET_TRIPLE} CACHE STRING
+ "Path where built openmp libraries should be installed.")
+ else()
+ set(OPENMP_INSTALL_LIBDIR "lib${LLVM_LIBDIR_SUFFIX}" CACHE STRING
+ "Path where built OpenMP libraries should be installed.")
+ endif()
if (NOT MSVC)
set(OPENMP_TEST_C_COMPILER ${LLVM_RUNTIME_OUTPUT_INTDIR}/clang)
diff --git a/openmp/libomptarget/plugins-nextgen/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/CMakeLists.txt
index 3cc2b8512b77..3ca02368253e 100644
--- a/openmp/libomptarget/plugins-nextgen/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/CMakeLists.txt
@@ -46,6 +46,7 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "${tmachine}$")
${OPENMP_PTHREAD_LIB}
NO_INSTALL_RPATH
+ BUILDTREE_ONLY
)
if(LIBOMPTARGET_DEP_LIBFFI_FOUND)
diff --git a/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
index 68ce63467a6c..9e0ea08d8375 100644
--- a/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
@@ -78,6 +78,7 @@ add_llvm_library(omptarget.rtl.amdgpu SHARED
${LDFLAGS_UNDEFINED}
NO_INSTALL_RPATH
+ BUILDTREE_ONLY
)
if ((OMPT_TARGET_DEFAULT) AND (LIBOMPTARGET_OMPT_SUPPORT))
diff --git a/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt
index 95b288cab311..2bfb47168a7f 100644
--- a/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt
@@ -38,6 +38,7 @@ add_llvm_library(omptarget.rtl.cuda SHARED
${OPENMP_PTHREAD_LIB}
NO_INSTALL_RPATH
+ BUILDTREE_ONLY
)
if ((OMPT_TARGET_DEFAULT) AND (LIBOMPTARGET_OMPT_SUPPORT))
diff --git a/openmp/libomptarget/src/CMakeLists.txt b/openmp/libomptarget/src/CMakeLists.txt
index 1a0e26f104be..9bc3f3339583 100644
--- a/openmp/libomptarget/src/CMakeLists.txt
+++ b/openmp/libomptarget/src/CMakeLists.txt
@@ -41,6 +41,7 @@ add_llvm_library(omptarget
omp
NO_INSTALL_RPATH
+ BUILDTREE_ONLY
)
target_include_directories(omptarget PRIVATE ${LIBOMPTARGET_INCLUDE_DIR})
diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
index 18af1af657f9..6d3904f84673 100644
--- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
@@ -1843,7 +1843,6 @@ cc_library(
":driver",
":driver_options_inc_gen",
":edit",
- ":install_api",
":lex",
":parse",
":sema",
@@ -2066,7 +2065,9 @@ cc_library(
deps = [
":ast",
":basic",
+ ":frontend",
":support",
+ "//llvm:Core",
"//llvm:Support",
"//llvm:TextAPI",
],
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index a1a5b7fe9bf4..16ceaadf276f 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -80,8 +80,8 @@ libc_support_library(
)
libc_support_library(
- name = "__support_macros_properties_float",
- hdrs = ["src/__support/macros/properties/float.h"],
+ name = "__support_macros_properties_types",
+ hdrs = ["src/__support/macros/properties/types.h"],
deps = [
":__support_macros_properties_architectures",
":__support_macros_properties_compiler",
@@ -332,7 +332,7 @@ libc_support_library(
deps = [
":__support_macros_attributes",
":__support_macros_config",
- ":__support_macros_properties_float",
+ ":__support_macros_properties_types",
":llvm_libc_macros_stdfix_macros",
],
)
@@ -697,7 +697,7 @@ libc_support_library(
":__support_cpp_type_traits",
":__support_libc_assert",
":__support_macros_attributes",
- ":__support_macros_properties_float",
+ ":__support_macros_properties_types",
":__support_math_extras",
":__support_uint128",
],
diff --git a/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl b/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
index 17eb30c8e458..7d815bc4a229 100644
--- a/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
+++ b/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
@@ -17,6 +17,7 @@ def libc_common_copts():
libc_include_path = paths.join(root_label.workspace_root, root_label.package)
return [
"-I" + libc_include_path,
+ "-I" + paths.join(libc_include_path, "include"),
"-DLIBC_NAMESPACE=" + LIBC_NAMESPACE,
]
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 59ee03d9a321..7860ccd0406a 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -6,7 +6,6 @@
# The MLIR "Multi-Level Intermediate Representation" Compiler Infrastructure
load("@bazel_skylib//rules:expand_template.bzl", "expand_template")
-load("@bazel_skylib//rules:write_file.bzl", "write_file")
load(
":build_defs.bzl",
"cc_headers_only",
@@ -36,7 +35,10 @@ expand_template(
"#cmakedefine01 MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS": "#define MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS 0",
"#cmakedefine MLIR_GREEDY_REWRITE_RANDOMIZER_SEED ${MLIR_GREEDY_REWRITE_RANDOMIZER_SEED}": "/* #undef MLIR_GREEDY_REWRITE_RANDOMIZER_SEED */",
"#cmakedefine01 MLIR_ENABLE_PDL_IN_PATTERNMATCH": "#define MLIR_ENABLE_PDL_IN_PATTERNMATCH 1",
- },
+ } | if_cuda_available(
+ {"#cmakedefine01 MLIR_ENABLE_CUDA_CONVERSIONS": "#define MLIR_ENABLE_CUDA_CONVERSIONS 1"},
+ {"#cmakedefine01 MLIR_ENABLE_CUDA_CONVERSIONS": "#define MLIR_ENABLE_CUDA_CONVERSIONS 0"},
+ ),
template = "include/mlir/Config/mlir-config.h.cmake",
)
@@ -4109,6 +4111,7 @@ cc_library(
includes = ["include"],
deps = [
":AffineDialect",
+ ":AffineTransforms",
":AffineUtils",
":ArithDialect",
":ConversionPassIncGen",
@@ -5468,7 +5471,6 @@ cc_library(
srcs = ["lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp"],
hdrs = ["include/mlir/Dialect/GPU/Pipelines/Passes.h"],
includes = ["include"],
- local_defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
deps = [
":AffineToStandard",
":ArithToLLVM",
@@ -5492,6 +5494,7 @@ cc_library(
":Transforms",
":VectorToLLVM",
":VectorToSCF",
+ ":config",
],
)
@@ -5541,6 +5544,7 @@ cc_library(
":Transforms",
":VCIXToLLVMIRTranslation",
":VectorDialect",
+ ":config",
"//llvm:Core",
"//llvm:MC",
"//llvm:Support",
@@ -6176,6 +6180,7 @@ cc_library(
":NVVMToLLVMIRTranslation",
":TargetLLVM",
":ToLLVMIRTranslation",
+ ":config",
"//llvm:NVPTXCodeGen",
"//llvm:Support",
],
@@ -9131,6 +9136,7 @@ cc_library(
":VectorTransforms",
":X86VectorDialect",
":X86VectorTransforms",
+ ":config",
],
)
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 68d9b23fd564..583411aa60e5 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -552,7 +552,6 @@ cc_library(
cc_library(
name = "TestTransforms",
srcs = glob(["lib/Transforms/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
":TestDialect",
@@ -579,7 +578,6 @@ cc_library(
cc_library(
name = "TestFuncToLLVM",
srcs = glob(["lib/Conversion/FuncToLLVM/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
":TestDialect",
@@ -594,7 +592,6 @@ cc_library(
cc_library(
name = "TestOneToNTypeConversion",
srcs = glob(["lib/Conversion/OneToNTypeConversion/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
":TestDialect",
@@ -653,7 +650,6 @@ cc_library(
cc_library(
name = "TestDLTI",
srcs = glob(["lib/Dialect/DLTI/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
":TestDialect",
@@ -667,7 +663,7 @@ cc_library(
cc_library(
name = "TestGPU",
srcs = glob(["lib/Dialect/GPU/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"] + if_cuda_available([
+ defines = if_cuda_available([
"MLIR_GPU_TO_CUBIN_PASS_ENABLE",
]),
includes = ["lib/Dialect/Test"],
@@ -714,7 +710,6 @@ cc_library(
cc_library(
name = "TestLinalg",
srcs = glob(["lib/Dialect/Linalg/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//llvm:Support",
@@ -748,7 +743,6 @@ cc_library(
cc_library(
name = "TestLLVM",
srcs = glob(["lib/Dialect/LLVM/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:AffineToStandard",
@@ -773,7 +767,6 @@ cc_library(
cc_library(
name = "TestMath",
srcs = glob(["lib/Dialect/Math/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:ArithDialect",
@@ -790,7 +783,6 @@ cc_library(
cc_library(
name = "TestMathToVCIX",
srcs = glob(["lib/Conversion/MathToVCIX/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:ArithDialect",
@@ -807,7 +799,6 @@ cc_library(
cc_library(
name = "TestMemRef",
srcs = glob(["lib/Dialect/MemRef/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
":TestDialect",
@@ -847,7 +838,6 @@ cc_library(
cc_library(
name = "TestNVGPU",
srcs = glob(["lib/Dialect/NVGPU/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:AffineDialect",
@@ -871,7 +861,6 @@ cc_library(
cc_library(
name = "TestSCF",
srcs = glob(["lib/Dialect/SCF/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//llvm:Support",
@@ -891,7 +880,6 @@ cc_library(
cc_library(
name = "TestArith",
srcs = glob(["lib/Dialect/Arith/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:ArithDialect",
@@ -908,7 +896,6 @@ cc_library(
cc_library(
name = "TestArmSME",
srcs = glob(["lib/Dialect/ArmSME/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:ArithToArmSME",
@@ -927,7 +914,6 @@ cc_library(
cc_library(
name = "TestBufferization",
srcs = glob(["lib/Dialect/Bufferization/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:BufferizationDialect",
@@ -989,7 +975,6 @@ cc_library(
cc_library(
name = "TestFunc",
srcs = glob(["lib/Dialect/Func/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
":TestDialect",
@@ -1005,7 +990,6 @@ cc_library(
cc_library(
name = "TestTensor",
srcs = glob(["lib/Dialect/Tensor/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:ArithDialect",
@@ -1022,7 +1006,6 @@ cc_library(
cc_library(
name = "TestVector",
srcs = glob(["lib/Dialect/Vector/*.cpp"]),
- defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
includes = ["lib/Dialect/Test"],
deps = [
"//mlir:AffineDialect",