Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/fold-binop-into-select.ll')
 -rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fold-binop-into-select.ll | 60
 1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binop-into-select.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binop-into-select.ll
new file mode 100644
index 000000000000..3a8d08f306a5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-binop-into-select.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+; The following binop x, (zext i1) tests will be vector-legalized into a vselect
+; of two splat_vectors, but on RV64 the splat value will be implicitly
+; truncated:
+;
+; t15: nxv2i32 = splat_vector Constant:i64<1>
+; t13: nxv2i32 = splat_vector Constant:i64<0>
+; t16: nxv2i32 = vselect t2, t15, t13
+; t7: nxv2i32 = add t4, t16
+;
+; Make sure that foldSelectWithIdentityConstant in DAGCombiner.cpp handles the
+; truncating splat, so the vselect is folded into the binop as a mask.
+
+define <vscale x 2 x i32> @i1_zext_add(<vscale x 2 x i1> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: i1_zext_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
+; CHECK-NEXT: ret
+ %zext = zext <vscale x 2 x i1> %a to <vscale x 2 x i32>
+ %add = add <vscale x 2 x i32> %b, %zext
+ ret <vscale x 2 x i32> %add
+}
+
+define <vscale x 2 x i32> @i1_zext_add_commuted(<vscale x 2 x i1> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: i1_zext_add_commuted:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
+; CHECK-NEXT: ret
+ %zext = zext <vscale x 2 x i1> %a to <vscale x 2 x i32>
+ %add = add <vscale x 2 x i32> %zext, %b
+ ret <vscale x 2 x i32> %add
+}
+
+define <vscale x 2 x i32> @i1_zext_sub(<vscale x 2 x i1> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: i1_zext_sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %zext = zext <vscale x 2 x i1> %a to <vscale x 2 x i32>
+ %sub = sub <vscale x 2 x i32> %b, %zext
+ ret <vscale x 2 x i32> %sub
+}
+
+define <vscale x 2 x i32> @i1_zext_or(<vscale x 2 x i1> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: i1_zext_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vor.vi v8, v8, 1, v0.t
+; CHECK-NEXT: ret
+ %zext = zext <vscale x 2 x i1> %a to <vscale x 2 x i32>
+ %or = or <vscale x 2 x i32> %b, %zext
+ ret <vscale x 2 x i32> %or
+}
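
For reference, below is a hand-written sketch of the select form that the
combine is expected to produce, i.e. add x, (vselect m, splat(1), splat(0))
rewritten as vselect m, (add x, splat(1)), x. The function name and the
insertelement/shufflevector splat idiom are illustrative only, not part of the
committed test; under the RUN lines above it is expected to lower to the same
masked vadd.vi as i1_zext_add.

; Sketch only: the select-of-binop form after the DAG combine.
define <vscale x 2 x i32> @i1_zext_add_select_form(<vscale x 2 x i1> %a, <vscale x 2 x i32> %b) {
  ; Generic scalable-vector splat of 1: insertelement + shufflevector.
  %head = insertelement <vscale x 2 x i32> poison, i32 1, i32 0
  %ones = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %add = add <vscale x 2 x i32> %b, %ones
  ; On the false lanes, add-by-zero is an identity, so selecting %b directly is
  ; equivalent; this is what lets the backend emit a masked add.
  %sel = select <vscale x 2 x i1> %a, <vscale x 2 x i32> %add, <vscale x 2 x i32> %b
  ret <vscale x 2 x i32> %sel
}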