summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPhilip Reames <preames@rivosinc.com>2024-02-14 13:21:56 -0800
committerPhilip Reames <listmail@philipreames.com>2024-02-14 13:34:08 -0800
commit8d326542926d4fba89cfb0ec01a0c1a1bd0789d6 (patch)
treed7ce4ddb6d0e2f37ad519548b383c9f61821d125
parent1da4494184566d68f32206e3ac5a8b90bc05889d (diff)
[RISCV] Add coverage for an upcoming set of vector narrowing changes
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll51
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll54
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll54
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll54
4 files changed, 213 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 37d05f08d0ff..d2e0113e69b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -148,3 +148,54 @@ define void @abs_v4i64(ptr %x) {
ret void
}
declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+
+define void @abs_v4i64_of_sext_v4i8(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsext.vf8 v10, v8
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %x
+ %a.ext = sext <4 x i8> %a to <4 x i64>
+ %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+ store <4 x i64> %b, ptr %x
+ ret void
+}
+
+define void @abs_v4i64_of_sext_v4i16(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i16>, ptr %x
+ %a.ext = sext <4 x i16> %a to <4 x i64>
+ %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+ store <4 x i64> %b, ptr %x
+ ret void
+}
+
+define void @abs_v4i64_of_sext_v4i32(ptr %x) {
+; CHECK-LABEL: abs_v4i64_of_sext_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i32>, ptr %x
+ %a.ext = sext <4 x i32> %a to <4 x i64>
+ %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
+ store <4 x i64> %b, ptr %x
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index c8de041a26f4..7bffbaa1c21e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -880,3 +880,57 @@ define <2 x i64> @vwadd_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
%g = add <2 x i64> %e, %f
ret <2 x i64> %g
}
+
+define <2 x i32> @vwadd_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i32_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = sext <2 x i8> %a to <2 x i32>
+ %d = sext <2 x i8> %b to <2 x i32>
+ %e = add <2 x i32> %c, %d
+ ret <2 x i32> %e
+}
+
+define <2 x i64> @vwadd_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i64_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = sext <2 x i8> %a to <2 x i64>
+ %d = sext <2 x i8> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
+
+define <2 x i64> @vwadd_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwadd_v2i64_of_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %x
+ %b = load <2 x i16>, ptr %y
+ %c = sext <2 x i16> %a to <2 x i64>
+ %d = sext <2 x i16> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index e51ca9f153dc..8779c6dd9fc3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -908,3 +908,57 @@ define <4 x i64> @crash(<4 x i16> %x, <4 x i16> %y) {
%c = add <4 x i64> %a, %b
ret <4 x i64> %c
}
+
+define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i32_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i32>
+ %d = zext <2 x i8> %b to <2 x i32>
+ %e = add <2 x i32> %c, %d
+ ret <2 x i32> %e
+}
+
+define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i64_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i64>
+ %d = zext <2 x i8> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
+
+define <2 x i64> @vwaddu_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwaddu_v2i64_of_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %x
+ %b = load <2 x i16>, ptr %y
+ %c = zext <2 x i16> %a to <2 x i64>
+ %d = zext <2 x i16> %b to <2 x i64>
+ %e = add <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index cf00fe14858d..d2d54796069b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -895,3 +895,57 @@ define <2 x i64> @vwsubu_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
%g = sub <2 x i64> %e, %f
ret <2 x i64> %g
}
+
+define <2 x i32> @vwsubu_v2i32_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i32_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwsubu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i32>
+ %d = zext <2 x i8> %b to <2 x i32>
+ %e = sub <2 x i32> %c, %d
+ ret <2 x i32> %e
+}
+
+define <2 x i64> @vwsubu_v2i64_of_v2i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i64_of_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwsubu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, ptr %x
+ %b = load <2 x i8>, ptr %y
+ %c = zext <2 x i8> %a to <2 x i64>
+ %d = zext <2 x i8> %b to <2 x i64>
+ %e = sub <2 x i64> %c, %d
+ ret <2 x i64> %e
+}
+
+define <2 x i64> @vwsubu_v2i64_of_v2i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vwsubu_v2i64_of_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwsubu.vv v8, v11, v10
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %x
+ %b = load <2 x i16>, ptr %y
+ %c = zext <2 x i16> %a to <2 x i64>
+ %d = zext <2 x i16> %b to <2 x i64>
+ %e = sub <2 x i64> %c, %d
+ ret <2 x i64> %e
+}