author     Craig Topper <craig.topper@sifive.com>  2023-12-21 08:38:32 -0800
committer  GitHub <noreply@github.com>  2023-12-21 08:38:32 -0800
commit     0dcff0db3a11d8a3cb8f78bf3b995f1211c2585d
tree       d60dad409229f13f8d74c5396d579ad9b9ae44e7
parent     199a0f9f5aaf72ff856f68e3bb708e783252af17
[RISCV] Add codegen support for experimental.vp.splice (#74688)
IR intrinsics were already defined, but no codegen support had been added. I extracted this code from our downstream. Some of it may have come from https://repo.hca.bsc.es/gitlab/rferrer/llvm-epi/ originally.
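
For illustration, a sketch of the semantics and the lowering strategy, as inferred from the tests added below (the %allones mask and the EVL argument names come from those tests): the splice conceptually concatenates the first %evla elements of %va with the first %evlb elements of %vb and extracts the result starting at the signed offset. A positive offset becomes a vslidedown of %va by the offset followed by a vslideup of %vb at element %evla - offset:

    %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va,
             <4 x i32> %vb, i32 5, <4 x i1> %allones, i32 %evla, i32 %evlb)
    ; lowers to roughly:
    ;   addi a0, a0, -5                    ; a0 = %evla - 5
    ;   vsetvli zero, a0, e32, m1, ta, ma
    ;   vslidedown.vi v8, v8, 5            ; drop the first 5 elements of %va
    ;   vsetvli zero, a1, e32, m1, ta, ma
    ;   vslideup.vx v8, v9, a0             ; insert %vb starting at %evla - 5

Since there are no slide instructions for i1 vectors, mask vectors are first widened to i8 with vmerge, spliced as above, and then compared back to a mask with vmsne (see the mask-vector tests).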
 llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp  |  17 +
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h           |   1 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp             |  89 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.h               |   1 +
 llvm/test/CodeGen/RISCV/vp-splice-fixed-vectors.ll      | 281 +
 llvm/test/CodeGen/RISCV/vp-splice-mask-fixed-vectors.ll | 316 +
 llvm/test/CodeGen/RISCV/vp-splice-mask-vectors.ll       | 553 +
 llvm/test/CodeGen/RISCV/vp-splice.ll                    | 330 +
 8 files changed, 1586 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 362fa92dd44b..3d21bd22e6ef 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1871,6 +1871,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
Res = PromoteIntOp_VP_STRIDED(N, OpNo);
break;
+ case ISD::EXPERIMENTAL_VP_SPLICE:
+ Res = PromoteIntOp_VP_SPLICE(N, OpNo);
+ break;
}
// If the result is null, the sub-method took care of registering results etc.
@@ -2549,6 +2552,20 @@ SDValue DAGTypeLegalizer::PromoteIntOp_VP_STRIDED(SDNode *N, unsigned OpNo) {
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
+SDValue DAGTypeLegalizer::PromoteIntOp_VP_SPLICE(SDNode *N, unsigned OpNo) {
+  SmallVector<SDValue, 6> NewOps(N->op_begin(), N->op_end());
+
+  if (OpNo == 2) { // Offset operand
+    NewOps[OpNo] = SExtPromotedInteger(N->getOperand(OpNo));
+    return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
+  }
+
+  assert((OpNo == 4 || OpNo == 5) && "Unexpected operand for promotion");
+
+  NewOps[OpNo] = ZExtPromotedInteger(N->getOperand(OpNo));
+  return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
+}
+
//===----------------------------------------------------------------------===//
// Integer Result Expansion
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 9d5931b44ac6..4a249e7a2dc9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -410,6 +410,7 @@ private:
SDValue PromoteIntOp_STACKMAP(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_PATCHPOINT(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_VP_STRIDED(SDNode *N, unsigned OpNo);
+ SDValue PromoteIntOp_VP_SPLICE(SDNode *N, unsigned OpNo);
void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index de15bea72e46..d6dedd669ffd 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -675,7 +675,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_FP_TO_UINT, ISD::VP_SETCC, ISD::VP_SIGN_EXTEND,
ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE, ISD::VP_SMIN,
ISD::VP_SMAX, ISD::VP_UMIN, ISD::VP_UMAX,
- ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE};
+ ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE, ISD::EXPERIMENTAL_VP_SPLICE};
static const unsigned FloatingPointVPOps[] = {
ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL,
@@ -688,7 +688,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_FCEIL, ISD::VP_FFLOOR, ISD::VP_FROUND,
ISD::VP_FROUNDEVEN, ISD::VP_FCOPYSIGN, ISD::VP_FROUNDTOZERO,
ISD::VP_FRINT, ISD::VP_FNEARBYINT, ISD::VP_IS_FPCLASS,
- ISD::EXPERIMENTAL_VP_REVERSE};
+ ISD::EXPERIMENTAL_VP_REVERSE, ISD::EXPERIMENTAL_VP_SPLICE};
static const unsigned IntegerVecReduceOps[] = {
ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
@@ -773,6 +773,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
+ setOperationAction(ISD::EXPERIMENTAL_VP_SPLICE, VT, Custom);
setOperationAction(ISD::EXPERIMENTAL_VP_REVERSE, VT, Custom);
setOperationPromotedToType(
@@ -1147,6 +1148,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_SETCC, ISD::VP_TRUNCATE},
VT, Custom);
+ setOperationAction(ISD::EXPERIMENTAL_VP_SPLICE, VT, Custom);
setOperationAction(ISD::EXPERIMENTAL_VP_REVERSE, VT, Custom);
continue;
}
@@ -6637,6 +6639,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
!Subtarget.hasVInstructionsF16()))
return SplitVPOp(Op, DAG);
return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
+ case ISD::EXPERIMENTAL_VP_SPLICE:
+ return lowerVPSpliceExperimental(Op, DAG);
case ISD::EXPERIMENTAL_VP_REVERSE:
return lowerVPReverseExperimental(Op, DAG);
}
@@ -10583,6 +10587,87 @@ SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op,
}
SDValue
+RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+
+  SDValue Op1 = Op.getOperand(0);
+  SDValue Op2 = Op.getOperand(1);
+  SDValue Offset = Op.getOperand(2);
+  SDValue Mask = Op.getOperand(3);
+  SDValue EVL1 = Op.getOperand(4);
+  SDValue EVL2 = Op.getOperand(5);
+
+  const MVT XLenVT = Subtarget.getXLenVT();
+  MVT VT = Op.getSimpleValueType();
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
+    Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
+    MVT MaskVT = getMaskTypeFor(ContainerVT);
+    Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+  }
+
+  bool IsMaskVector = VT.getVectorElementType() == MVT::i1;
+  if (IsMaskVector) {
+    ContainerVT = ContainerVT.changeVectorElementType(MVT::i8);
+
+    // Expand input operands
+    SDValue SplatOneOp1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                                      DAG.getUNDEF(ContainerVT),
+                                      DAG.getConstant(1, DL, XLenVT), EVL1);
+    SDValue SplatZeroOp1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                                       DAG.getUNDEF(ContainerVT),
+                                       DAG.getConstant(0, DL, XLenVT), EVL1);
+    Op1 = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Op1, SplatOneOp1,
+                      SplatZeroOp1, EVL1);
+
+    SDValue SplatOneOp2 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                                      DAG.getUNDEF(ContainerVT),
+                                      DAG.getConstant(1, DL, XLenVT), EVL2);
+    SDValue SplatZeroOp2 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
+                                       DAG.getUNDEF(ContainerVT),
+                                       DAG.getConstant(0, DL, XLenVT), EVL2);
+    Op2 = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Op2, SplatOneOp2,
+                      SplatZeroOp2, EVL2);
+  }
+
+  int64_t ImmValue = cast<ConstantSDNode>(Offset)->getSExtValue();
+  SDValue DownOffset, UpOffset;
+  if (ImmValue >= 0) {
+    // The operand is a TargetConstant; we need to rebuild it as a regular
+    // constant.
+    DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
+    UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, EVL1, DownOffset);
+  } else {
+    // The operand is a TargetConstant; we need to rebuild it as a regular
+    // constant rather than negating the original operand.
+    UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
+    DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, EVL1, UpOffset);
+  }
+
+  SDValue SlideDown =
+      getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+                    Op1, DownOffset, Mask, UpOffset);
+  SDValue Result = getVSlideup(DAG, Subtarget, DL, ContainerVT, SlideDown, Op2,
+                               UpOffset, Mask, EVL2, RISCVII::TAIL_AGNOSTIC);
+
+  if (IsMaskVector) {
+    // Truncate Result back to a mask vector (Result has same EVL as Op2)
+    Result = DAG.getNode(
+        RISCVISD::SETCC_VL, DL, ContainerVT.changeVectorElementType(MVT::i1),
+        {Result, DAG.getConstant(0, DL, ContainerVT),
+         DAG.getCondCode(ISD::SETNE), DAG.getUNDEF(getMaskTypeFor(ContainerVT)),
+         Mask, EVL2});
+  }
+
+  if (!VT.isFixedLengthVector())
+    return Result;
+  return convertFromScalableVector(VT, Result, DAG, Subtarget);
+}
+
+SDValue
RISCVTargetLowering::lowerVPReverseExperimental(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 41a2dc5771c8..2d9f716cdf9a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -910,6 +910,7 @@ private:
SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerVPSpliceExperimental(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVPReverseExperimental(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/RISCV/vp-splice-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/vp-splice-fixed-vectors.ll
new file mode 100644
index 000000000000..f7c8c251e197
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/vp-splice-fixed-vectors.ll
@@ -0,0 +1,281 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
+; RUN: < %s | FileCheck %s
+
+declare <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64>, <2 x i64>, i32, <2 x i1>, i32, i32)
+declare <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32>, <4 x i32>, i32, <4 x i1>, i32, i32)
+declare <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16>, <8 x i16>, i32, <8 x i1>, i32, i32)
+declare <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8>, <16 x i8>, i32, <16 x i1>, i32, i32)
+
+declare <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double>, <2 x double>, i32, <2 x i1>, i32, i32)
+declare <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float>, <4 x float>, i32, <4 x i1>, i32, i32)
+
+define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+
+ %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2i64_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+
+ %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2i64_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <2 x i64> %v
+}
+
+define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4i32_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4i32_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <4 x i32> %v
+}
+
+define <8 x i16> @test_vp_splice_v8i16(<8 x i16> %va, <8 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+
+ %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @test_vp_splice_v8i16_negative_offset(<8 x i16> %va, <8 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8i16_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+
+ %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 -5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @test_vp_splice_v8i16_masked(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8i16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <8 x i16> %v
+}
+
+define <16 x i8> @test_vp_splice_v16i8(<16 x i8> %va, <16 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+
+ %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @test_vp_splice_v16i8_negative_offset(<16 x i8> %va, <16 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v16i8_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+
+ %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 -5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v16i8_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <16 x i8> %v
+}
+
+define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+
+ %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <2 x double> %v
+}
+
+define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2f64_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+
+ %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <2 x double> %v
+}
+
+define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2f64_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <2 x double> %v
+}
+
+define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+
+ %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <4 x float> %v
+}
+
+define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4f32_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+
+ %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <4 x float> %v
+}
+
+define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4f32_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <4 x float> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/vp-splice-mask-fixed-vectors.ll
new file mode 100644
index 000000000000..9579973aee0d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/vp-splice-mask-fixed-vectors.ll
@@ -0,0 +1,316 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
+; RUN: < %s | FileCheck %s
+
+declare <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1>, <2 x i1>, i32, <2 x i1>, i32, i32)
+declare <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1>, <4 x i1>, i32, <4 x i1>, i32, i32)
+declare <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1>, <8 x i1>, i32, <8 x i1>, i32, i32)
+declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 x i1>, i32, <16 x i1>, i32, i32)
+
+define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+
+ %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <2 x i1> %v
+}
+
+define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+
+ %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 -5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <2 x i1> %v
+}
+
+define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v2i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <2 x i1> %v
+}
+
+define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+
+ %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <4 x i1> %v
+}
+
+define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+
+ %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 -5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <4 x i1> %v
+}
+
+define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v4i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <4 x i1> %v
+}
+
+define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+
+ %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <8 x i1> %v
+}
+
+define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+
+ %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 -5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <8 x i1> %v
+}
+
+define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <8 x i1> %v
+}
+
+define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+
+ %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <16 x i1> %v
+}
+
+define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v16i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+
+ %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 -5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <16 x i1> %v
+}
+
+define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v16i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 5, <16 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <16 x i1> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/vp-splice-mask-vectors.ll
new file mode 100644
index 000000000000..4eaadb3c24fb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/vp-splice-mask-vectors.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, i32, <vscale x 1 x i1>, i32, i32)
+declare <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32, <vscale x 2 x i1>, i32, i32)
+declare <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32, <vscale x 4 x i1>, i32, i32)
+declare <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32, <vscale x 8 x i1>, i32, i32)
+declare <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32, <vscale x 16 x i1>, i32, i32)
+declare <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32, <vscale x 32 x i1>, i32, i32)
+declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32, <vscale x 64 x i1>, i32, i32)
+
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+
+ %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x i1> %v
+}
+
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+
+ %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x i1> %v
+}
+
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x i1> %v
+}
+
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i1> %v
+}
+
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i1> %v
+}
+
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i1> %v
+}
+
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+
+ %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 4 x i1> %v
+}
+
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+
+ %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 -5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 4 x i1> %v
+}
+
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 4 x i1> %v
+}
+
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+
+ %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 8 x i1> %v
+}
+
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v9, v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vmsne.vi v0, v9, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+
+ %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 -5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 8 x i1> %v
+}
+
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v11, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 8 x i1> %v
+}
+
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v10, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+
+ %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 16 x i1> %v
+}
+
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 5
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+
+ %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 -5, <vscale x 16 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 16 x i1> %v
+}
+
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v14, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v10, v14, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vslideup.vx v10, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 16 x i1> %v
+}
+
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+
+ %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 32 x i1> %v
+}
+
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, m4, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v12, 5
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+
+ %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 -5, <vscale x 32 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 32 x i1> %v
+}
+
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v16, v16, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vslideup.vx v16, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
+; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 32 x i1> %v
+}
+
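+; At nxv64i1 the widened byte vectors need full m8 register groups, so the
+; intermediates live in the v8, v16 and v24 groups.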
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v16, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+
+ %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 64 x i1> %v
+}
+
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, m8, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v16, 5
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+
+ %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 -5, <vscale x 64 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 64 x i1> %v
+}
+
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vslidedown.vi v24, v24, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vslideup.vx v24, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, ma
+; CHECK-NEXT: vmsne.vi v8, v24, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 64 x i1> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/vp-splice.ll b/llvm/test/CodeGen/RISCV/vp-splice.ll
new file mode 100644
index 000000000000..7d85370e390b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/vp-splice.ll
@@ -0,0 +1,330 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
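+; Each tested element type gets three cases: a splice by +5 under an all-ones
+; mask, a splice by -5, and a splice by +5 under an arbitrary mask.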
+declare <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, <vscale x 2 x i1>, i32, i32)
+
+declare <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, <vscale x 1 x i1>, i32, i32)
+declare <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32, i32)
+declare <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, <vscale x 4 x i1>, i32, i32)
+declare <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, <vscale x 8 x i1>, i32, i32)
+
+declare <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i32, <vscale x 1 x i1>, i32, i32)
+declare <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i32, <vscale x 2 x i1>, i32, i32)
+
+declare <vscale x 16 x i64> @llvm.experimental.vp.splice.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, i32, <vscale x 16 x i1>, i32, i32)
+
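+; A positive offset OFF lowers to a vslidedown.vi of the first operand by OFF
+; with VL = evla - OFF, followed by a vslideup.vx of the second operand to
+; position evla - OFF.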
+define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v10, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i64> %v
+}
+
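+; A negative offset keeps the trailing |OFF| elements of the first operand:
+; vsetivli fixes VL = |OFF| and vslidedown.vx shifts by evla - |OFF|, then the
+; second operand is slid up by |OFF| as an immediate.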
+define <vscale x 2 x i64> @test_vp_splice_nxv2i64_negative_offset(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i64_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e64, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i64> %v
+}
+
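+; The masked form applies the same slides under the given mask (v0.t), with
+; the mask-undisturbed (mu) policy on the final vslideup.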
+define <vscale x 2 x i64> @test_vp_splice_nxv2i64_masked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i64_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 1 x i64> @test_vp_splice_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+
+ %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1i64_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+
+ %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @test_vp_splice_nxv1i64_masked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1i64_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i32_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_masked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2i32_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+
+ %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv4i16_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+
+ %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 -5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv4i16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+
+ %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv8i8_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+
+ %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 -5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv8i8_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 1 x double> @test_vp_splice_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+
+ %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x double> %v
+}
+
+define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1f64_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+
+ %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x double> %v
+}
+
+define <vscale x 1 x double> @test_vp_splice_nxv1f64_masked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv1f64_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 1 x double> %v
+}
+
+define <vscale x 2 x float> @test_vp_splice_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2f32_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+ %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+
+ %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @test_vp_splice_nxv2f32_masked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2f32_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ ret <vscale x 2 x float> %v
+}