author     Michael Maitland <michaeltmaitland@gmail.com>  2024-03-07 13:40:30 -0800
committer  Michael Maitland <michaeltmaitland@gmail.com>  2024-04-03 15:27:15 -0700
commit     8aa3a77eaf198afb7e01453e6daf6566b687945d (patch)
tree       3ed76260eef3f1a787ec5198725e2ebbf78f424b
parent     029e1d751503268e3d8b01db769e710835c3010d (diff)
[RISCV][GISEL] Legalize G_ZEXT, G_SEXT, G_ANYEXT, G_SPLAT_VECTOR, and G_ICMP for scalable vector types
This patch legalizes G_ZEXT, G_SEXT, and G_ANYEXT for scalable vector types. If the source is a legal mask (s1 element) vector type, the instruction is legalized as an element-wise select: the condition is the mask-typed source operand, the true value is 1 for zero/any-extension or -1 for sign-extension, and the false value is 0. If the types are legal integer or integer vector types, the instruction is marked as legal.

The legalization of the extends may introduce a G_SPLAT_VECTOR, which also needs to be legalized in this patch for the extend test cases to pass. A G_SPLAT_VECTOR is legal if the vector type is a legal integer or floating-point vector type and the source operand has type sXLen. This is because the SelectionDAG patterns only support sXLen-typed ISD::SPLAT_VECTORs, and we would like to reuse those patterns. A G_SPLAT_VECTOR is custom legalized if it has a legal s1 element vector type and an s1 scalar operand: it is legalized to G_VMSET_VL or G_VMCLR_VL if the splat is all ones or all zeros, respectively. A non-constant mask splat is legalized by promoting the scalar value to s8; to turn the s8 element vector back into an s1 vector, we use a G_ICMP.

For the splat-vector and extend tests to pass, we also need to legalize G_ICMP in this patch. A G_ICMP is legal if the destination type is a legal bool vector and the LHS and RHS are legal integer vector types.
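For illustration, after custom legalization a mask any-extend is rewritten into two splats plus an element-wise select, along these lines (a sketch in generic MIR mirroring the RV32 check lines of the anyext test below; register names are hypothetical, and on RV64 the splatted constants are additionally any-extended to sXLen):

    %mask:_(<vscale x 1 x s1>) = COPY $v0
    %c0:_(s32) = G_CONSTANT i32 0
    %c1:_(s32) = G_CONSTANT i32 1
    %splat0:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %c0(s32)
    %splat1:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %c1(s32)
    %ext:_(<vscale x 1 x s8>) = G_SELECT %mask(<vscale x 1 x s1>), %splat1, %splat0

For sign-extension the true value is -1 instead of 1.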
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp  9
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp  2
-rw-r--r--  llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp  184
-rw-r--r--  llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h  2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrGISel.td  25
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir  1589
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir  810
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir  1589
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir  694
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir  817
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir  116
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir  1589
-rw-r--r--  llvm/test/MachineVerifier/test_g_fcmp.mir  13
-rw-r--r--  llvm/test/MachineVerifier/test_g_icmp.mir  13
14 files changed, 7436 insertions, 16 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 797bbf7efe60..95c6a359e52e 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3006,6 +3006,15 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI);
return Legalized;
}
+ case TargetOpcode::G_SPLAT_VECTOR: {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
}
}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index b8ba782254c3..6b35caf83491 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1278,7 +1278,7 @@ MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
return DstTy.isScalar();
else
return DstTy.isVector() &&
- DstTy.getNumElements() == Op0Ty.getNumElements();
+ DstTy.getElementCount() == Op0Ty.getElementCount();
}() && "Type Mismatch");
break;
}
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 7f3510727dbc..38c1f9868d7d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -139,20 +139,21 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, s32, sXLen)
.minScalarSameAs(1, 0);
+ auto &ExtActions =
+ getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
+ .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
if (ST.is64Bit()) {
- getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
- .legalFor({{sXLen, s32}})
- .maxScalar(0, sXLen);
-
+ ExtActions.legalFor({{sXLen, s32}});
getActionDefinitionsBuilder(G_SEXT_INREG)
.customFor({sXLen})
.maxScalar(0, sXLen)
.lower();
} else {
- getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}).maxScalar(0, sXLen);
-
getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower();
}
+ ExtActions.customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
+ .maxScalar(0, sXLen);
// Merge/Unmerge
for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
@@ -235,7 +236,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
getActionDefinitionsBuilder(G_ICMP)
.legalFor({{sXLen, sXLen}, {sXLen, p0}})
- .widenScalarToNextPow2(1)
+ .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
+ .widenScalarOrEltToNextPow2OrMinSize(1, 8)
.clampScalar(1, sXLen, sXLen)
.clampScalar(0, sXLen, sXLen);
@@ -418,6 +421,29 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, sXLen, sXLen)
.customFor({sXLen});
+ auto &SplatActions =
+ getActionDefinitionsBuilder(G_SPLAT_VECTOR)
+ .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIs(1, sXLen)))
+ .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeIs(1, s1)));
+ // Handle case of s64 element vectors on RV32. If the subtarget does not have
+ // f64, then try to lower it to G_SPLAT_VECTOR_SPLIT_64_VL. If the subtarget
+ // does have f64, then we don't know whether the type is an f64 or an i64,
+ // so mark the G_SPLAT_VECTOR as legal and decide later what to do with it,
+ // depending on how the instructions it consumes are legalized. They are not
+ // legalized yet since legalization is in reverse postorder, so we cannot
+ // make the decision at this moment.
+ if (XLen == 32) {
+ if (ST.hasVInstructionsF64() && ST.hasStdExtD())
+ SplatActions.legalIf(all(
+ typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
+ else if (ST.hasVInstructionsI64())
+ SplatActions.customIf(all(
+ typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
+ }
+
+ SplatActions.clampScalar(1, sXLen, sXLen);
+
getLegacyLegalizerInfo().computeTables();
}
@@ -576,7 +602,145 @@ bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
}
+ MI.eraseFromParent();
+ return true;
+}
+
+// Custom-lower extensions from mask vectors by using a vselect either with 1
+// for zero/any-extension or -1 for sign-extension:
+// (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
+// Note that any-extension is lowered identically to zero-extension.
+bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+
+ unsigned Opc = MI.getOpcode();
+ assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
+ Opc == TargetOpcode::G_ANYEXT);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+
+ LLT DstTy = MRI.getType(Dst);
+ int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
+ LLT DstEltTy = DstTy.getElementType();
+ auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
+ auto SplatTrue =
+ MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
+ MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);
+
+ MI.eraseFromParent();
+ return true;
+}
+
+/// Return the mask type suitable for masking the provided vector type. This
+/// is simply an i1 element type vector of the same (possibly scalable)
+/// length.
+static LLT getMaskTypeFor(LLT VecTy) {
+ assert(VecTy.isVector());
+ ElementCount EC = VecTy.getElementCount();
+ return LLT::vector(EC, LLT::scalar(1));
+}
+
+/// Creates an all ones mask suitable for masking a vector of type VecTy with
+/// vector length VL.
+static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
+ MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI) {
+ LLT MaskTy = getMaskTypeFor(VecTy);
+ return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
+}
+
+/// Gets the two common "VL" operands: an all-ones mask and the vector length.
+/// VecTy is a scalable vector type.
+static std::pair<MachineInstrBuilder, Register>
+buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI) {
+ LLT VecTy = Dst.getLLTTy(MRI);
+ assert(VecTy.isScalableVector() && "Expecting scalable container type");
+ Register VL(RISCV::X0);
+ MachineInstrBuilder Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
+ return {Mask, VL};
+}
+
+static MachineInstrBuilder
+buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
+ Register Hi, Register VL, MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI) {
+ // TODO: If the Hi bits of the splat are undefined, then it's fine to just
+ // splat Lo even if it might be sign extended. I don't think we have
+ // introduced a case where we're building an s64 where the upper bits are undef
+ // yet.
+
+ // Fall back to a stack store and stride x0 vector load.
+ // TODO: need to lower G_SPLAT_VECTOR_SPLIT_I64. This is done in
+ // preprocessDAG in SDAG.
+ return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
+ {Passthru, Lo, Hi, VL});
+}
+
+static MachineInstrBuilder
+buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
+ const SrcOp &Scalar, Register VL,
+ MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
+ assert(Scalar.getLLTTy(MRI) == LLT::scalar(64) && "Unexpected VecTy!");
+ auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
+ return buildSplatPartsS64WithVL(Dst, Passthru, Unmerge.getReg(0),
+ Unmerge.getReg(1), VL, MIB, MRI);
+}
+
+// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
+// legal equivalently-sized i8 type, so we can use that as a go-between.
+// Splats of s1 types that have constant value can be legalized as VMSET_VL or
+// VMCLR_VL.
+bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+ assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+
+ Register Dst = MI.getOperand(0).getReg();
+ Register SplatVal = MI.getOperand(1).getReg();
+
+ LLT VecTy = MRI.getType(Dst);
+ LLT XLenTy(STI.getXLenVT());
+
+ // Handle case of s64 element vectors on rv32
+ if (XLenTy.getSizeInBits() == 32 &&
+ VecTy.getElementType().getSizeInBits() == 64) {
+ auto [_, VL] = buildDefaultVLOps(Dst, MIB, MRI);
+ buildSplatSplitS64WithVL(Dst, MIB.buildUndef(VecTy), SplatVal, VL, MIB,
+ MRI);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // All-zeros or all-ones splats are handled specially.
+ MachineInstr &SplatValMI = *MRI.getVRegDef(SplatVal);
+ if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
+ auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
+ MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
+ MI.eraseFromParent();
+ return true;
+ }
+ if (isNullOrNullSplat(SplatValMI, MRI)) {
+ auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
+ MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
+ MI.eraseFromParent();
+ return true;
+ }
+ // Handle non-constant mask splat (i.e. not sure if it's all zeros or all
+ // ones) by promoting it to an s8 splat.
+ LLT InterEltTy = LLT::scalar(8);
+ LLT InterTy = VecTy.changeElementType(InterEltTy);
+ auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
+ auto And =
+ MIB.buildAnd(InterEltTy, ZExtSplatVal, MIB.buildConstant(InterEltTy, 1));
+ auto LHS = MIB.buildSplatVector(InterTy, And);
+ auto ZeroSplat =
+ MIB.buildSplatVector(InterTy, MIB.buildConstant(InterEltTy, 0));
+ MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, LHS, ZeroSplat);
MI.eraseFromParent();
return true;
}
@@ -640,6 +804,12 @@ bool RISCVLegalizerInfo::legalizeCustom(
return legalizeVAStart(MI, MIRBuilder);
case TargetOpcode::G_VSCALE:
return legalizeVScale(MI, MIRBuilder);
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ANYEXT:
+ return legalizeExt(MI, MIRBuilder);
+ case TargetOpcode::G_SPLAT_VECTOR:
+ return legalizeSplatVector(MI, MIRBuilder);
}
llvm_unreachable("expected switch to return");
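
As a rough illustration of the non-constant mask splat path in legalizeSplatVector above, the s1 splat is rebuilt through an s8 splat and a compare, roughly as follows (a sketch in generic MIR with hypothetical register names; the nxv8s1/nxv8s8 types are only an example):

    ; given some non-constant %x:_(s1), the G_SPLAT_VECTOR of %x becomes:
    %z:_(s8) = G_ZEXT %x(s1)
    %c1:_(s8) = G_CONSTANT i8 1
    %lsb:_(s8) = G_AND %z, %c1
    %lhs:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %lsb(s8)
    %c0:_(s8) = G_CONSTANT i8 0
    %rhs:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %c0(s8)
    %m:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), %lhs(<vscale x 8 x s8>), %rhs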
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index e2a98c8d2c73..5bb1e7a72827 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -43,6 +43,8 @@ private:
bool legalizeVAStart(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
+ bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index 54e22d625781..ba40662c49c1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -32,3 +32,28 @@ def G_READ_VLENB : RISCVGenericInstruction {
let hasSideEffects = false;
}
def : GINodeEquiv<G_READ_VLENB, riscv_read_vlenb>;
+
+// Pseudo equivalent to a RISCVISD::VMCLR_VL
+def G_VMCLR_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMCLR_VL, riscv_vmclr_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMSET_VL
+def G_VMSET_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMSET_VL, riscv_vmset_vl>;
+
+// Pseudo equivalent to a RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL. There is no
+// GINodeEquiv record for it because it gets lowered before instruction
+// selection.
+def G_SPLAT_VECTOR_SPLIT_I64_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$passthru, type1:$hi, type1:$lo, type2:$vl);
+ let hasSideEffects = false;
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
new file mode 100644
index 000000000000..3a2d40fe1931
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
@@ -0,0 +1,1589 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV32 %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV64 %s
+
+# Extend from s1 element vectors
+---
+name: anyext_nxv1i8_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv1i8_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i8_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s8>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i16_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv1i16_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i16_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i32_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv1i32_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i32_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv1i64_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i64_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i8_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv2i8_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i8_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s8>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i16_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv2i16_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i16_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv2i32_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i32_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv2i64_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i64_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i8_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv4i8_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i8_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s8>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i16_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv4i16_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i16_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i32_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv4i32_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i32_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv4i64_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i64_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i8_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv8i8_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i8_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s8>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv8i16_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv8i16_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i16_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv8i32_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv8i32_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i32_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv8i64_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i64_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i8_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv16i8_nxv16i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv16i8_nxv16i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = COPY $v0
+ %0:_(<vscale x 16 x s8>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv16i16_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv16i16_nxv16i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv16i16_nxv16i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = COPY $v0
+ %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv16i32_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv16i32_nxv16i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv16i32_nxv16i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = COPY $v0
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv32i8_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv32i8_nxv32i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv32i8_nxv32i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = COPY $v0
+ %0:_(<vscale x 32 x s8>) = G_ANYEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv32i16_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv32i16_nxv32i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv32i16_nxv32i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = COPY $v0
+ %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv64i8_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: anyext_nxv64i8_nxv64i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv64i8_nxv64i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = COPY $v0
+ %0:_(<vscale x 64 x s8>) = G_ANYEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: anyext_nxv1i16_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv1i16_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i16_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i32_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv1i32_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i32_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv1i64_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i64_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i16_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv2i16_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i16_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv2i32_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i32_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv2i64_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i64_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i16_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv4i16_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i16_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i32_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv4i32_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i32_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv4i64_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i64_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i16_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv8i16_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i16_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv8i32_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv8i32_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i32_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv8i64_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i64_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i16_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv16i16_nxv16i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv16i16_nxv16i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv16i32_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv16i32_nxv16i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m4
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv16i32_nxv16i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m4
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = COPY $v8m4
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv32i16_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv32i16_nxv32i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ANYEXT [[COPY]](<vscale x 32 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv32i16_nxv32i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ANYEXT [[COPY]](<vscale x 32 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = COPY $v8m4
+ %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: anyext_nxv1i32_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv1i32_nxv1i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i32_nxv1i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv1i64_nxv1i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i64_nxv1i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv2i32_nxv2i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i32_nxv2i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv2i64_nxv2i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i64_nxv2i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i32_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv4i32_nxv4i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i32_nxv4i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv4i64_nxv4i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i64_nxv4i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i32_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv8i32_nxv8i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i32_nxv8i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv8i64_nxv8i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i64_nxv8i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i32_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv16i32_nxv16i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s16>)
+ ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv16i32_nxv16i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s16>)
+ ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = COPY $v8m4
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: anyext_nxv1i64_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv1i64_nxv1i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s32>)
+ ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: anyext_nxv1i64_nxv1i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s32>)
+ ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv2i64_nxv2i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s32>)
+ ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: anyext_nxv2i64_nxv2i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s32>)
+ ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv4i64_nxv4i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s32>)
+ ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: anyext_nxv4i64_nxv4i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s32>)
+ ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = COPY $v8m2
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: anyext_nxv8i64_nxv8i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s32>)
+ ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: anyext_nxv8i64_nxv8i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s32>)
+ ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = COPY $v8m4
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
new file mode 100644
index 000000000000..d1df9540daaa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
@@ -0,0 +1,810 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV32 %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV64 %s
+
+---
+name: icmp_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv1i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 1 x s8>), [[SELECT1]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv1i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+ ; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 1 x s8>), [[SELECT1]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv2i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 2 x s8>), [[SELECT1]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv2i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+ ; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 2 x s8>), [[SELECT1]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv4i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 4 x s8>), [[SELECT1]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv4i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+ ; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 4 x s8>), [[SELECT1]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv8i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 8 x s8>), [[SELECT1]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv8i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+ ; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 8 x s8>), [[SELECT1]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv16i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 16 x s8>), [[SELECT1]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv16i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+ ; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 16 x s8>), [[SELECT1]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv32i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 32 x s8>), [[SELECT1]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv32i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+ ; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 32 x s8>), [[SELECT1]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv64i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv64i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 64 x s8>), [[SELECT1]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv64i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+ ; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 64 x s8>), [[SELECT1]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv1i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv1i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv2i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv2i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv4i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv4i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv8i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv8i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv16i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv16i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv32i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv32i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv64i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv64i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv1i16
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv1i16
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv2i16
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv2i16
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv4i16
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv4i16
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv8i16
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv8i16
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv16i16
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv16i16
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv32i16
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv32i16
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv1i32
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv1i32
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv2i32
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv2i32
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv4i32
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv4i32
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv8i32
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv8i32
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv16i32
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv16i32
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv1i64
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv1i64
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv2i64
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv2i64
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv4i64
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv4i64
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: icmp_nxv8i64
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: icmp_nxv8i64
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
new file mode 100644
index 000000000000..1571daf16b2d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
@@ -0,0 +1,1589 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV32 %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV64 %s
+
+# Extend from s1 element vectors
+---
+name: sext_nxv1i8_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv1i8_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i8_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s8>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i16_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv1i16_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i16_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s16>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i32_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv1i32_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i32_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv1i64_nxv1i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i64_nxv1i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v0
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i8_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv2i8_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv2i8_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s8>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i16_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv2i16_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv2i16_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s16>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv2i32_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv2i32_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv2i64_nxv2i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv2i64_nxv2i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = COPY $v0
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i8_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv4i8_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv4i8_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s8>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i16_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv4i16_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv4i16_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s16>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i32_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv4i32_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv4i32_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv4i64_nxv4i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv4i64_nxv4i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = COPY $v0
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i8_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv8i8_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv8i8_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s8>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv8i16_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv8i16_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv8i16_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s16>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv8i32_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv8i32_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv8i32_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv8i64_nxv8i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv8i64_nxv8i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = COPY $v0
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i8_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv16i8_nxv16i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv16i8_nxv16i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = COPY $v0
+ %0:_(<vscale x 16 x s8>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv16i16_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv16i16_nxv16i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv16i16_nxv16i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = COPY $v0
+ %0:_(<vscale x 16 x s16>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv16i32_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv16i32_nxv16i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv16i32_nxv16i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = COPY $v0
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv32i8_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv32i8_nxv32i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv32i8_nxv32i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = COPY $v0
+ %0:_(<vscale x 32 x s8>) = G_SEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv32i16_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv32i16_nxv32i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv32i16_nxv32i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = COPY $v0
+ %0:_(<vscale x 32 x s16>) = G_SEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv64i8_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v0
+ ; RV32-LABEL: name: sext_nxv64i8_nxv64i1
+ ; RV32: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv64i8_nxv64i1
+ ; RV64: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = COPY $v0
+ %0:_(<vscale x 64 x s8>) = G_SEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: sext_nxv1i16_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv1i16_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i16_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s16>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i32_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv1i32_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i32_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv1i64_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i64_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i16_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv2i16_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv2i16_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s16>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv2i32_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv2i32_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv2i64_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv2i64_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i16_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv4i16_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv4i16_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s16>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i32_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv4i32_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv4i32_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv4i64_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv4i64_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i16_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv8i16_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv8i16_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s16>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv8i32_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv8i32_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv8i32_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv8i64_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv8i64_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i16_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv16i16_nxv16i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv16i16_nxv16i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ %0:_(<vscale x 16 x s16>) = G_SEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv16i32_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv16i32_nxv16i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv16i32_nxv16i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv32i16_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv32i16_nxv32i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SEXT [[COPY]](<vscale x 32 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv32i16_nxv32i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SEXT [[COPY]](<vscale x 32 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = COPY $v8m4
+ %0:_(<vscale x 32 x s16>) = G_SEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: sext_nxv1i32_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv1i32_nxv1i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i32_nxv1i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv1i64_nxv1i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i64_nxv1i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv2i32_nxv2i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv2i32_nxv2i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv2i64_nxv2i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV32-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv2i64_nxv2i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV64-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i32_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv4i32_nxv4i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV32-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv4i32_nxv4i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV64-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv4i64_nxv4i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV32-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv4i64_nxv4i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV64-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i32_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv8i32_nxv8i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV32-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv8i32_nxv8i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV64-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv8i64_nxv8i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV32-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv8i64_nxv8i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV64-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i32_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv16i32_nxv16i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s16>)
+ ; RV32-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv16i32_nxv16i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SEXT [[COPY]](<vscale x 16 x s16>)
+ ; RV64-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = COPY $v8m4
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: sext_nxv1i64_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv1i64_nxv1i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s32>)
+ ; RV32-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: sext_nxv1i64_nxv1i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[COPY]](<vscale x 1 x s32>)
+ ; RV64-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv2i64_nxv2i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s32>)
+ ; RV32-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: sext_nxv2i64_nxv2i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[COPY]](<vscale x 2 x s32>)
+ ; RV64-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv4i64_nxv4i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s32>)
+ ; RV32-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: sext_nxv4i64_nxv4i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[COPY]](<vscale x 4 x s32>)
+ ; RV64-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = COPY $v8m2
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: sext_nxv8i64_nxv8i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s32>)
+ ; RV32-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: sext_nxv8i64_nxv8i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[COPY]](<vscale x 8 x s32>)
+ ; RV64-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = COPY $v8m4
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
new file mode 100644
index 000000000000..109536a07d5e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
@@ -0,0 +1,694 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: splatvector_nxv1i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv1i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 1 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv2i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv2i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv4i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv4i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv8i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv8i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 8 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv16i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv16i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 16 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv32i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv32i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv32i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv32i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 32 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv64i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv64i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv64i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv64i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 64 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+...
+
+---
+name: splatvector_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+
+---
+name: splatvector_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
new file mode 100644
index 000000000000..7bf5f83dd914
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
@@ -0,0 +1,817 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: splatvector_nxv1i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv1i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 1 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv2i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv2i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv4i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv4i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv8i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv8i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 8 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv16i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv16i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 16 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv32i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv32i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv32i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv32i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 32 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv64i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv64i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv64i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv64i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 64 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+...
+
+---
+name: splatvector_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+
+---
+name: splatvector_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: splatvector_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i64
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i64
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i64
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i64
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir
new file mode 100644
index 000000000000..806c9b97c91a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir
@@ -0,0 +1,116 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=HasF64 %s
+# RUN: llc -mtriple=riscv32 -mattr=+Zve64x -run-pass=legalizer %s -o - | FileCheck --check-prefix=NoF64 %s
+
+---
+name: splatvector_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; HasF64-LABEL: name: splatvector_nxv1i64
+ ; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
+ ; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; HasF64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+ ; HasF64-NEXT: PseudoRET implicit $v8
+ ;
+ ; NoF64-LABEL: name: splatvector_nxv1i64
+ ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
+ ; NoF64-NEXT: $v8 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 1 x s64>)
+ ; NoF64-NEXT: PseudoRET implicit $v8
+ %0:_(s64) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %0(s64)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; HasF64-LABEL: name: splatvector_nxv2i64
+ ; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
+ ; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; HasF64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+ ; HasF64-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; NoF64-LABEL: name: splatvector_nxv2i64
+ ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
+ ; NoF64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 2 x s64>)
+ ; NoF64-NEXT: PseudoRET implicit $v8m2
+ %0:_(s64) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %0(s64)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; HasF64-LABEL: name: splatvector_nxv4i64
+ ; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
+ ; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; HasF64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+ ; HasF64-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; NoF64-LABEL: name: splatvector_nxv4i64
+ ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
+ ; NoF64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 4 x s64>)
+ ; NoF64-NEXT: PseudoRET implicit $v8m4
+ %0:_(s64) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %0(s64)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; HasF64-LABEL: name: splatvector_nxv8i64
+ ; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
+ ; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; HasF64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+ ; HasF64-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; NoF64-LABEL: name: splatvector_nxv8i64
+ ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
+ ; NoF64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 8 x s64>)
+ ; NoF64-NEXT: PseudoRET implicit $v8m8
+ %0:_(s64) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %0(s64)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
new file mode 100644
index 000000000000..fe4ddfa847dd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
@@ -0,0 +1,1589 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV32 %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV64 %s
+
+# Extend from s1 element vectors
+---
+name: zext_nxv1i8_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i8_nxv1i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i8_nxv1i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v8
+ %0:_(<vscale x 1 x s8>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i16_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i16_nxv1i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i16_nxv1i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v8
+ %0:_(<vscale x 1 x s16>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i32_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i32_nxv1i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i32_nxv1i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v8
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i64_nxv1i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i64_nxv1i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i8_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i8_nxv2i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv2i8_nxv2i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %0:_(<vscale x 2 x s8>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i16_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i16_nxv2i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv2i16_nxv2i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %0:_(<vscale x 2 x s16>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i32_nxv2i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv2i32_nxv2i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i64_nxv2i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv2i64_nxv2i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i8_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i8_nxv4i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv4i8_nxv4i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = COPY $v8
+ %0:_(<vscale x 4 x s8>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i16_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i16_nxv4i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv4i16_nxv4i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = COPY $v8
+ %0:_(<vscale x 4 x s16>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i32_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i32_nxv4i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv4i32_nxv4i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = COPY $v8
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i64_nxv4i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv4i64_nxv4i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = COPY $v8
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i8_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i8_nxv8i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv8i8_nxv8i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = COPY $v8
+ %0:_(<vscale x 8 x s8>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv8i16_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i16_nxv8i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv8i16_nxv8i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = COPY $v8
+ %0:_(<vscale x 8 x s16>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv8i32_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i32_nxv8i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv8i32_nxv8i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = COPY $v8
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i64_nxv8i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32)
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV1]](s64)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv8i64_nxv8i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = COPY $v8
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i8_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv16i8_nxv16i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv16i8_nxv16i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = COPY $v8
+ %0:_(<vscale x 16 x s8>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv16i16_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv16i16_nxv16i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv16i16_nxv16i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = COPY $v8
+ %0:_(<vscale x 16 x s16>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv16i32_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv16i32_nxv16i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv16i32_nxv16i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = COPY $v8
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv32i8_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv32i8_nxv32i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv32i8_nxv32i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = COPY $v8
+ %0:_(<vscale x 32 x s8>) = G_ZEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv32i16_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv32i16_nxv32i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv32i16_nxv32i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = COPY $v8
+ %0:_(<vscale x 32 x s16>) = G_ZEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv64i8_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv64i8_nxv64i1
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv64i8_nxv64i1
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = COPY $v8
+ %0:_(<vscale x 64 x s8>) = G_ZEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: zext_nxv1i16_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i16_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i16_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s16>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i32_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i32_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i32_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i64_nxv1i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i64_nxv1i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i16_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i16_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv2i16_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s16>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i32_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv2i32_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i64_nxv2i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv2i64_nxv2i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i16_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i16_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv4i16_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s16>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i32_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i32_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv4i32_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i64_nxv4i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv4i64_nxv4i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i16_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i16_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv8i16_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s16>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv8i32_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i32_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv8i32_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i64_nxv8i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv8i64_nxv8i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i16_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv16i16_nxv16i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV32-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv16i16_nxv16i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV64-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ %0:_(<vscale x 16 x s16>) = G_ZEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv16i32_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv16i32_nxv16i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv16i32_nxv16i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv32i16_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv32i16_nxv32i8
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ZEXT [[COPY]](<vscale x 32 x s8>)
+ ; RV32-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv32i16_nxv32i8
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ZEXT [[COPY]](<vscale x 32 x s8>)
+ ; RV64-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = COPY $v8m4
+ %0:_(<vscale x 32 x s16>) = G_ZEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: zext_nxv1i32_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i32_nxv1i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i32_nxv1i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i64_nxv1i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i64_nxv1i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i32_nxv2i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv2i32_nxv2i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i64_nxv2i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV32-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv2i64_nxv2i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s16>)
+ ; RV64-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i32_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i32_nxv4i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV32-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv4i32_nxv4i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV64-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i64_nxv4i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV32-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv4i64_nxv4i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s16>)
+ ; RV64-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i32_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i32_nxv8i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV32-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv8i32_nxv8i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV64-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i64_nxv8i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m4
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV32-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv8i64_nxv8i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m4
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s16>)
+ ; RV64-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = COPY $v8m4
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i32_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv16i32_nxv16i16
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s16>)
+ ; RV32-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv16i32_nxv16i16
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ZEXT [[COPY]](<vscale x 16 x s16>)
+ ; RV64-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = COPY $v8m4
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: zext_nxv1i64_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv1i64_nxv1i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s32>)
+ ; RV32-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: zext_nxv1i64_nxv1i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[COPY]](<vscale x 1 x s32>)
+ ; RV64-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = COPY $v8
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv2i64_nxv2i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s32>)
+ ; RV32-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: zext_nxv2i64_nxv2i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[COPY]](<vscale x 2 x s32>)
+ ; RV64-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv4i64_nxv4i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s32>)
+ ; RV32-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: zext_nxv4i64_nxv4i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[COPY]](<vscale x 4 x s32>)
+ ; RV64-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = COPY $v8m2
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; RV32-LABEL: name: zext_nxv8i64_nxv8i32
+ ; RV32: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s32>)
+ ; RV32-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: zext_nxv8i64_nxv8i32
+ ; RV64: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[COPY]](<vscale x 8 x s32>)
+ ; RV64-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = COPY $v8m4
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/MachineVerifier/test_g_fcmp.mir b/llvm/test/MachineVerifier/test_g_fcmp.mir
index 9a73569cb4ab..17be746a63ff 100644
--- a/llvm/test/MachineVerifier/test_g_fcmp.mir
+++ b/llvm/test/MachineVerifier/test_g_fcmp.mir
@@ -24,17 +24,22 @@ body: |
%4:_(<2 x s32>) = G_IMPLICIT_DEF
%5:_(s1) = G_FCMP floatpred(oeq), %3, %4
- ; mismatched element count
+ ; mismatched fixed element count
; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
%6:_(<2 x s32>) = G_IMPLICIT_DEF
%7:_(<2 x s32>) = G_IMPLICIT_DEF
%8:_(<4 x s1>) = G_FCMP floatpred(oeq), %6, %7
+ ; mismatched scalable element count
+ ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
+ %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %11:_(<vscale x 4 x s1>) = G_FCMP floatpred(oeq), %9, %10
; mismatched scalar element type
; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
- %9:_(s32) = G_FCONSTANT float 0.0
- %10:_(s64) = G_FCONSTANT float 1.0
- %11:_(s1) = G_FCMP floatpred(oeq), %9, %10
+ %12:_(s32) = G_FCONSTANT float 0.0
+ %13:_(s64) = G_FCONSTANT float 1.0
+ %14:_(s1) = G_FCMP floatpred(oeq), %12, %13
...
diff --git a/llvm/test/MachineVerifier/test_g_icmp.mir b/llvm/test/MachineVerifier/test_g_icmp.mir
index 7c64e25b2252..74e3d34ebb57 100644
--- a/llvm/test/MachineVerifier/test_g_icmp.mir
+++ b/llvm/test/MachineVerifier/test_g_icmp.mir
@@ -24,17 +24,22 @@ body: |
%4:_(<2 x s32>) = G_IMPLICIT_DEF
%5:_(s1) = G_ICMP intpred(eq), %3, %4
- ; mismatched element count
+ ; mismatched fixed element count
; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
%6:_(<2 x s32>) = G_IMPLICIT_DEF
%7:_(<2 x s32>) = G_IMPLICIT_DEF
%8:_(<4 x s1>) = G_ICMP intpred(eq), %6, %7
+ ; mismatched scalable element count
+ ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
+ %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %11:_(<vscale x 4 x s1>) = G_ICMP intpred(eq), %9, %10
; mismatched scalar element type
; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
- %9:_(s32) = G_CONSTANT i32 0
- %10:_(s64) = G_CONSTANT i32 1
- %11:_(s1) = G_ICMP intpred(eq), %9, %10
+ %12:_(s32) = G_CONSTANT i32 0
+ %13:_(s64) = G_CONSTANT i32 1
+ %14:_(s1) = G_ICMP intpred(eq), %12, %13
...