diff options
author | Michael Maitland <michaeltmaitland@gmail.com> | 2024-03-15 07:52:41 -0700 |
---|---|---|
committer | Michael Maitland <michaeltmaitland@gmail.com> | 2024-04-03 15:56:08 -0700 |
commit | 63c925ca808f216f805b76873743450456e350f2 (patch) | |
tree | 79ab17303697e5d3abdfd4f47066b890f77ae6dc | |
parent | 188ca374ee601a50b6f5f6c1cf7e7dc3998e3a62 (diff) |
[RISCV][GISEL] Instruction selection for G_ZEXT, G_SEXT, and G_ANYEXT with scalable vector type
3 files changed, 2702 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/anyext.mir new file mode 100644 index 000000000000..eda1180b8285 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/anyext.mir @@ -0,0 +1,902 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir \ +# RUN: -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s +# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir \ +# RUN: -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s + +--- +name: anyext_nxv1i16_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv1i16_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv1i16_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s16>) = G_ANYEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s16>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv1i32_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s32>) = G_ANYEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv1i64_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s64>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv2i16_nxv2i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv2i16_nxv2i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv2i16_nxv2i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 2 x s8>) = COPY $v8 + %1:vrb(<vscale x 2 x s16>) = G_ANYEXT %0(<vscale x 2 x s8>) + $v8 = COPY %1(<vscale x 2 x s16>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv2i32_nxv2i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 2 x s8>) = COPY $v8 + %1:vrb(<vscale x 2 x s32>) = G_ANYEXT %0(<vscale x 2 x s8>) + $v8 = COPY %1(<vscale x 2 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv2i64_nxv2i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 2 x s8>) = COPY $v8 + %1:vrb(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s8>) + $v8m2 = COPY %1(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 + +... 
+--- +name: anyext_nxv4i16_nxv4i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv4i16_nxv4i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv4i16_nxv4i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 4 x s8>) = COPY $v8 + %1:vrb(<vscale x 4 x s16>) = G_ANYEXT %0(<vscale x 4 x s8>) + $v8 = COPY %1(<vscale x 4 x s16>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv4i32_nxv4i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 4 x s8>) = COPY $v8 + %1:vrb(<vscale x 4 x s32>) = G_ANYEXT %0(<vscale x 4 x s8>) + $v8m2 = COPY %1(<vscale x 4 x s32>) + PseudoRET implicit $v8m2 + +... 
+--- +name: anyext_nxv4i64_nxv4i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 4 x s8>) = COPY $v8 + %1:vrb(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s8>) + $v8m4 = COPY %1(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 + +... 
+--- +name: anyext_nxv8i16_nxv8i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv8i16_nxv8i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: anyext_nxv8i16_nxv8i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 8 x s8>) = COPY $v8 + %1:vrb(<vscale x 8 x s16>) = G_ANYEXT %0(<vscale x 8 x s8>) + $v8m2 = COPY %1(<vscale x 8 x s16>) + PseudoRET implicit $v8m2 + +... 
+--- +name: anyext_nxv8i32_nxv8i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 8 x s8>) = COPY $v8 + %1:vrb(<vscale x 8 x s32>) = G_ANYEXT %0(<vscale x 8 x s8>) + $v8m4 = COPY %1(<vscale x 8 x s32>) + PseudoRET implicit $v8m4 + +... 
+--- +name: anyext_nxv8i64_nxv8i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 8 x s8>) = COPY $v8 + %1:vrb(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s8>) + $v8m8 = COPY %1(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 + +... 
+--- +name: anyext_nxv16i16_nxv16i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv16i16_nxv16i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: anyext_nxv16i16_nxv16i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 16 x s8>) = COPY $v8m2 + %1:vrb(<vscale x 16 x s16>) = G_ANYEXT %0(<vscale x 16 x s8>) + $v8m4 = COPY %1(<vscale x 16 x s16>) + PseudoRET implicit $v8m4 + +... 
+--- +name: anyext_nxv16i32_nxv16i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m4 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m4 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 16 x s8>) = COPY $v8m4 + %1:vrb(<vscale x 16 x s32>) = G_ANYEXT %0(<vscale x 16 x s8>) + $v8m8 = COPY %1(<vscale x 16 x s32>) + PseudoRET implicit $v8m8 + +... 
+--- +name: anyext_nxv32i16_nxv32i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv32i16_nxv32i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: anyext_nxv32i16_nxv32i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 32 x s8>) = COPY $v8m4 + %1:vrb(<vscale x 32 x s16>) = G_ANYEXT %0(<vscale x 32 x s8>) + $v8m8 = COPY %1(<vscale x 32 x s16>) + PseudoRET implicit $v8m8 + +... 
+--- +name: anyext_nxv1i32_nxv1i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s16>) = COPY $v8 + %1:vrb(<vscale x 1 x s32>) = G_ANYEXT %0(<vscale x 1 x s16>) + $v8 = COPY %1(<vscale x 1 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv1i64_nxv1i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s16>) = COPY $v8 + %1:vrb(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s16>) + $v8 = COPY %1(<vscale x 1 x s64>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv2i32_nxv2i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 2 x s16>) = COPY $v8 + %1:vrb(<vscale x 2 x s32>) = G_ANYEXT %0(<vscale x 2 x s16>) + $v8 = COPY %1(<vscale x 2 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv2i64_nxv2i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 2 x s16>) = COPY $v8 + %1:vrb(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s16>) + $v8m2 = COPY %1(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 + +... 
+--- +name: anyext_nxv4i32_nxv4i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 4 x s16>) = COPY $v8 + %1:vrb(<vscale x 4 x s32>) = G_ANYEXT %0(<vscale x 4 x s16>) + $v8m2 = COPY %1(<vscale x 4 x s32>) + PseudoRET implicit $v8m2 + +... 
+--- +name: anyext_nxv4i64_nxv4i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 4 x s16>) = COPY $v8 + %1:vrb(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s16>) + $v8m4 = COPY %1(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 + +... 
+--- +name: anyext_nxv8i32_nxv8i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 8 x s16>) = COPY $v8m2 + %1:vrb(<vscale x 8 x s32>) = G_ANYEXT %0(<vscale x 8 x s16>) + $v8m4 = COPY %1(<vscale x 8 x s32>) + PseudoRET implicit $v8m4 + +... 
+--- +name: anyext_nxv8i64_nxv8i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 8 x s16>) = COPY $v8m2 + %1:vrb(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s16>) + $v8m8 = COPY %1(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 + +... 
+--- +name: anyext_nxv16i32_nxv16i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 16 x s16>) = COPY $v8m4 + %1:vrb(<vscale x 16 x s32>) = G_ANYEXT %0(<vscale x 16 x s16>) + $v8m8 = COPY %1(<vscale x 16 x s32>) + PseudoRET implicit $v8m8 + +... 
+--- +name: anyext_nxv1i64_nxv1i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s32>) = COPY $v8 + %1:vrb(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s32>) + $v8 = COPY %1(<vscale x 1 x s64>) + PseudoRET implicit $v8 + +... 
+--- +name: anyext_nxv2i64_nxv2i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 2 x s32>) = COPY $v8 + %1:vrb(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s32>) + $v8m2 = COPY %1(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 + +... 
+--- +name: anyext_nxv4i64_nxv4i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 4 x s32>) = COPY $v8m2 + %1:vrb(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s32>) + $v8m4 = COPY %1(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 + +... 
+--- +name: anyext_nxv8i64_nxv8i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 8 x s32>) = COPY $v8m4 + %1:vrb(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s32>) + $v8m8 = COPY %1(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 + +... 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sext.mir new file mode 100644 index 000000000000..382166fb2054 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sext.mir @@ -0,0 +1,900 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s +# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s + +--- +name: sext_nxv1i16_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv1i16_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv1i16_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s16>) = G_SEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s16>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv1i32_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv1i32_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF4_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv1i32_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF4_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s32>) = G_SEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv1i64_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv1i64_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF8_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv1i64_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF8_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s64>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv2i16_nxv2i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv2i16_nxv2i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv2i16_nxv2i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 2 x s8>) = COPY $v8 + %1:vrb(<vscale x 2 x s16>) = G_SEXT %0(<vscale x 2 x s8>) + $v8 = COPY %1(<vscale x 2 x s16>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv2i32_nxv2i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv2i32_nxv2i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF4_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv2i32_nxv2i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF4_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 2 x s8>) = COPY $v8 + %1:vrb(<vscale x 2 x s32>) = G_SEXT %0(<vscale x 2 x s8>) + $v8 = COPY %1(<vscale x 2 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv2i64_nxv2i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv2i64_nxv2i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF8_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: sext_nxv2i64_nxv2i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF8_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 2 x s8>) = COPY $v8 + %1:vrb(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s8>) + $v8m2 = COPY %1(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 + +... 
+--- +name: sext_nxv4i16_nxv4i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv4i16_nxv4i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv4i16_nxv4i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 4 x s8>) = COPY $v8 + %1:vrb(<vscale x 4 x s16>) = G_SEXT %0(<vscale x 4 x s8>) + $v8 = COPY %1(<vscale x 4 x s16>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv4i32_nxv4i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv4i32_nxv4i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF4_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: sext_nxv4i32_nxv4i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF4_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 4 x s8>) = COPY $v8 + %1:vrb(<vscale x 4 x s32>) = G_SEXT %0(<vscale x 4 x s8>) + $v8m2 = COPY %1(<vscale x 4 x s32>) + PseudoRET implicit $v8m2 + +... 
+--- +name: sext_nxv4i64_nxv4i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv4i64_nxv4i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF8_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: sext_nxv4i64_nxv4i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF8_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 4 x s8>) = COPY $v8 + %1:vrb(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s8>) + $v8m4 = COPY %1(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 + +... 
+--- +name: sext_nxv8i16_nxv8i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv8i16_nxv8i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF2_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: sext_nxv8i16_nxv8i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF2_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 8 x s8>) = COPY $v8 + %1:vrb(<vscale x 8 x s16>) = G_SEXT %0(<vscale x 8 x s8>) + $v8m2 = COPY %1(<vscale x 8 x s16>) + PseudoRET implicit $v8m2 + +... 
+--- +name: sext_nxv8i32_nxv8i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv8i32_nxv8i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF4_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: sext_nxv8i32_nxv8i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF4_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 8 x s8>) = COPY $v8 + %1:vrb(<vscale x 8 x s32>) = G_SEXT %0(<vscale x 8 x s8>) + $v8m4 = COPY %1(<vscale x 8 x s32>) + PseudoRET implicit $v8m4 + +... 
+--- +name: sext_nxv8i64_nxv8i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv8i64_nxv8i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF8_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: sext_nxv8i64_nxv8i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF8_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 8 x s8>) = COPY $v8 + %1:vrb(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s8>) + $v8m8 = COPY %1(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 + +... 
+--- +name: sext_nxv16i16_nxv16i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv16i16_nxv16i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF2_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: sext_nxv16i16_nxv16i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF2_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 16 x s8>) = COPY $v8m2 + %1:vrb(<vscale x 16 x s16>) = G_SEXT %0(<vscale x 16 x s8>) + $v8m4 = COPY %1(<vscale x 16 x s16>) + PseudoRET implicit $v8m4 + +... 
+--- +name: sext_nxv16i32_nxv16i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv16i32_nxv16i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF4_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: sext_nxv16i32_nxv16i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF4_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 16 x s8>) = COPY $v8m2 + %1:vrb(<vscale x 16 x s32>) = G_SEXT %0(<vscale x 16 x s8>) + $v8m8 = COPY %1(<vscale x 16 x s32>) + PseudoRET implicit $v8m8 + +... 
+--- +name: sext_nxv32i16_nxv32i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv32i16_nxv32i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF2_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: sext_nxv32i16_nxv32i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF2_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 32 x s8>) = COPY $v8m4 + %1:vrb(<vscale x 32 x s16>) = G_SEXT %0(<vscale x 32 x s8>) + $v8m8 = COPY %1(<vscale x 32 x s16>) + PseudoRET implicit $v8m8 + +... 
+--- +name: sext_nxv1i32_nxv1i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv1i32_nxv1i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv1i32_nxv1i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s16>) = COPY $v8 + %1:vrb(<vscale x 1 x s32>) = G_SEXT %0(<vscale x 1 x s16>) + $v8 = COPY %1(<vscale x 1 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv1i64_nxv1i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv1i64_nxv1i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF4_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv1i64_nxv1i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF4_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s16>) = COPY $v8 + %1:vrb(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s16>) + $v8 = COPY %1(<vscale x 1 x s64>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv2i32_nxv2i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv2i32_nxv2i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv2i32_nxv2i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 2 x s16>) = COPY $v8 + %1:vrb(<vscale x 2 x s32>) = G_SEXT %0(<vscale x 2 x s16>) + $v8 = COPY %1(<vscale x 2 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv2i64_nxv2i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv2i64_nxv2i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF4_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: sext_nxv2i64_nxv2i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF4_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 2 x s16>) = COPY $v8 + %1:vrb(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s16>) + $v8m2 = COPY %1(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 + +... 
+--- +name: sext_nxv4i32_nxv4i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv4i32_nxv4i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF2_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: sext_nxv4i32_nxv4i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF2_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 4 x s16>) = COPY $v8 + %1:vrb(<vscale x 4 x s32>) = G_SEXT %0(<vscale x 4 x s16>) + $v8m2 = COPY %1(<vscale x 4 x s32>) + PseudoRET implicit $v8m2 + +... 
+--- +name: sext_nxv4i64_nxv4i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv4i64_nxv4i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF4_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: sext_nxv4i64_nxv4i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF4_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 4 x s16>) = COPY $v8 + %1:vrb(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s16>) + $v8m4 = COPY %1(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 + +... 
+--- +name: sext_nxv8i32_nxv8i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv8i32_nxv8i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF2_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: sext_nxv8i32_nxv8i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF2_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 8 x s16>) = COPY $v8m2 + %1:vrb(<vscale x 8 x s32>) = G_SEXT %0(<vscale x 8 x s16>) + $v8m4 = COPY %1(<vscale x 8 x s32>) + PseudoRET implicit $v8m4 + +... 
+--- +name: sext_nxv8i64_nxv8i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv8i64_nxv8i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF4_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: sext_nxv8i64_nxv8i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF4_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 8 x s16>) = COPY $v8m2 + %1:vrb(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s16>) + $v8m8 = COPY %1(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 + +... 
+--- +name: sext_nxv16i32_nxv16i16 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv16i32_nxv16i16 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF2_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: sext_nxv16i32_nxv16i16 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF2_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 16 x s16>) = COPY $v8m4 + %1:vrb(<vscale x 16 x s32>) = G_SEXT %0(<vscale x 16 x s16>) + $v8m8 = COPY %1(<vscale x 16 x s32>) + PseudoRET implicit $v8m8 + +... 
+--- +name: sext_nxv1i64_nxv1i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv1i64_nxv1i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: sext_nxv1i64_nxv1i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVSEXT_VF2_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s32>) = COPY $v8 + %1:vrb(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s32>) + $v8 = COPY %1(<vscale x 1 x s64>) + PseudoRET implicit $v8 + +... 
+--- +name: sext_nxv2i64_nxv2i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv2i64_nxv2i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF2_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m2 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64I-LABEL: name: sext_nxv2i64_nxv2i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVSEXT_VF2_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m2 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m2 + %0:vrb(<vscale x 2 x s32>) = COPY $v8 + %1:vrb(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s32>) + $v8m2 = COPY %1(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 + +... 
+--- +name: sext_nxv4i64_nxv4i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv4i64_nxv4i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF2_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m4 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64I-LABEL: name: sext_nxv4i64_nxv4i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVSEXT_VF2_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m4 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m4 + %0:vrb(<vscale x 4 x s32>) = COPY $v8m2 + %1:vrb(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s32>) + $v8m4 = COPY %1(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 + +... 
+--- +name: sext_nxv8i64_nxv8i32 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: sext_nxv8i64_nxv8i32 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF2_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8m8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64I-LABEL: name: sext_nxv8i64_nxv8i32 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVSEXT_VF2_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8m8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8m8 + %0:vrb(<vscale x 8 x s32>) = COPY $v8m4 + %1:vrb(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s32>) + $v8m8 = COPY %1(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 + +... 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/zext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/zext.mir new file mode 100644 index 000000000000..2fc9e05602a8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/zext.mir @@ -0,0 +1,900 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s +# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s + +--- +name: zext_nxv1i16_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: zext_nxv1i16_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: zext_nxv1i16_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s16>) = G_ZEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s16>) + PseudoRET implicit $v8 + +... 
+--- +name: zext_nxv1i32_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: zext_nxv1i32_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: zext_nxv1i32_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s32>) = G_ZEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s32>) + PseudoRET implicit $v8 + +... 
+--- +name: zext_nxv1i64_nxv1i8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + + ; RV32I-LABEL: name: zext_nxv1i64_nxv1i8 + ; RV32I: liveins: $v8 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV32I-NEXT: $v8 = COPY %1 + ; RV32I-NEXT: PseudoRET implicit $v8 + ; + ; RV64I-LABEL: name: zext_nxv1i64_nxv1i8 + ; RV64I: liveins: $v8 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 + ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */ + ; RV64I-NEXT: $v8 = COPY %1 + ; RV64I-NEXT: PseudoRET implicit $v8 + %0:vrb(<vscale x 1 x s8>) = COPY $v8 + %1:vrb(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s8>) + $v8 = COPY %1(<vscale x 1 x s64>) + PseudoRET implicit $v8 + +... 
# G_ZEXT <vscale x 2 x s8> -> <vscale x 2 x s16> must select PseudoVZEXT_VF2_MF2 (SEW e16, ta/ma); identical selection on RV32 and RV64.
---
name: zext_nxv2i16_nxv2i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv2i16_nxv2i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: zext_nxv2i16_nxv2i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 2 x s8>) = COPY $v8
    %1:vrb(<vscale x 2 x s16>) = G_ZEXT %0(<vscale x 2 x s8>)
    $v8 = COPY %1(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
# G_ZEXT <vscale x 2 x s8> -> <vscale x 2 x s32> must select PseudoVZEXT_VF4_M1 (SEW e32, ta/ma); identical selection on RV32 and RV64.
---
name: zext_nxv2i32_nxv2i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv2i32_nxv2i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: zext_nxv2i32_nxv2i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 2 x s8>) = COPY $v8
    %1:vrb(<vscale x 2 x s32>) = G_ZEXT %0(<vscale x 2 x s8>)
    $v8 = COPY %1(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
# G_ZEXT <vscale x 2 x s8> -> <vscale x 2 x s64> must select PseudoVZEXT_VF8_M2 (SEW e64, ta/ma); result class vrm2, returned in $v8m2.
---
name: zext_nxv2i64_nxv2i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv2i64_nxv2i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: zext_nxv2i64_nxv2i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 2 x s8>) = COPY $v8
    %1:vrb(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s8>)
    $v8m2 = COPY %1(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
# G_ZEXT <vscale x 4 x s8> -> <vscale x 4 x s16> must select PseudoVZEXT_VF2_M1 (SEW e16, ta/ma); identical selection on RV32 and RV64.
---
name: zext_nxv4i16_nxv4i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv4i16_nxv4i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: zext_nxv4i16_nxv4i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 4 x s8>) = COPY $v8
    %1:vrb(<vscale x 4 x s16>) = G_ZEXT %0(<vscale x 4 x s8>)
    $v8 = COPY %1(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
# G_ZEXT <vscale x 4 x s8> -> <vscale x 4 x s32> must select PseudoVZEXT_VF4_M2 (SEW e32, ta/ma); result class vrm2, returned in $v8m2.
---
name: zext_nxv4i32_nxv4i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv4i32_nxv4i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: zext_nxv4i32_nxv4i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 4 x s8>) = COPY $v8
    %1:vrb(<vscale x 4 x s32>) = G_ZEXT %0(<vscale x 4 x s8>)
    $v8m2 = COPY %1(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
# G_ZEXT <vscale x 4 x s8> -> <vscale x 4 x s64> must select PseudoVZEXT_VF8_M4 (SEW e64, ta/ma); result class vrm4, returned in $v8m4.
---
name: zext_nxv4i64_nxv4i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv4i64_nxv4i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: zext_nxv4i64_nxv4i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 4 x s8>) = COPY $v8
    %1:vrb(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s8>)
    $v8m4 = COPY %1(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
# G_ZEXT <vscale x 8 x s8> -> <vscale x 8 x s16> must select PseudoVZEXT_VF2_M2 (SEW e16, ta/ma); result class vrm2, returned in $v8m2.
---
name: zext_nxv8i16_nxv8i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv8i16_nxv8i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: zext_nxv8i16_nxv8i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 8 x s8>) = COPY $v8
    %1:vrb(<vscale x 8 x s16>) = G_ZEXT %0(<vscale x 8 x s8>)
    $v8m2 = COPY %1(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
# G_ZEXT <vscale x 8 x s8> -> <vscale x 8 x s32> must select PseudoVZEXT_VF4_M4 (SEW e32, ta/ma); result class vrm4, returned in $v8m4.
---
name: zext_nxv8i32_nxv8i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv8i32_nxv8i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: zext_nxv8i32_nxv8i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 8 x s8>) = COPY $v8
    %1:vrb(<vscale x 8 x s32>) = G_ZEXT %0(<vscale x 8 x s8>)
    $v8m4 = COPY %1(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
# G_ZEXT <vscale x 8 x s8> -> <vscale x 8 x s64> must select PseudoVZEXT_VF8_M8 (SEW e64, ta/ma); result class vrm8, returned in $v8m8.
---
name: zext_nxv8i64_nxv8i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv8i64_nxv8i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: zext_nxv8i64_nxv8i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 8 x s8>) = COPY $v8
    %1:vrb(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s8>)
    $v8m8 = COPY %1(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...
# G_ZEXT <vscale x 16 x s8> -> <vscale x 16 x s16> must select PseudoVZEXT_VF2_M4 (SEW e16, ta/ma); source in $v8m2 (vrm2), result in $v8m4 (vrm4).
---
name: zext_nxv16i16_nxv16i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv16i16_nxv16i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: zext_nxv16i16_nxv16i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 16 x s8>) = COPY $v8m2
    %1:vrb(<vscale x 16 x s16>) = G_ZEXT %0(<vscale x 16 x s8>)
    $v8m4 = COPY %1(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
# G_ZEXT <vscale x 16 x s8> -> <vscale x 16 x s32> must select PseudoVZEXT_VF4_M8 (SEW e32, ta/ma); source in $v8m2 (vrm2), result in $v8m8 (vrm8).
---
name: zext_nxv16i32_nxv16i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv16i32_nxv16i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: zext_nxv16i32_nxv16i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 16 x s8>) = COPY $v8m2
    %1:vrb(<vscale x 16 x s32>) = G_ZEXT %0(<vscale x 16 x s8>)
    $v8m8 = COPY %1(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
# G_ZEXT <vscale x 32 x s8> -> <vscale x 32 x s16> must select PseudoVZEXT_VF2_M8 (SEW e16, ta/ma); source in $v8m4 (vrm4), result in $v8m8 (vrm8).
---
name: zext_nxv32i16_nxv32i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv32i16_nxv32i8
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: zext_nxv32i16_nxv32i8
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 32 x s8>) = COPY $v8m4
    %1:vrb(<vscale x 32 x s16>) = G_ZEXT %0(<vscale x 32 x s8>)
    $v8m8 = COPY %1(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
# G_ZEXT <vscale x 1 x s16> -> <vscale x 1 x s32> must select PseudoVZEXT_VF2_MF2 (SEW e32, ta/ma); identical selection on RV32 and RV64.
---
name: zext_nxv1i32_nxv1i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv1i32_nxv1i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: zext_nxv1i32_nxv1i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s16>) = COPY $v8
    %1:vrb(<vscale x 1 x s32>) = G_ZEXT %0(<vscale x 1 x s16>)
    $v8 = COPY %1(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
# G_ZEXT <vscale x 1 x s16> -> <vscale x 1 x s64> must select PseudoVZEXT_VF4_M1 (SEW e64, ta/ma); identical selection on RV32 and RV64.
---
name: zext_nxv1i64_nxv1i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv1i64_nxv1i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: zext_nxv1i64_nxv1i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s16>) = COPY $v8
    %1:vrb(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s16>)
    $v8 = COPY %1(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
# G_ZEXT <vscale x 2 x s16> -> <vscale x 2 x s32> must select PseudoVZEXT_VF2_M1 (SEW e32, ta/ma); identical selection on RV32 and RV64.
---
name: zext_nxv2i32_nxv2i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv2i32_nxv2i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: zext_nxv2i32_nxv2i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 2 x s16>) = COPY $v8
    %1:vrb(<vscale x 2 x s32>) = G_ZEXT %0(<vscale x 2 x s16>)
    $v8 = COPY %1(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
# G_ZEXT <vscale x 2 x s16> -> <vscale x 2 x s64> must select PseudoVZEXT_VF4_M2 (SEW e64, ta/ma); result class vrm2, returned in $v8m2.
---
name: zext_nxv2i64_nxv2i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv2i64_nxv2i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: zext_nxv2i64_nxv2i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 2 x s16>) = COPY $v8
    %1:vrb(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s16>)
    $v8m2 = COPY %1(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
# G_ZEXT <vscale x 4 x s16> -> <vscale x 4 x s32> must select PseudoVZEXT_VF2_M2 (SEW e32, ta/ma); result class vrm2, returned in $v8m2.
---
name: zext_nxv4i32_nxv4i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv4i32_nxv4i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: zext_nxv4i32_nxv4i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 4 x s16>) = COPY $v8
    %1:vrb(<vscale x 4 x s32>) = G_ZEXT %0(<vscale x 4 x s16>)
    $v8m2 = COPY %1(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
# G_ZEXT <vscale x 4 x s16> -> <vscale x 4 x s64> must select PseudoVZEXT_VF4_M4 (SEW e64, ta/ma); result class vrm4, returned in $v8m4.
---
name: zext_nxv4i64_nxv4i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv4i64_nxv4i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: zext_nxv4i64_nxv4i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 4 x s16>) = COPY $v8
    %1:vrb(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s16>)
    $v8m4 = COPY %1(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
# G_ZEXT <vscale x 8 x s16> -> <vscale x 8 x s32> must select PseudoVZEXT_VF2_M4 (SEW e32, ta/ma); source in $v8m2 (vrm2), result in $v8m4 (vrm4).
---
name: zext_nxv8i32_nxv8i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv8i32_nxv8i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: zext_nxv8i32_nxv8i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 8 x s16>) = COPY $v8m2
    %1:vrb(<vscale x 8 x s32>) = G_ZEXT %0(<vscale x 8 x s16>)
    $v8m4 = COPY %1(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
# G_ZEXT <vscale x 8 x s16> -> <vscale x 8 x s64> must select PseudoVZEXT_VF4_M8 (SEW e64, ta/ma); result class vrm8, returned in $v8m8.
# Fix: the source value was read from $v8m4, but <vscale x 8 x s16> occupies an
# LMUL=2 register group — instruction selection itself constrains the copy to
# class vrm2, and the sibling test zext_nxv8i32_nxv8i16 copies the same type
# from $v8m2. Use $v8m2 here too (matching the declared livein $v8).
---
name: zext_nxv8i64_nxv8i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv8i64_nxv8i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: zext_nxv8i64_nxv8i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 8 x s16>) = COPY $v8m2
    %1:vrb(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s16>)
    $v8m8 = COPY %1(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...
# G_ZEXT <vscale x 16 x s16> -> <vscale x 16 x s32> must select PseudoVZEXT_VF2_M8 (SEW e32, ta/ma); source in $v8m4 (vrm4), result in $v8m8 (vrm8).
---
name: zext_nxv16i32_nxv16i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv16i32_nxv16i16
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: zext_nxv16i32_nxv16i16
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 16 x s16>) = COPY $v8m4
    %1:vrb(<vscale x 16 x s32>) = G_ZEXT %0(<vscale x 16 x s16>)
    $v8m8 = COPY %1(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
# G_ZEXT <vscale x 1 x s32> -> <vscale x 1 x s64> must select PseudoVZEXT_VF2_M1 (SEW e64, ta/ma); identical selection on RV32 and RV64.
---
name: zext_nxv1i64_nxv1i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv1i64_nxv1i32
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8
    ;
    ; RV64I-LABEL: name: zext_nxv1i64_nxv1i32
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:vrb(<vscale x 1 x s32>) = COPY $v8
    %1:vrb(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s32>)
    $v8 = COPY %1(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
# G_ZEXT <vscale x 2 x s32> -> <vscale x 2 x s64> must select PseudoVZEXT_VF2_M2 (SEW e64, ta/ma); result class vrm2, returned in $v8m2.
---
name: zext_nxv2i64_nxv2i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv2i64_nxv2i32
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m2 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ;
    ; RV64I-LABEL: name: zext_nxv2i64_nxv2i32
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m2 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:vrb(<vscale x 2 x s32>) = COPY $v8
    %1:vrb(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s32>)
    $v8m2 = COPY %1(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
# G_ZEXT <vscale x 4 x s32> -> <vscale x 4 x s64> must select PseudoVZEXT_VF2_M4 (SEW e64, ta/ma); source in $v8m2 (vrm2), result in $v8m4 (vrm4).
---
name: zext_nxv4i64_nxv4i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv4i64_nxv4i32
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m4 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ;
    ; RV64I-LABEL: name: zext_nxv4i64_nxv4i32
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m4 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:vrb(<vscale x 4 x s32>) = COPY $v8m2
    %1:vrb(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s32>)
    $v8m4 = COPY %1(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
# G_ZEXT <vscale x 8 x s32> -> <vscale x 8 x s64> must select PseudoVZEXT_VF2_M8 (SEW e64, ta/ma); source in $v8m4 (vrm4), result in $v8m8 (vrm8).
---
name: zext_nxv8i64_nxv8i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $v8

    ; RV32I-LABEL: name: zext_nxv8i64_nxv8i32
    ; RV32I: liveins: $v8
    ; RV32I-NEXT: {{ $}}
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV32I-NEXT: $v8m8 = COPY %1
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ;
    ; RV64I-LABEL: name: zext_nxv8i64_nxv8i32
    ; RV64I: liveins: $v8
    ; RV64I-NEXT: {{ $}}
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
    ; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 3 /* ta, ma */
    ; RV64I-NEXT: $v8m8 = COPY %1
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:vrb(<vscale x 8 x s32>) = COPY $v8m4
    %1:vrb(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s32>)
    $v8m8 = COPY %1(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...