author     Matt Arsenault <Matthew.Arsenault@amd.com>   2024-04-04 08:51:53 -0400
committer  Matt Arsenault <arsenm2@gmail.com>           2024-04-08 08:32:04 -0400
commit     8cb642bf18bfd3e6e8576f4f090fa584f68bb0cc
tree       9a05b4ab0b4f6d5b538e4a76d1bcac20cfdf1ff7
parent     38f996bb2bc4f922c7b441d730ab3a3ad2fa1506
GlobalISel: Regenerate test checks
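
Context (not part of the original commit message): these tests carry autogenerated FileCheck assertions, and this change regenerates them, converting loose "CHECK:" matches into strict "CHECK-NEXT:" matches with "{{ $}}" blank-line anchors. The regeneration was likely done with llvm/utils/update_mir_test_checks.py, the script these irtranslator tests are normally maintained with. A minimal sketch of what the stricter form buys, using a hypothetical test body:

    ; A plain CHECK match may silently skip instructions that appear
    ; between the matched lines:
    ;   CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
    ;   CHECK: RET_ReallyLR
    ;
    ; CHECK-NEXT requires each matched line to follow immediately, and
    ; the {{ $}} pattern pins the blank line the MIR printer emits after
    ; the liveins list, so any unexpected extra instruction fails the test:
    ;   CHECK-NEXT: liveins: $x8
    ;   CHECK-NEXT: {{ $}}
    ;   CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
    ;   CHECK-NEXT: RET_ReallyLR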
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll  | 92
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll                | 41
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll     | 44
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll            | 20
4 files changed, 115 insertions, 82 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll
index ecd7c3ca71be..0ff6ae28279f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll
@@ -8,10 +8,11 @@ declare void @test_explicit_sret(ptr sret(i64))
define void @can_tail_call_forwarded_explicit_sret_ptr(ptr sret(i64) %arg) {
; CHECK-LABEL: name: can_tail_call_forwarded_explicit_sret_ptr
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x8
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
- ; CHECK: $x8 = COPY [[COPY]](p0)
- ; CHECK: TCRETURNdi @test_explicit_sret, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $x8
+ ; CHECK-NEXT: liveins: $x8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
+ ; CHECK-NEXT: $x8 = COPY [[COPY]](p0)
+ ; CHECK-NEXT: TCRETURNdi @test_explicit_sret, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $x8
tail call void @test_explicit_sret(ptr %arg)
ret void
}
@@ -20,13 +21,14 @@ define void @can_tail_call_forwarded_explicit_sret_ptr(ptr sret(i64) %arg) {
define void @test_call_explicit_sret(ptr sret(i64) %arg) {
; CHECK-LABEL: name: test_call_explicit_sret
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x8
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
- ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: $x8 = COPY [[COPY]](p0)
- ; CHECK: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
- ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: liveins: $x8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $x8 = COPY [[COPY]](p0)
+ ; CHECK-NEXT: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: RET_ReallyLR
call void @test_explicit_sret(ptr %arg)
ret void
}
@@ -34,12 +36,12 @@ define void @test_call_explicit_sret(ptr sret(i64) %arg) {
define void @dont_tail_call_explicit_sret_alloca_unused() {
; CHECK-LABEL: name: dont_tail_call_explicit_sret_alloca_unused
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.l
- ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: $x8 = COPY [[FRAME_INDEX]](p0)
- ; CHECK: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
- ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.l
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $x8 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: RET_ReallyLR
%l = alloca i64, align 8
tail call void @test_explicit_sret(ptr %l)
ret void
@@ -48,16 +50,17 @@ define void @dont_tail_call_explicit_sret_alloca_unused() {
define void @dont_tail_call_explicit_sret_alloca_dummyusers(ptr %ptr) {
; CHECK-LABEL: name: dont_tail_call_explicit_sret_alloca_dummyusers
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.l
- ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.ptr)
- ; CHECK: G_STORE [[LOAD]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %ir.l)
- ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: $x8 = COPY [[FRAME_INDEX]](p0)
- ; CHECK: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
- ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.l
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.ptr)
+ ; CHECK-NEXT: G_STORE [[LOAD]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %ir.l)
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $x8 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: RET_ReallyLR
%l = alloca i64, align 8
%r = load i64, ptr %ptr, align 8
store i64 %r, ptr %l, align 8
@@ -68,15 +71,16 @@ define void @dont_tail_call_explicit_sret_alloca_dummyusers(ptr %ptr) {
define void @dont_tail_call_tailcall_explicit_sret_gep(ptr %ptr) {
; CHECK-LABEL: name: dont_tail_call_tailcall_explicit_sret_gep
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: $x8 = COPY [[PTR_ADD]](p0)
- ; CHECK: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
- ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $x8 = COPY [[PTR_ADD]](p0)
+ ; CHECK-NEXT: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: RET_ReallyLR
%ptr2 = getelementptr i64, ptr %ptr, i32 1
tail call void @test_explicit_sret(ptr %ptr2)
ret void
@@ -85,14 +89,14 @@ define void @dont_tail_call_tailcall_explicit_sret_gep(ptr %ptr) {
define i64 @dont_tail_call_sret_alloca_returned() {
; CHECK-LABEL: name: dont_tail_call_sret_alloca_returned
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.l
- ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: $x8 = COPY [[FRAME_INDEX]](p0)
- ; CHECK: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
- ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.l)
- ; CHECK: $x0 = COPY [[LOAD]](s64)
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.l
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $x8 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.l)
+ ; CHECK-NEXT: $x0 = COPY [[LOAD]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%l = alloca i64, align 8
tail call void @test_explicit_sret(ptr %l)
%r = load i64, ptr %l, align 8
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll b/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll
index 75865695ea20..5e667eba741a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll
@@ -10,24 +10,29 @@ target triple = "arm64-apple-ios5.0.0"
define i32 @main() #0 !dbg !14 {
; CHECK-LABEL: name: main
; CHECK: bb.1.entry:
- ; CHECK: successors: %bb.2(0x40000000), %bb.3(0x40000000)
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1
- ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2
- ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
- ; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.retval)
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0), debug-location !17 :: (dereferenceable load (s32) from @var1)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LOAD]](s32), [[C1]], debug-location !19
- ; CHECK: G_BRCOND [[ICMP]](s1), %bb.2, debug-location !20
- ; CHECK: G_BR %bb.3, debug-location !20
- ; CHECK: bb.2.if.then:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: G_STORE [[C2]](s32), [[GV1]](p0), debug-location !21 :: (store (s32) into @var2)
- ; CHECK: bb.3.if.end:
- ; CHECK: $w0 = COPY [[C]](s32), debug-location !24
- ; CHECK: RET_ReallyLR implicit $w0, debug-location !24
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; CHECK-NEXT: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.retval)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0), debug-location !17 :: (dereferenceable load (s32) from @var1)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LOAD]](s32), [[C1]], debug-location !19
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.2, debug-location !20
+ ; CHECK-NEXT: G_BR %bb.3, debug-location !20
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.if.then:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: G_STORE [[C2]](s32), [[GV1]](p0), debug-location !21 :: (store (s32) into @var2)
+ ; CHECK-NEXT: G_BR %bb.3, debug-location !23
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.if.end:
+ ; CHECK-NEXT: $w0 = COPY [[C]](s32), debug-location !24
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0, debug-location !24
entry:
%retval = alloca i32, align 4
store i32 0, ptr %retval, align 4
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll
index f9f92b9e2190..baed1263008d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll
@@ -4,11 +4,12 @@
define void @store_nontemporal(ptr dereferenceable(4) %ptr) {
; CHECK-LABEL: name: store_nontemporal
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (non-temporal store (s32) into %ir.ptr)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: G_STORE [[C]](s32), [[COPY]](p0) :: (non-temporal store (s32) into %ir.ptr)
+ ; CHECK-NEXT: RET_ReallyLR
store i32 0, ptr %ptr, align 4, !nontemporal !0
ret void
}
@@ -16,11 +17,12 @@ define void @store_nontemporal(ptr dereferenceable(4) %ptr) {
define void @store_dereferenceable(ptr dereferenceable(4) %ptr) {
; CHECK-LABEL: name: store_dereferenceable
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32) into %ir.ptr)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32) into %ir.ptr)
+ ; CHECK-NEXT: RET_ReallyLR
store i32 0, ptr %ptr, align 4
ret void
}
@@ -28,11 +30,12 @@ define void @store_dereferenceable(ptr dereferenceable(4) %ptr) {
define void @store_volatile_dereferenceable(ptr dereferenceable(4) %ptr) {
; CHECK-LABEL: name: store_volatile_dereferenceable
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (volatile store (s32) into %ir.ptr)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: G_STORE [[C]](s32), [[COPY]](p0) :: (volatile store (s32) into %ir.ptr)
+ ; CHECK-NEXT: RET_ReallyLR
store volatile i32 0, ptr %ptr, align 4
ret void
}
@@ -40,11 +43,12 @@ define void @store_volatile_dereferenceable(ptr dereferenceable(4) %ptr) {
define void @store_falkor_strided_access(ptr %ptr) {
; CHECK-LABEL: name: store_falkor_strided_access
; CHECK: bb.1 (%ir-block.0):
- ; CHECK: liveins: $x0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: ("aarch64-strided-access" store (s32) into %ir.ptr)
- ; CHECK: RET_ReallyLR
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: G_STORE [[C]](s32), [[COPY]](p0) :: ("aarch64-strided-access" store (s32) into %ir.ptr)
+ ; CHECK-NEXT: RET_ReallyLR
store i32 0, ptr %ptr, align 4, !falkor.strided.access !0
ret void
}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
index d1a7339db9af..55e73dc5d29e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -41,6 +41,7 @@ define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4, i8 %arg5, i8 %
; X86-NEXT: G_STORE [[TRUNC7]](s8), [[GV2]](p0) :: (store (s8) into @a8_8bit)
; X86-NEXT: $al = COPY [[TRUNC]](s8)
; X86-NEXT: RET 0, implicit $al
+ ;
; X64-LABEL: name: test_i8_args_8
; X64: bb.1.entry:
; X64-NEXT: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
@@ -109,6 +110,7 @@ define i32 @test_i32_args_8(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg
; X86-NEXT: G_STORE [[LOAD7]](s32), [[GV2]](p0) :: (store (s32) into @a8_32bit)
; X86-NEXT: $eax = COPY [[LOAD]](s32)
; X86-NEXT: RET 0, implicit $eax
+ ;
; X64-LABEL: name: test_i32_args_8
; X64: bb.1.entry:
; X64-NEXT: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
@@ -196,6 +198,7 @@ define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg
; X86-NEXT: $eax = COPY [[UV]](s32)
; X86-NEXT: $edx = COPY [[UV1]](s32)
; X86-NEXT: RET 0, implicit $eax, implicit $edx
+ ;
; X64-LABEL: name: test_i64_args_8
; X64: bb.1.entry:
; X64-NEXT: liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
@@ -234,6 +237,7 @@ define float @test_float_args(float %arg1, float %arg2) {
; X86-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s32) from %fixed-stack.0)
; X86-NEXT: $fp0 = COPY [[LOAD1]](s32)
; X86-NEXT: RET 0, implicit $fp0
+ ;
; X64-LABEL: name: test_float_args
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $xmm0, $xmm1
@@ -254,6 +258,7 @@ define double @test_double_args(double %arg1, double %arg2) {
; X86-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s64) from %fixed-stack.0)
; X86-NEXT: $fp0 = COPY [[LOAD1]](s64)
; X86-NEXT: RET 0, implicit $fp0
+ ;
; X64-LABEL: name: test_double_args
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $xmm0, $xmm1
@@ -274,6 +279,7 @@ define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
; X86-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X86-NEXT: $xmm0 = COPY [[COPY1]](<4 x s32>)
; X86-NEXT: RET 0, implicit $xmm0
+ ;
; X64-LABEL: name: test_v4i32_args
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $xmm0, $xmm1
@@ -297,6 +303,7 @@ define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
; X86-NEXT: $xmm0 = COPY [[UV]](<4 x s32>)
; X86-NEXT: $xmm1 = COPY [[UV1]](<4 x s32>)
; X86-NEXT: RET 0, implicit $xmm0, implicit $xmm1
+ ;
; X64-LABEL: name: test_v8i32_args
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $xmm0, $xmm1
@@ -315,6 +322,7 @@ define void @test_void_return() {
; X86-LABEL: name: test_void_return
; X86: bb.1.entry:
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_void_return
; X64: bb.1.entry:
; X64-NEXT: RET 0
@@ -329,6 +337,7 @@ define ptr @test_memop_i32(ptr %p1) {
; X86-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.0, align 16)
; X86-NEXT: $eax = COPY [[LOAD]](p0)
; X86-NEXT: RET 0, implicit $eax
+ ;
; X64-LABEL: name: test_memop_i32
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $rdi
@@ -347,6 +356,7 @@ define void @test_trivial_call() {
; X86-NEXT: CALLpcrel32 @trivial_callee, csr_32, implicit $esp, implicit $ssp
; X86-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_trivial_call
; X64: bb.1 (%ir-block.0):
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
@@ -377,6 +387,7 @@ define void @test_simple_arg(i32 %in0, i32 %in1) {
; X86-NEXT: CALLpcrel32 @simple_arg_callee, csr_32, implicit $esp, implicit $ssp
; X86-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_simple_arg
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $edi, $esi
@@ -435,6 +446,7 @@ define void @test_simple_arg8_call(i32 %in0) {
; X86-NEXT: CALLpcrel32 @simple_arg8_callee, csr_32, implicit $esp, implicit $ssp
; X86-NEXT: ADJCALLSTACKUP32 32, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_simple_arg8_call
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $edi
@@ -478,6 +490,7 @@ define i32 @test_simple_return_callee() {
; X86-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
; X86-NEXT: $eax = COPY [[ADD]](s32)
; X86-NEXT: RET 0, implicit $eax
+ ;
; X64-LABEL: name: test_simple_return_callee
; X64: bb.1 (%ir-block.0):
; X64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
@@ -521,6 +534,7 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X86-NEXT: $xmm0 = COPY [[UV2]](<4 x s32>)
; X86-NEXT: $xmm1 = COPY [[UV3]](<4 x s32>)
; X86-NEXT: RET 0, implicit $xmm0, implicit $xmm1
+ ;
; X64-LABEL: name: test_split_return_callee
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $xmm0, $xmm1, $xmm2, $xmm3
@@ -559,6 +573,7 @@ define void @test_indirect_call(ptr %func) {
; X86-NEXT: CALL32r [[LOAD]](p0), csr_32, implicit $esp, implicit $ssp
; X86-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_indirect_call
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $rdi
@@ -603,6 +618,7 @@ define void @test_abi_exts_call(ptr %addr) {
; X86-NEXT: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
; X86-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_abi_exts_call
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $rdi
@@ -654,6 +670,7 @@ define void @test_variadic_call_1(ptr %addr_ptr, ptr %val_ptr) {
; X86-NEXT: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
; X86-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_variadic_call_1
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $rdi, $rsi
@@ -696,6 +713,7 @@ define void @test_variadic_call_2(ptr %addr_ptr, ptr %val_ptr) {
; X86-NEXT: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
; X86-NEXT: ADJCALLSTACKUP32 12, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_variadic_call_2
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $rdi, $rsi
@@ -728,6 +746,7 @@ define <32 x float> @test_return_v32f32() {
; X86-NEXT: G_STORE [[BUILD_VECTOR]](<32 x s32>), [[LOAD]](p0) :: (store (<32 x s32>))
; X86-NEXT: $eax = COPY [[LOAD]](p0)
; X86-NEXT: RET 0
+ ;
; X64-LABEL: name: test_return_v32f32
; X64: bb.1 (%ir-block.0):
; X64-NEXT: liveins: $rdi
@@ -757,6 +776,7 @@ define float @test_call_v32f32() {
; X86-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[LOAD]](<32 x s32>), [[C]](s32)
; X86-NEXT: $fp0 = COPY [[EVEC]](s32)
; X86-NEXT: RET 0, implicit $fp0
+ ;
; X64-LABEL: name: test_call_v32f32
; X64: bb.1 (%ir-block.0):
; X64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 7