summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Benjamin Kramer <benny.kra@googlemail.com>  2012-11-19 17:32:14 +0000
committer Benjamin Kramer <benny.kra@googlemail.com>  2012-11-19 17:32:14 +0000
commit e8552efa949bad0fdf2f3221d37083c5056a0abf (patch)
tree   fa59cd888083c299fbae2ec5299a03ead0a9f166
parent 7420b76579ea6658bd0019804dcd77a0c4fa0a08 (diff)
Merge r168260 from trunk:
Enable inlining of 4 byte atomic ops on ppc32, 8 byte atomic ops on ppc64. Also fixes a bit/byte mismatch when checking if a target supports atomic ops of a certain size.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/branches/release_32@168313 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Basic/Targets.cpp     |  7
-rw-r--r--  lib/CodeGen/CGExpr.cpp    |  9
-rw-r--r--  test/CodeGen/ppc-atomics.c | 35
3 files changed, 45 insertions(+), 6 deletions(-)
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 26a4f41442..ab02e6f3b5 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -1037,6 +1037,9 @@ public:
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
}
+
+ // PPC32 supports atomics up to 4 bytes.
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
}
virtual BuiltinVaListKind getBuiltinVaListKind() const {
@@ -1065,7 +1068,9 @@ public:
DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
"v128:128:128-n32:64";
-
+
+ // PPC64 supports atomics up to 8 bytes.
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
virtual BuiltinVaListKind getBuiltinVaListKind() const {
return TargetInfo::CharPtrBuiltinVaList;
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index d1a2889f9a..63cc5b515d 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -3164,11 +3164,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
uint64_t Size = sizeChars.getQuantity();
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
unsigned Align = alignChars.getQuantity();
- unsigned MaxInlineWidth =
- getContext().getTargetInfo().getMaxAtomicInlineWidth();
- bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
-
-
+ unsigned MaxInlineWidthInBits =
+ getContext().getTargetInfo().getMaxAtomicInlineWidth();
+ bool UseLibcall = (Size != Align ||
+ getContext().toBits(sizeChars) > MaxInlineWidthInBits);
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
diff --git a/test/CodeGen/ppc-atomics.c b/test/CodeGen/ppc-atomics.c
new file mode 100644
index 0000000000..3fcb0fbec9
--- /dev/null
+++ b/test/CodeGen/ppc-atomics.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -triple powerpc-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=32
+// RUN: %clang_cc1 -triple powerpc64-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=64
+
+unsigned char c1, c2;
+unsigned short s1, s2;
+unsigned int i1, i2;
+unsigned long long ll1, ll2;
+
+enum memory_order {
+ memory_order_relaxed,
+ memory_order_consume,
+ memory_order_acquire,
+ memory_order_release,
+ memory_order_acq_rel,
+ memory_order_seq_cst
+};
+
+void test1(void) {
+ (void)__atomic_load(&c1, &c2, memory_order_seq_cst);
+ (void)__atomic_load(&s1, &s2, memory_order_seq_cst);
+ (void)__atomic_load(&i1, &i2, memory_order_seq_cst);
+ (void)__atomic_load(&ll1, &ll2, memory_order_seq_cst);
+
+// 32: define void @test1
+// 32: load atomic i8* @c1 seq_cst
+// 32: load atomic i16* @s1 seq_cst
+// 32: load atomic i32* @i1 seq_cst
+// 32: call void @__atomic_load(i32 8, i8* bitcast (i64* @ll1 to i8*)
+
+// 64: define void @test1
+// 64: load atomic i8* @c1 seq_cst
+// 64: load atomic i16* @s1 seq_cst
+// 64: load atomic i32* @i1 seq_cst
+// 64: load atomic i64* @ll1 seq_cst
+}