commit ff34d401ff385ef7173ca612432b4ea717fff690
author    Richard Smith <richard-llvm@metafoo.co.uk>  2012-04-12 05:08:17 +0000
committer Richard Smith <richard-llvm@metafoo.co.uk>  2012-04-12 05:08:17 +0000
tree      de759a7c65405730906e7d4ffd5f25cbbd5bcf69
parent    b92bd4b3271b7892abe9fd8c74fb54a27ad702ab

Implement support for 18 of the GNU-compatible __atomic builtins.

This is not quite sufficient for libstdc++'s <atomic>: we still need
__atomic_test_and_set and __atomic_clear, and may need a more complete
__atomic_is_lock_free implementation. We are also missing an implementation of
__atomic_always_lock_free, __atomic_nand_fetch, and __atomic_fetch_nand, but
those aren't needed for libstdc++.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@154579 91177308-0d34-0410-b5e6-96231b3b80d8
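As a usage sketch (mirroring the tests added below, which define their own
memory_order enum; all names here are illustrative):

  typedef enum memory_order {
    memory_order_relaxed, memory_order_consume, memory_order_acquire,
    memory_order_release, memory_order_acq_rel, memory_order_seq_cst
  } memory_order;

  void example(int *counter) {
    // GNU builtins operate on plain (non-_Atomic) objects.
    int old = __atomic_fetch_add(counter, 1, memory_order_seq_cst);     // old value
    int updated = __atomic_add_fetch(counter, 1, memory_order_seq_cst); // new value

    int expected = old;
    // The fourth argument is the 'weak' flag; success/failure orders follow.
    _Bool ok = __atomic_compare_exchange_n(counter, &expected, updated, 0,
                                           memory_order_acq_rel,
                                           memory_order_acquire);
    (void)ok;
  }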
 include/clang/AST/Expr.h                   |  37
 include/clang/Basic/Builtins.def           |  66
 include/clang/Basic/DiagnosticSemaKinds.td |  41
 lib/AST/Expr.cpp                           |  46
 lib/AST/StmtPrinter.cpp                    |  54
 lib/CodeGen/CGExpr.cpp                     | 225
 lib/Sema/SemaChecking.cpp                  | 310
 test/CodeGen/atomic-ops.c                  | 143
 test/CodeGen/atomic_init.c                 |  14
 test/Misc/serialized-diags.c               |   3
 test/Sema/atomic-ops.c                     |  74
 11 files changed, 732 insertions(+), 281 deletions(-)
diff --git a/include/clang/AST/Expr.h b/include/clang/AST/Expr.h
index 0db9195a27..558bd00ba9 100644
--- a/include/clang/AST/Expr.h
+++ b/include/clang/AST/Expr.h
@@ -4470,14 +4470,21 @@ public:
/// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
/// __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the
-/// similarly-named C++11 instructions. All of these instructions take one
-/// primary pointer and at least one memory order.
+/// similarly-named C++11 instructions, and __c11 variants for <stdatomic.h>.
+/// All of these instructions take one primary pointer and at least one memory
+/// order.
class AtomicExpr : public Expr {
public:
- enum AtomicOp { Load, Store, CmpXchgStrong, CmpXchgWeak, Xchg,
- Add, Sub, And, Or, Xor, Init };
+ enum AtomicOp {
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) AO ## ID,
+#include "clang/Basic/Builtins.def"
+ // Avoid trailing comma
+ BI_First = 0
+ };
+
private:
- enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, END_EXPR };
+ enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, WEAK, END_EXPR };
Stmt* SubExprs[END_EXPR];
unsigned NumSubExprs;
SourceLocation BuiltinLoc, RParenLoc;
@@ -4503,19 +4510,25 @@ public:
return cast<Expr>(SubExprs[ORDER]);
}
Expr *getVal1() const {
- if (Op == Init)
+ if (Op == AO__c11_atomic_init)
return cast<Expr>(SubExprs[ORDER]);
- assert(NumSubExprs >= 3);
+ assert(NumSubExprs > VAL1);
return cast<Expr>(SubExprs[VAL1]);
}
Expr *getOrderFail() const {
- assert(NumSubExprs == 5);
+ assert(NumSubExprs > ORDER_FAIL);
return cast<Expr>(SubExprs[ORDER_FAIL]);
}
Expr *getVal2() const {
- assert(NumSubExprs == 5);
+ if (Op == AO__atomic_exchange)
+ return cast<Expr>(SubExprs[ORDER_FAIL]);
+ assert(NumSubExprs > VAL2);
return cast<Expr>(SubExprs[VAL2]);
}
+ Expr *getWeak() const {
+ assert(NumSubExprs > WEAK);
+ return cast<Expr>(SubExprs[WEAK]);
+ }
AtomicOp getOp() const { return Op; }
unsigned getNumSubExprs() { return NumSubExprs; }
@@ -4527,8 +4540,10 @@ public:
}
bool isCmpXChg() const {
- return getOp() == AtomicExpr::CmpXchgStrong ||
- getOp() == AtomicExpr::CmpXchgWeak;
+ return getOp() == AO__c11_atomic_compare_exchange_strong ||
+ getOp() == AO__c11_atomic_compare_exchange_weak ||
+ getOp() == AO__atomic_compare_exchange ||
+ getOp() == AO__atomic_compare_exchange_n;
}
SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
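The AO-prefixed enumerators are produced by the usual X-macro pattern over
Builtins.def. A stripped-down, self-contained sketch of the same trick (the
op list here is made up; Clang's real list lives in Builtins.def):

  /* List each op once, then expand the list several ways. */
  #define ATOMIC_OPS(X) \
    X(load)             \
    X(store)            \
    X(exchange)

  enum AtomicOp {
  #define MAKE_ENUM(ID) AO_##ID,
    ATOMIC_OPS(MAKE_ENUM)
  #undef MAKE_ENUM
    AO_Count /* also avoids a trailing comma, like BI_First = 0 above */
  };

  static const char *AtomicOpName[] = {
  #define MAKE_NAME(ID) #ID,
    ATOMIC_OPS(MAKE_NAME)
  #undef MAKE_NAME
  };

StmtPrinter (below) uses the same include-with-redefinition trick to map each
AO enumerator back to its builtin's name.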
diff --git a/include/clang/Basic/Builtins.def b/include/clang/Basic/Builtins.def
index 82f0463d8c..2823190ca4 100644
--- a/include/clang/Basic/Builtins.def
+++ b/include/clang/Basic/Builtins.def
@@ -594,37 +594,55 @@ BUILTIN(__sync_swap_4, "iiD*i.", "tn")
BUILTIN(__sync_swap_8, "LLiLLiD*LLi.", "tn")
BUILTIN(__sync_swap_16, "LLLiLLLiD*LLLi.", "tn")
+// Some of our atomics builtins are handled by AtomicExpr rather than
+// as normal builtin CallExprs. This macro is used for such builtins.
+#ifndef ATOMIC_BUILTIN
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
// C11 _Atomic operations for <stdatomic.h>.
-BUILTIN(__c11_atomic_load, "v.", "t")
-BUILTIN(__c11_atomic_store, "v.", "t")
-BUILTIN(__c11_atomic_exchange, "v.", "t")
-BUILTIN(__c11_atomic_compare_exchange_strong, "v.", "t")
-BUILTIN(__c11_atomic_compare_exchange_weak, "v.", "t")
-BUILTIN(__c11_atomic_fetch_add, "v.", "t")
-BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
-BUILTIN(__c11_atomic_fetch_and, "v.", "t")
-BUILTIN(__c11_atomic_fetch_or, "v.", "t")
-BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_init, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_strong, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_weak, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
BUILTIN(__c11_atomic_thread_fence, "vi", "n")
BUILTIN(__c11_atomic_signal_fence, "vi", "n")
-BUILTIN(__c11_atomic_init, "v.", "t")
BUILTIN(__c11_atomic_is_lock_free, "iz", "n")
-// FIXME: Convert these to implementing GNU atomic builtins.
-BUILTIN(__atomic_load, "v.", "t")
-BUILTIN(__atomic_store, "v.", "t")
-BUILTIN(__atomic_exchange, "v.", "t")
-BUILTIN(__atomic_compare_exchange_strong, "v.", "t")
-BUILTIN(__atomic_compare_exchange_weak, "v.", "t")
-BUILTIN(__atomic_fetch_add, "v.", "t")
-BUILTIN(__atomic_fetch_sub, "v.", "t")
-BUILTIN(__atomic_fetch_and, "v.", "t")
-BUILTIN(__atomic_fetch_or, "v.", "t")
-BUILTIN(__atomic_fetch_xor, "v.", "t")
+// GNU atomic builtins.
+ATOMIC_BUILTIN(__atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__atomic_load_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__atomic_add_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_sub_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
+BUILTIN(__atomic_test_and_set, "vv*i", "n")
+BUILTIN(__atomic_clear, "vb*i", "n")
BUILTIN(__atomic_thread_fence, "vi", "n")
BUILTIN(__atomic_signal_fence, "vi", "n")
-BUILTIN(__atomic_init, "v.", "t")
-BUILTIN(__atomic_is_lock_free, "iz", "n")
+BUILTIN(__atomic_always_lock_free, "izv*", "n")
+BUILTIN(__atomic_is_lock_free, "izv*", "n")
+
+#undef ATOMIC_BUILTIN
// Non-overloaded atomic builtins.
BUILTIN(__sync_synchronize, "v.", "n")
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td
index 39bd5ac94e..86d139da0c 100644
--- a/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/include/clang/Basic/DiagnosticSemaKinds.td
@@ -4367,22 +4367,22 @@ def ext_typecheck_convert_pointer_int : ExtWarn<
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1; "
- "%select{|dereference with *|"
- "take the address with &|"
- "remove *|"
- "remove &}3">,
+ "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
InGroup<IntConversion>;
def ext_typecheck_convert_int_pointer : ExtWarn<
"incompatible integer to pointer conversion "
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1; "
- "%select{|dereference with *|"
- "take the address with &|"
- "remove *|"
- "remove &}3">,
+ "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
InGroup<IntConversion>;
def ext_typecheck_convert_pointer_void_func : Extension<
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
@@ -4403,10 +4403,10 @@ def ext_typecheck_convert_incompatible_pointer : ExtWarn<
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
"with an expression of type|to parameter of type|to type}2 %1"
- "%select{|dereference with *|"
- "take the address with &|"
- "remove *|"
- "remove &}3">,
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
InGroup<IncompatiblePointerTypes>;
def ext_typecheck_convert_discards_qualifiers : ExtWarn<
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
@@ -4522,12 +4522,15 @@ def err_atomic_builtin_pointer_size : Error<
def err_atomic_op_needs_atomic : Error<
"first argument to atomic operation must be a pointer to _Atomic "
"type (%0 invalid)">;
+def err_atomic_op_needs_trivial_copy : Error<
+ "first argument to atomic operation must be a pointer to a trivially-copyable"
+ " type (%0 invalid)">;
def err_atomic_op_needs_atomic_int_or_ptr : Error<
- "first argument to atomic operation must be a pointer to atomic "
- "integer or pointer (%0 invalid)">;
-def err_atomic_op_logical_needs_atomic_int : Error<
- "first argument to logical atomic operation must be a pointer to atomic "
- "integer (%0 invalid)">;
+ "first argument to atomic operation must be a pointer to %select{|atomic }0"
+ "integer or pointer (%1 invalid)">;
+def err_atomic_op_bitwise_needs_atomic_int : Error<
+ "first argument to bitwise atomic operation must be a pointer to "
+ "%select{|atomic }0integer (%1 invalid)">;
def err_deleted_function_use : Error<"attempt to use a deleted function">;
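The %select changes move the '; ' separator into the select alternatives, so
alternative 0 (no suggested fix) no longer leaves a dangling semicolon.
Concretely, a diagnostic that previously printed as

  warning: incompatible integer to pointer conversion initializing 'char *' with an expression of type 'int'; [-Wint-conversion]

now prints as

  warning: incompatible integer to pointer conversion initializing 'char *' with an expression of type 'int' [-Wint-conversion]

(see the test/Misc/serialized-diags.c update below).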
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 2bb79a0a4a..eb185b2c5a 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -3858,20 +3858,44 @@ AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
switch (Op) {
- case Init:
- case Load:
+ case AO__c11_atomic_init:
+ case AO__c11_atomic_load:
+ case AO__atomic_load_n:
return 2;
- case Store:
- case Xchg:
- case Add:
- case Sub:
- case And:
- case Or:
- case Xor:
+
+ case AO__c11_atomic_store:
+ case AO__c11_atomic_exchange:
+ case AO__atomic_load:
+ case AO__atomic_store:
+ case AO__atomic_store_n:
+ case AO__atomic_exchange_n:
+ case AO__c11_atomic_fetch_add:
+ case AO__c11_atomic_fetch_sub:
+ case AO__c11_atomic_fetch_and:
+ case AO__c11_atomic_fetch_or:
+ case AO__c11_atomic_fetch_xor:
+ case AO__atomic_fetch_add:
+ case AO__atomic_fetch_sub:
+ case AO__atomic_fetch_and:
+ case AO__atomic_fetch_or:
+ case AO__atomic_fetch_xor:
+ case AO__atomic_add_fetch:
+ case AO__atomic_sub_fetch:
+ case AO__atomic_and_fetch:
+ case AO__atomic_or_fetch:
+ case AO__atomic_xor_fetch:
return 3;
- case CmpXchgStrong:
- case CmpXchgWeak:
+
+ case AO__atomic_exchange:
+ return 4;
+
+ case AO__c11_atomic_compare_exchange_strong:
+ case AO__c11_atomic_compare_exchange_weak:
return 5;
+
+ case AO__atomic_compare_exchange:
+ case AO__atomic_compare_exchange_n:
+ return 6;
}
llvm_unreachable("unknown atomic op");
}
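These counts follow AtomicExpr's fixed operand layout (PTR, ORDER, VAL1,
ORDER_FAIL, VAL2, WEAK). For the six-operand __atomic_compare_exchange, for
instance, the source arguments are stored permuted as follows (matching the
GNUCmpXchg case in SemaChecking.cpp below):

  SubExprs[PTR]        = arg 0  // object
  SubExprs[ORDER]      = arg 4  // success order
  SubExprs[VAL1]       = arg 1  // expected
  SubExprs[ORDER_FAIL] = arg 5  // failure order
  SubExprs[VAL2]       = arg 2  // desired
  SubExprs[WEAK]       = arg 3  // weak flag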
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 651b88b5d3..3a44183e20 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1108,52 +1108,34 @@ void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
const char *Name = 0;
switch (Node->getOp()) {
- case AtomicExpr::Init:
- Name = "__c11_atomic_init(";
- break;
- case AtomicExpr::Load:
- Name = "__c11_atomic_load(";
- break;
- case AtomicExpr::Store:
- Name = "__c11_atomic_store(";
- break;
- case AtomicExpr::CmpXchgStrong:
- Name = "__c11_atomic_compare_exchange_strong(";
- break;
- case AtomicExpr::CmpXchgWeak:
- Name = "__c11_atomic_compare_exchange_weak(";
- break;
- case AtomicExpr::Xchg:
- Name = "__c11_atomic_exchange(";
- break;
- case AtomicExpr::Add:
- Name = "__c11_atomic_fetch_add(";
- break;
- case AtomicExpr::Sub:
- Name = "__c11_atomic_fetch_sub(";
- break;
- case AtomicExpr::And:
- Name = "__c11_atomic_fetch_and(";
- break;
- case AtomicExpr::Or:
- Name = "__c11_atomic_fetch_or(";
- break;
- case AtomicExpr::Xor:
- Name = "__c11_atomic_fetch_xor(";
- break;
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case AtomicExpr::AO ## ID: \
+ Name = #ID "("; \
+ break;
+#include "clang/Basic/Builtins.def"
}
OS << Name;
+
+ // AtomicExpr stores its subexpressions in a permuted order.
PrintExpr(Node->getPtr());
OS << ", ";
- if (Node->getOp() != AtomicExpr::Load) {
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
+ Node->getOp() != AtomicExpr::AO__atomic_load_n) {
PrintExpr(Node->getVal1());
OS << ", ";
}
- if (Node->isCmpXChg()) {
+ if (Node->getOp() == AtomicExpr::AO__atomic_exchange ||
+ Node->isCmpXChg()) {
PrintExpr(Node->getVal2());
OS << ", ";
}
- if (Node->getOp() != AtomicExpr::Init)
+ if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+ Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) {
+ PrintExpr(Node->getWeak());
+ OS << ", ";
+ }
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_init)
PrintExpr(Node->getOrder());
if (Node->isCmpXChg()) {
OS << ", ";
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 260fa5b529..147e7276bc 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -2688,7 +2688,17 @@ static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
- if (E->isCmpXChg()) {
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
// Note that cmpxchg only supports specifying one ordering and
// doesn't support weak cmpxchg, at least at the moment.
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -2705,7 +2715,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Load) {
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order);
Load->setAlignment(Size);
@@ -2715,7 +2727,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Store) {
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n: {
assert(!Dest && "Store does not return a value");
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
@@ -2726,26 +2740,66 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
- switch (E->getOp()) {
- case AtomicExpr::CmpXchgWeak:
- case AtomicExpr::CmpXchgStrong:
- case AtomicExpr::Store:
- case AtomicExpr::Init:
- case AtomicExpr::Load: assert(0 && "Already handled!");
- case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
- case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
- case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break;
- case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break;
- case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break;
- case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break;
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ Op = llvm::AtomicRMWInst::Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ Op = llvm::AtomicRMWInst::Add;
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ Op = llvm::AtomicRMWInst::Sub;
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ Op = llvm::AtomicRMWInst::And;
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ Op = llvm::AtomicRMWInst::Or;
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ Op = llvm::AtomicRMWInst::Xor;
+ break;
}
+
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
llvm::AtomicRMWInst *RMWI =
CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
RMWI->setVolatile(E->isVolatile());
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+
+ // For __atomic_*_fetch operations, perform the operation again to
+ // determine the value which was written.
+ llvm::Value *Result = RMWI;
+ if (PostOp)
+ Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
StoreDest->setAlignment(Align);
}
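The PostOp re-computation works because each __atomic_op_fetch builtin is the
corresponding __atomic_fetch_op plus one ordinary instruction applied to the
result; in source terms (a sketch, not generated code):

  int a = __atomic_add_fetch(p, n, order);      // new value
  int b = __atomic_fetch_add(p, n, order) + n;  // old value, op redone locally

Both forms lower to a single atomicrmw; the extra add is non-atomic, which is
fine because it only recomputes the value the atomicrmw already wrote.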
@@ -2770,7 +2824,9 @@ static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
- QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+ QualType MemTy = AtomicTy;
+ if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+ MemTy = AT->getValueType();
CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
@@ -2784,7 +2840,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
- if (E->getOp() == AtomicExpr::Init) {
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
assert(!Dest && "Init does not return a value");
if (!hasAggregateLLVMType(E->getVal1()->getType())) {
llvm::StoreInst *Store =
@@ -2805,26 +2861,80 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
}
Order = EmitScalarExpr(E->getOrder());
- if (E->isCmpXChg()) {
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ Dest = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ Val1 = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ Dest = EmitScalarExpr(E->getVal2());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
Val1 = EmitScalarExpr(E->getVal1());
- Val2 = EmitValToTemp(*this, E->getVal2());
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ Val2 = EmitScalarExpr(E->getVal2());
+ else
+ Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
- } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
- MemTy->isPointerType()) {
- // For pointers, we're required to do a bit of math: adding 1 to an int*
- // is not the same as adding 1 to a uintptr_t.
- QualType Val1Ty = E->getVal1()->getType();
- llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
- CharUnits PointeeIncAmt =
- getContext().getTypeSizeInChars(MemTy->getPointeeType());
- Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
- Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
- EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
- } else if (E->getOp() != AtomicExpr::Load) {
+ // Evaluate and discard the 'weak' argument.
+ if (E->getNumSubExprs() == 6)
+ EmitScalarExpr(E->getWeak());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ if (MemTy->isPointerType()) {
+ // For pointer arithmetic, we're required to do a bit of math:
+ // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ break;
+ }
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
Val1 = EmitValToTemp(*this, E->getVal1());
+ break;
}
- if (E->getOp() != AtomicExpr::Store && !Dest)
+ if (!E->getType()->isVoidType() && !Dest)
Dest = CreateMemTemp(E->getType(), ".atomicdst");
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
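The pointer case above is observable from source: atomic pointer arithmetic
counts in elements, not bytes, so CodeGen pre-multiplies the operand by the
pointee size. A sketch (the fp2a test below checks exactly this, expecting
'store i32 4' before the atomicrmw):

  void retreat(int **q) {
    // Subtracting 1 from an int* subtracts sizeof(int) from the address.
    __atomic_fetch_sub(q, 1, memory_order_relaxed);
  }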
@@ -2846,9 +2956,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// optimisation benefit possible from a libcall version of a weak compare
// and exchange.
// bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
- // void *desired, int success, int failure)
- case AtomicExpr::CmpXchgWeak:
- case AtomicExpr::CmpXchgStrong:
+ // void *desired, int success, int failure)
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
@@ -2861,7 +2973,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
// void __atomic_exchange(size_t size, void *mem, void *val, void *return,
// int order)
- case AtomicExpr::Xchg:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
getContext().VoidPtrTy);
@@ -2869,13 +2983,17 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().VoidPtrTy);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
- case AtomicExpr::Store:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
LibCallName = "__atomic_store";
Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
getContext().VoidPtrTy);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
- case AtomicExpr::Load:
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
LibCallName = "__atomic_load";
Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
getContext().VoidPtrTy);
@@ -2903,7 +3021,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
if (E->isCmpXChg())
return Res;
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), Dest);
}
@@ -2943,24 +3061,31 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
default: // invalid order
// We should not ever get here normally, but it's hard to
// enforce that in general.
- break;
+ break;
}
- if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
// Long case, when Order isn't obviously constant.
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
// Create all the relevant BB's
llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
*AcqRelBB = 0, *SeqCstBB = 0;
MonotonicBB = createBasicBlock("monotonic", CurFn);
- if (E->getOp() != AtomicExpr::Store)
+ if (!IsStore)
AcquireBB = createBasicBlock("acquire", CurFn);
- if (E->getOp() != AtomicExpr::Load)
+ if (!IsLoad)
ReleaseBB = createBasicBlock("release", CurFn);
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+ if (!IsLoad && !IsStore)
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
@@ -2977,7 +3102,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Monotonic);
Builder.CreateBr(ContBB);
- if (E->getOp() != AtomicExpr::Store) {
+ if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
@@ -2985,14 +3110,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
SI->addCase(Builder.getInt32(1), AcquireBB);
SI->addCase(Builder.getInt32(2), AcquireBB);
}
- if (E->getOp() != AtomicExpr::Load) {
+ if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(3), ReleaseBB);
}
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
+ if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
@@ -3007,7 +3132,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Cleanup and return
Builder.SetInsertPoint(ContBB);
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
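The basic-block dance above handles a memory order that is not a compile-time
constant: each order permitted for the operation gets its own block, and a
switch dispatches on the runtime value, e.g.:

  int load_with(int *p, memory_order mo) {
    return __atomic_load_n(p, mo); // IsLoad: no release/acqrel blocks emitted
  }

The monotonic block doubles as the switch default, so invalid or relaxed
order values degrade to a monotonic operation.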
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index c4ed0b0e52..1606e336ee 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -250,41 +250,11 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
return SemaBuiltinAtomicOverloaded(move(TheCallResult));
- case Builtin::BI__atomic_load:
- case Builtin::BI__c11_atomic_load:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Load);
- case Builtin::BI__atomic_store:
- case Builtin::BI__c11_atomic_store:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Store);
- case Builtin::BI__atomic_init:
- case Builtin::BI__c11_atomic_init:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Init);
- case Builtin::BI__atomic_exchange:
- case Builtin::BI__c11_atomic_exchange:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xchg);
- case Builtin::BI__atomic_compare_exchange_strong:
- case Builtin::BI__c11_atomic_compare_exchange_strong:
- return SemaAtomicOpsOverloaded(move(TheCallResult),
- AtomicExpr::CmpXchgStrong);
- case Builtin::BI__atomic_compare_exchange_weak:
- case Builtin::BI__c11_atomic_compare_exchange_weak:
- return SemaAtomicOpsOverloaded(move(TheCallResult),
- AtomicExpr::CmpXchgWeak);
- case Builtin::BI__atomic_fetch_add:
- case Builtin::BI__c11_atomic_fetch_add:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Add);
- case Builtin::BI__atomic_fetch_sub:
- case Builtin::BI__c11_atomic_fetch_sub:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Sub);
- case Builtin::BI__atomic_fetch_and:
- case Builtin::BI__c11_atomic_fetch_and:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::And);
- case Builtin::BI__atomic_fetch_or:
- case Builtin::BI__c11_atomic_fetch_or:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Or);
- case Builtin::BI__atomic_fetch_xor:
- case Builtin::BI__c11_atomic_fetch_xor:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xor);
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case Builtin::BI##ID: \
+ return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::AO##ID);
+#include "clang/Basic/Builtins.def"
case Builtin::BI__builtin_annotation:
if (CheckBuiltinAnnotationString(*this, TheCall->getArg(1)))
return ExprError();
@@ -515,75 +485,175 @@ bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
return false;
}
-ExprResult
-Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op) {
+ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+ AtomicExpr::AtomicOp Op) {
CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
- // All these operations take one of the following four forms:
- // T __c11_atomic_load(_Atomic(T)*, int) (loads)
- // T* __c11_atomic_add(_Atomic(T*)*, ptrdiff_t, int) (pointer add/sub)
- // int __c11_atomic_compare_exchange_strong(_Atomic(T)*, T*, T, int, int)
- // (cmpxchg)
- // T __c11_atomic_exchange(_Atomic(T)*, T, int) (everything else)
- // where T is an appropriate type, and the int paremeterss are for orderings.
- unsigned NumVals = 1;
- unsigned NumOrders = 1;
- if (Op == AtomicExpr::Load) {
- NumVals = 0;
- } else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong) {
- NumVals = 2;
- NumOrders = 2;
- }
- if (Op == AtomicExpr::Init)
- NumOrders = 0;
-
- if (TheCall->getNumArgs() < NumVals+NumOrders+1) {
+ // All these operations take one of the following forms:
+ enum {
+ // C __c11_atomic_init(A *, C)
+ Init,
+ // C __c11_atomic_load(A *, int)
+ Load,
+ // void __atomic_load(A *, CP, int)
+ Copy,
+ // C __c11_atomic_add(A *, M, int)
+ Arithmetic,
+ // C __atomic_exchange_n(A *, CP, int)
+ Xchg,
+ // void __atomic_exchange(A *, C *, CP, int)
+ GNUXchg,
+ // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
+ C11CmpXchg,
+ // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
+ GNUCmpXchg
+ } Form = Init;
+ const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 };
+ const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 };
+ // where:
+ // C is an appropriate type,
+ // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
+ // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
+ // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
+ // the int parameters are for orderings.
+
+ assert(AtomicExpr::AO__c11_atomic_init == 0 &&
+ AtomicExpr::AO__c11_atomic_fetch_xor + 1 == AtomicExpr::AO__atomic_load
+ && "need to update code for modified C11 atomics");
+ bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init &&
+ Op <= AtomicExpr::AO__c11_atomic_fetch_xor;
+ bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
+ Op == AtomicExpr::AO__atomic_store_n ||
+ Op == AtomicExpr::AO__atomic_exchange_n ||
+ Op == AtomicExpr::AO__atomic_compare_exchange_n;
+ bool IsAddSub = false;
+
+ switch (Op) {
+ case AtomicExpr::AO__c11_atomic_init:
+ Form = Init;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ Form = Load;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ Form = Copy;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ IsAddSub = true;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ Form = Arithmetic;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ Form = Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Form = GNUXchg;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ Form = C11CmpXchg;
+ break;
+
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ Form = GNUCmpXchg;
+ break;
+ }
+
+ // Check we have the right number of arguments.
+ if (TheCall->getNumArgs() < NumArgs[Form]) {
Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
- << 0 << NumVals+NumOrders+1 << TheCall->getNumArgs()
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return ExprError();
- } else if (TheCall->getNumArgs() > NumVals+NumOrders+1) {
- Diag(TheCall->getArg(NumVals+NumOrders+1)->getLocStart(),
+ } else if (TheCall->getNumArgs() > NumArgs[Form]) {
+ Diag(TheCall->getArg(NumArgs[Form])->getLocStart(),
diag::err_typecheck_call_too_many_args)
- << 0 << NumVals+NumOrders+1 << TheCall->getNumArgs()
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return ExprError();
}
- // Inspect the first argument of the atomic operation. This should always be
- // a pointer to an _Atomic type.
+ // Inspect the first argument of the atomic operation.
Expr *Ptr = TheCall->getArg(0);
Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get();
const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
if (!pointerType) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- QualType AtomTy = pointerType->getPointeeType();
- if (!AtomTy->isAtomicType()) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
- << Ptr->getType() << Ptr->getSourceRange();
- return ExprError();
+ // For a __c11 builtin, this should be a pointer to an _Atomic type.
+ QualType AtomTy = pointerType->getPointeeType(); // 'A'
+ QualType ValType = AtomTy; // 'C'
+ if (IsC11) {
+ if (!AtomTy->isAtomicType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ ValType = AtomTy->getAs<AtomicType>()->getValueType();
}
- QualType ValType = AtomTy->getAs<AtomicType>()->getValueType();
- if ((Op == AtomicExpr::Add || Op == AtomicExpr::Sub) &&
- !ValType->isIntegerType() && !ValType->isPointerType()) {
+ // For an arithmetic operation, the implied arithmetic must be well-formed.
+ if (Form == Arithmetic) {
+ // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
+ if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ if (!IsAddSub && !ValType->isIntegerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ // For __atomic_*_n operations, the value type must be a scalar integral or
+ // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
- << Ptr->getType() << Ptr->getSourceRange();
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- if (!ValType->isIntegerType() &&
- (Op == AtomicExpr::And || Op == AtomicExpr::Or || Op == AtomicExpr::Xor)){
- Diag(DRE->getLocStart(), diag::err_atomic_op_logical_needs_atomic_int)
+ if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context)) {
+ // For GNU atomics, require a trivially-copyable type. This is not part of
+ // the GNU atomics specification, but we enforce it for sanity.
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy)
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
+ // FIXME: For any builtin other than a load, the ValType must not be
+ // const-qualified.
+
switch (ValType.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -593,63 +663,107 @@ Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op)
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
+ // FIXME: Can this happen? By this point, ValType should be known
+ // to be trivially copyable.
Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
<< ValType << Ptr->getSourceRange();
return ExprError();
}
QualType ResultType = ValType;
- if (Op == AtomicExpr::Store || Op == AtomicExpr::Init)
+ if (Form == Copy || Form == GNUXchg || Form == Init)
ResultType = Context.VoidTy;
- else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong)
+ else if (Form == C11CmpXchg || Form == GNUCmpXchg)
ResultType = Context.BoolTy;
+ // The type of a parameter passed 'by value'. In the GNU atomics, such
+ // arguments are actually passed as pointers.
+ QualType ByValType = ValType; // 'CP'
+ if (!IsC11 && !IsN)
+ ByValType = Ptr->getType();
+
// The first argument --- the pointer --- has a fixed type; we
// deduce the types of the rest of the arguments accordingly. Walk
// the remaining arguments, converting them to the deduced value type.
- for (unsigned i = 1; i != NumVals+NumOrders+1; ++i) {
- ExprResult Arg = TheCall->getArg(i);
+ for (unsigned i = 1; i != NumArgs[Form]; ++i) {
QualType Ty;
- if (i < NumVals+1) {
- // The second argument to a cmpxchg is a pointer to the data which will
- // be exchanged. The second argument to a pointer add/subtract is the
- // amount to add/subtract, which must be a ptrdiff_t. The third
- // argument to a cmpxchg and the second argument in all other cases
- // is the type of the value.
- if (i == 1 && (Op == AtomicExpr::CmpXchgWeak ||
- Op == AtomicExpr::CmpXchgStrong))
- Ty = Context.getPointerType(ValType.getUnqualifiedType());
- else if (!ValType->isIntegerType() &&
- (Op == AtomicExpr::Add || Op == AtomicExpr::Sub))
- Ty = Context.getPointerDiffType();
- else
- Ty = ValType;
+ if (i < NumVals[Form] + 1) {
+ switch (i) {
+ case 1:
+ // The second argument is the non-atomic operand. For arithmetic, this
+ // is always passed by value, and for a compare_exchange it is always
+ // passed by address. For the rest, GNU uses by-address and C11 uses
+ // by-value.
+ assert(Form != Load);
+ if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
+ Ty = ValType;
+ else if (Form == Copy || Form == Xchg)
+ Ty = ByValType;
+ else if (Form == Arithmetic)
+ Ty = Context.getPointerDiffType();
+ else
+ Ty = Context.getPointerType(ValType.getUnqualifiedType());
+ break;
+ case 2:
+ // The third argument to compare_exchange / GNU exchange is a
+ // (pointer to a) desired value.
+ Ty = ByValType;
+ break;
+ case 3:
+ // The fourth argument to GNU compare_exchange is a 'weak' flag.
+ Ty = Context.BoolTy;
+ break;
+ }
} else {
// The order(s) are always converted to int.
Ty = Context.IntTy;
}
+
InitializedEntity Entity =
InitializedEntity::InitializeParameter(Context, Ty, false);
+ ExprResult Arg = TheCall->getArg(i);
Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
if (Arg.isInvalid())
return true;
TheCall->setArg(i, Arg.get());
}
+ // Permute the arguments into a 'consistent' order.
SmallVector<Expr*, 5> SubExprs;
SubExprs.push_back(Ptr);
- if (Op == AtomicExpr::Load) {
- SubExprs.push_back(TheCall->getArg(1)); // Order
- } else if (Op == AtomicExpr::Init) {
+ switch (Form) {
+ case Init:
+ // Note, AtomicExpr::getVal1() has a special case for this atomic.
SubExprs.push_back(TheCall->getArg(1)); // Val1
- } else if (Op != AtomicExpr::CmpXchgWeak && Op != AtomicExpr::CmpXchgStrong) {
+ break;
+ case Load:
+ SubExprs.push_back(TheCall->getArg(1)); // Order
+ break;
+ case Copy:
+ case Arithmetic:
+ case Xchg:
SubExprs.push_back(TheCall->getArg(2)); // Order
SubExprs.push_back(TheCall->getArg(1)); // Val1
- } else {
+ break;
+ case GNUXchg:
+ // Note, AtomicExpr::getVal2() has a special case for this atomic.
+ SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case C11CmpXchg:
SubExprs.push_back(TheCall->getArg(3)); // Order
SubExprs.push_back(TheCall->getArg(1)); // Val1
SubExprs.push_back(TheCall->getArg(4)); // OrderFail
SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case GNUCmpXchg:
+ SubExprs.push_back(TheCall->getArg(4)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(5)); // OrderFail
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ SubExprs.push_back(TheCall->getArg(3)); // Weak
+ break;
}
return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
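Restating the forms comment above as pseudo-prototypes for C = int
(illustrative only; the builtins are really variadic, "v.", and
custom-type-checked):

  int   __c11_atomic_load(volatile _Atomic(int) *obj, int order);
  void  __atomic_load(int *obj, int *ret, int order);           // Copy form
  int   __atomic_load_n(int *obj, int order);                   // Load form
  int   __atomic_fetch_add(int *obj, int val, int order);       // Arithmetic form
  _Bool __atomic_compare_exchange(int *obj, int *expected, int *desired,
                                  _Bool weak, int success, int failure); // GNUCmpXchg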
diff --git a/test/CodeGen/atomic-ops.c b/test/CodeGen/atomic-ops.c
index c94f14023e..24692c7585 100644
--- a/test/CodeGen/atomic-ops.c
+++ b/test/CodeGen/atomic-ops.c
@@ -22,23 +22,80 @@ int fi1(_Atomic(int) *i) {
return __c11_atomic_load(i, memory_order_seq_cst);
}
+int fi1a(int *i) {
+ // CHECK: @fi1a
+ // CHECK: load atomic i32* {{.*}} seq_cst
+ int v;
+ __atomic_load(i, &v, memory_order_seq_cst);
+ return v;
+}
+
+int fi1b(int *i) {
+ // CHECK: @fi1b
+ // CHECK: load atomic i32* {{.*}} seq_cst
+ return __atomic_load_n(i, memory_order_seq_cst);
+}
+
void fi2(_Atomic(int) *i) {
// CHECK: @fi2
// CHECK: store atomic i32 {{.*}} seq_cst
__c11_atomic_store(i, 1, memory_order_seq_cst);
}
-void fi3(_Atomic(int) *i) {
+void fi2a(int *i) {
+ // CHECK: @fi2a
+ // CHECK: store atomic i32 {{.*}} seq_cst
+ int v = 1;
+ __atomic_store(i, &v, memory_order_seq_cst);
+}
+
+void fi2b(int *i) {
+ // CHECK: @fi2b
+ // CHECK: store atomic i32 {{.*}} seq_cst
+ __atomic_store_n(i, 1, memory_order_seq_cst);
+}
+
+int fi3(_Atomic(int) *i) {
// CHECK: @fi3
// CHECK: atomicrmw and
- __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
+ // CHECK-NOT: and
+ return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
+}
+
+int fi3a(int *i) {
+ // CHECK: @fi3a
+ // CHECK: atomicrmw xor
+ // CHECK-NOT: xor
+ return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
+}
+
+int fi3b(int *i) {
+ // CHECK: @fi3b
+ // CHECK: atomicrmw add
+ // CHECK: add
+ return __atomic_add_fetch(i, 1, memory_order_seq_cst);
+}
+
+_Bool fi4(_Atomic(int) *i) {
+ // CHECK: @fi4
+ // CHECK: cmpxchg i32*
+ int cmp = 0;
+ return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
+}
+
+_Bool fi4a(int *i) {
+ // CHECK: @fi4
+ // CHECK: cmpxchg i32*
+ int cmp = 0;
+ int desired = 1;
+ return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}
-void fi4(_Atomic(int) *i) {
+_Bool fi4b(int *i) {
// CHECK: @fi4
// CHECK: cmpxchg i32*
int cmp = 0;
- __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
+ return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}
float ff1(_Atomic(float) *d) {
@@ -70,6 +127,13 @@ int* fp2(_Atomic(int*) *p) {
return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}
+int *fp2a(int **p) {
+ // CHECK: @fp2a
+ // CHECK: store i32 4
+ // CHECK: atomicrmw sub {{.*}} monotonic
+ return __atomic_fetch_sub(p, 1, memory_order_relaxed);
+}
+
_Complex float fc(_Atomic(_Complex float) *c) {
// CHECK: @fc
// CHECK: atomicrmw xchg i64*
@@ -83,6 +147,20 @@ X fs(_Atomic(X) *c) {
return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}
+X fsa(X *c, X *d) {
+ // CHECK: @fsa
+ // CHECK: atomicrmw xchg i32*
+ X ret;
+ __atomic_exchange(c, d, &ret, memory_order_seq_cst);
+ return ret;
+}
+
+_Bool fsb(_Bool *c) {
+ // CHECK: @fsb
+ // CHECK: atomicrmw xchg i8*
+ return __atomic_exchange_n(c, 1, memory_order_seq_cst);
+}
+
int lock_free() {
// CHECK: @lock_free
// CHECK: ret i32 1
@@ -96,33 +174,80 @@ int lock_free() {
struct foo {
int big[128];
};
+struct bar {
+ char c[3];
+};
+struct bar smallThing, thing1, thing2;
+struct foo bigThing;
_Atomic(struct foo) bigAtomic;
void structAtomicStore() {
// CHECK: @structAtomicStore
struct foo f = {0};
__c11_atomic_store(&bigAtomic, f, 5);
- // CHECK: call void @__atomic_store(i32 512, i8* bitcast (%struct.foo* @bigAtomic to i8*),
+ // CHECK: call void @__atomic_store(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
+
+ struct bar b = {0};
+ __atomic_store(&smallThing, &b, 5);
+ // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing
+
+ __atomic_store(&bigThing, &f, 5);
+ // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
}
void structAtomicLoad() {
// CHECK: @structAtomicLoad
struct foo f = __c11_atomic_load(&bigAtomic, 5);
- // CHECK: call void @__atomic_load(i32 512, i8* bitcast (%struct.foo* @bigAtomic to i8*),
+ // CHECK: call void @__atomic_load(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
+
+ struct bar b;
+ __atomic_load(&smallThing, &b, 5);
+ // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing
+
+ __atomic_load(&bigThing, &f, 5);
+ // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
}
struct foo structAtomicExchange() {
// CHECK: @structAtomicExchange
struct foo f = {0};
+ struct foo old;
+ __atomic_exchange(&f, &bigThing, &old, 5);
+ // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),
+
return __c11_atomic_exchange(&bigAtomic, f, 5);
- // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast (%struct.foo* @bigAtomic to i8*),
+ // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}
int structAtomicCmpExchange() {
// CHECK: @structAtomicCmpExchange
+ _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
+ // CHECK: call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
+
struct foo f = {0};
struct foo g = {0};
g.big[12] = 12;
- return __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
- // CHECK: call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast (%struct.foo* @bigAtomic to i8*),
+ return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
+ // CHECK: call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
+}
+
+// Check that no atomic operations are used in any initialisation of _Atomic
+// types.
+_Atomic(int) atomic_init_i = 42;
+
+// CHECK: @atomic_init_foo
+void atomic_init_foo()
+{
+ // CHECK-NOT: }
+ // CHECK-NOT: atomic
+ // CHECK: store
+ _Atomic(int) j = 12;
+
+ // CHECK-NOT: }
+ // CHECK-NOT: atomic
+ // CHECK: store
+ __c11_atomic_init(&j, 42);
+
+ // CHECK-NOT: atomic
+ // CHECK: }
}
#endif
diff --git a/test/CodeGen/atomic_init.c b/test/CodeGen/atomic_init.c
deleted file mode 100644
index 6f773bef48..0000000000
--- a/test/CodeGen/atomic_init.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
-
-// Check that no atomic operations are used in any initialisation of _Atomic
-// types.
-
-_Atomic(int) i = 42;
-
-void foo()
-{
- _Atomic(int) j = 12; // CHECK: store
- // CHECK-NOT: atomic
- __c11_atomic_init(&j, 42); // CHECK: store
- // CHECK-NOT: atomic
-}
diff --git a/test/Misc/serialized-diags.c b/test/Misc/serialized-diags.c
index 73ac0507fc..ae4611ba66 100644
--- a/test/Misc/serialized-diags.c
+++ b/test/Misc/serialized-diags.c
@@ -58,11 +58,10 @@ void rdar11040133() {
// CHECK: +-Range: {{.*[/\\]}}serialized-diags.c:22:3 {{.*[/\\]}}serialized-diags.c:22:6
// CHECK: +-Range: {{.*[/\\]}}serialized-diags.c:20:15 {{.*[/\\]}}serialized-diags.c:20:16
// CHECK: +-{{.*[/\\]}}serialized-diags.c:19:1: note: 'taz' declared here []
-// CHECK: {{.*[/\\]}}serialized-diags.h:5:7: warning: incompatible integer to pointer conversion initializing 'char *' with an expression of type 'int'; [-Wint-conversion]
+// CHECK: {{.*[/\\]}}serialized-diags.h:5:7: warning: incompatible integer to pointer conversion initializing 'char *' with an expression of type 'int' [-Wint-conversion]
// CHECK: Range: {{.*[/\\]}}serialized-diags.h:5:16 {{.*[/\\]}}serialized-diags.h:5:17
// CHECK: +-{{.*[/\\]}}serialized-diags.c:26:10: note: in file included from {{.*[/\\]}}serialized-diags.c:26: []
// CHECK: Number FIXITs = 0
// CHECK: {{.*[/\\]}}serialized-diags.c:30:12: warning: unused variable 'x'
// CHECK: Number FIXITs = 0
// CHECK: Number of diagnostics: 6
-
diff --git a/test/Sema/atomic-ops.c b/test/Sema/atomic-ops.c
index 0560a747ab..0e5634f1d5 100644
--- a/test/Sema/atomic-ops.c
+++ b/test/Sema/atomic-ops.c
@@ -2,36 +2,96 @@
// Basic parsing/Sema tests for __c11_atomic_*
-// FIXME: Need to implement __c11_atomic_is_lock_free
+// FIXME: Need to implement:
+// __c11_atomic_is_lock_free
+// __atomic_is_lock_free
+// __atomic_always_lock_free
+// __atomic_test_and_set
+// __atomic_clear
typedef enum memory_order {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
memory_order_release, memory_order_acq_rel, memory_order_seq_cst
} memory_order;
-void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d) {
+struct S { char c[3]; };
+
+void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d,
+ int *I, int **P, float *D, struct S *s1, struct S *s2) {
+ __c11_atomic_init(I, 5); // expected-error {{pointer to _Atomic}}
__c11_atomic_load(0); // expected-error {{too few arguments to function}}
__c11_atomic_load(0,0,0); // expected-error {{too many arguments to function}}
- __c11_atomic_store(0,0,0); // expected-error {{first argument to atomic operation}}
- __c11_atomic_store((int*)0,0,0); // expected-error {{first argument to atomic operation}}
+ __c11_atomic_store(0,0,0); // expected-error {{first argument to atomic builtin must be a pointer}}
+ __c11_atomic_store((int*)0,0,0); // expected-error {{first argument to atomic operation must be a pointer to _Atomic}}
__c11_atomic_load(i, memory_order_seq_cst);
__c11_atomic_load(p, memory_order_seq_cst);
__c11_atomic_load(d, memory_order_seq_cst);
+ int load_n_1 = __atomic_load_n(I, memory_order_relaxed);
+ int *load_n_2 = __atomic_load_n(P, memory_order_relaxed);
+ float load_n_3 = __atomic_load_n(D, memory_order_relaxed); // expected-error {{must be a pointer to integer or pointer}}
+ __atomic_load_n(s1, memory_order_relaxed); // expected-error {{must be a pointer to integer or pointer}}
+
+ __atomic_load(i, I, memory_order_relaxed); // expected-error {{must be a pointer to a trivially-copyable type}}
+ __atomic_load(I, i, memory_order_relaxed); // expected-warning {{passing '_Atomic(int) *' to parameter of type 'int *'}}
+ __atomic_load(I, *P, memory_order_relaxed);
+ __atomic_load(I, *P, memory_order_relaxed, 42); // expected-error {{too many arguments}}
+ (int)__atomic_load(I, I, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
+ __atomic_load(s1, s2, memory_order_acquire);
+
__c11_atomic_store(i, 1, memory_order_seq_cst);
__c11_atomic_store(p, 1, memory_order_seq_cst); // expected-warning {{incompatible integer to pointer conversion}}
(int)__c11_atomic_store(d, 1, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
+ __atomic_store_n(I, 4, memory_order_release);
+ __atomic_store_n(I, 4.0, memory_order_release);
+ __atomic_store_n(I, P, memory_order_release); // expected-warning {{parameter of type 'int'}}
+ __atomic_store_n(i, 1, memory_order_release); // expected-error {{must be a pointer to integer or pointer}}
+ __atomic_store_n(s1, *s2, memory_order_release); // expected-error {{must be a pointer to integer or pointer}}
+
+ __atomic_store(I, *P, memory_order_release);
+ __atomic_store(s1, s2, memory_order_release);
+ __atomic_store(i, I, memory_order_release); // expected-error {{trivially-copyable}}
+
+ int exchange_1 = __c11_atomic_exchange(i, 1, memory_order_seq_cst);
+ int exchange_2 = __c11_atomic_exchange(I, 1, memory_order_seq_cst); // expected-error {{must be a pointer to _Atomic}}
+ int exchange_3 = __atomic_exchange_n(i, 1, memory_order_seq_cst); // expected-error {{must be a pointer to integer or pointer}}
+ int exchange_4 = __atomic_exchange_n(I, 1, memory_order_seq_cst);
+
+ __atomic_exchange(s1, s2, s2, memory_order_seq_cst);
+ __atomic_exchange(s1, I, P, memory_order_seq_cst); // expected-warning 2{{parameter of type 'struct S *'}}
+ (int)__atomic_exchange(s1, s2, s2, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
+
__c11_atomic_fetch_add(i, 1, memory_order_seq_cst);
__c11_atomic_fetch_add(p, 1, memory_order_seq_cst);
__c11_atomic_fetch_add(d, 1, memory_order_seq_cst); // expected-error {{must be a pointer to atomic integer or pointer}}
+ __atomic_fetch_add(i, 3, memory_order_seq_cst); // expected-error {{pointer to integer or pointer}}
+ __atomic_fetch_sub(I, 3, memory_order_seq_cst);
+ __atomic_fetch_sub(P, 3, memory_order_seq_cst);
+ __atomic_fetch_sub(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer or pointer}}
+ __atomic_fetch_sub(s1, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer or pointer}}
+
__c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
__c11_atomic_fetch_and(p, 1, memory_order_seq_cst); // expected-error {{must be a pointer to atomic integer}}
__c11_atomic_fetch_and(d, 1, memory_order_seq_cst); // expected-error {{must be a pointer to atomic integer}}
- __c11_atomic_compare_exchange_strong(i, 0, 1, memory_order_seq_cst, memory_order_seq_cst);
- __c11_atomic_compare_exchange_strong(p, 0, (int*)1, memory_order_seq_cst, memory_order_seq_cst);
- __c11_atomic_compare_exchange_strong(d, (int*)0, 1, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{incompatible pointer types}}
+ __atomic_fetch_and(i, 3, memory_order_seq_cst); // expected-error {{pointer to integer}}
+ __atomic_fetch_or(I, 3, memory_order_seq_cst);
+ __atomic_fetch_xor(P, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer}}
+ __atomic_fetch_or(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer}}
+ __atomic_fetch_and(s1, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer}}
+
+ _Bool cmpexch_1 = __c11_atomic_compare_exchange_strong(i, 0, 1, memory_order_seq_cst, memory_order_seq_cst);
+ _Bool cmpexch_2 = __c11_atomic_compare_exchange_strong(p, 0, (int*)1, memory_order_seq_cst, memory_order_seq_cst);
+ _Bool cmpexch_3 = __c11_atomic_compare_exchange_strong(d, (int*)0, 1, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{incompatible pointer types}}
+
+ _Bool cmpexch_4 = __atomic_compare_exchange_n(I, I, 5, 1, memory_order_seq_cst, memory_order_seq_cst);
+ _Bool cmpexch_5 = __atomic_compare_exchange_n(I, P, 5, 0, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{; dereference with *}}
+ _Bool cmpexch_6 = __atomic_compare_exchange_n(I, I, P, 0, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{passing 'int **' to parameter of type 'int'}}
+
+ _Bool cmpexch_7 = __atomic_compare_exchange(I, I, 5, 1, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{passing 'int' to parameter of type 'int *'}}
+ _Bool cmpexch_8 = __atomic_compare_exchange(I, P, I, 0, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{; dereference with *}}
+ _Bool cmpexch_9 = __atomic_compare_exchange(I, I, I, 0, memory_order_seq_cst, memory_order_seq_cst);
}