Diffstat (limited to 'lib/CodeGen/CGObjC.cpp')
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 305
1 file changed, 188 insertions(+), 117 deletions(-)
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 9c66ff0e8f..d5906cf994 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -1,9 +1,8 @@
//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,6 +14,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -22,7 +22,6 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
@@ -62,7 +61,12 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// Get the method.
const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
const Expr *SubExpr = E->getSubExpr();
- assert(BoxingMethod && "BoxingMethod is null");
+
+ if (E->isExpressibleAsConstantInitializer()) {
+ ConstantEmitter ConstEmitter(CGM);
+ return ConstEmitter.tryEmitAbstract(E, E->getType());
+ }
+
assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
Selector Sel = BoxingMethod->getSelector();
@@ -160,9 +164,8 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
if (ALE) {
// Emit the element and store it to the appropriate array slot.
const Expr *Rhs = ALE->getElement(i);
- LValue LV = MakeAddrLValue(
- Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
- ElementType, AlignmentSource::Decl);
+ LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
+ ElementType, AlignmentSource::Decl);
llvm::Value *value = EmitScalarExpr(Rhs);
EmitStoreThroughLValue(RValue::get(value), LV, true);
@@ -172,17 +175,15 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
} else {
// Emit the key and store it to the appropriate array slot.
const Expr *Key = DLE->getKeyValueElement(i).Key;
- LValue KeyLV = MakeAddrLValue(
- Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
- ElementType, AlignmentSource::Decl);
+ LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
+ ElementType, AlignmentSource::Decl);
llvm::Value *keyValue = EmitScalarExpr(Key);
EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
// Emit the value and store it to the appropriate array slot.
const Expr *Value = DLE->getKeyValueElement(i).Value;
- LValue ValueLV = MakeAddrLValue(
- Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
- ElementType, AlignmentSource::Decl);
+ LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
+ ElementType, AlignmentSource::Decl);
llvm::Value *valueValue = EmitScalarExpr(Value);
EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
if (TrackNeededObjects) {
@@ -427,6 +428,41 @@ tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
return None;
}
+/// Instead of '[[MyClass alloc] init]', try to generate
+/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
+/// caller side, as well as the optimized objc_alloc.
+static Optional<llvm::Value *>
+tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
+ auto &Runtime = CGF.getLangOpts().ObjCRuntime;
+ if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
+ return None;
+
+ // Match the exact pattern '[[MyClass alloc] init]'.
+ Selector Sel = OME->getSelector();
+ if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
+ !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
+ Sel.getNameForSlot(0) != "init")
+ return None;
+
+ // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]'.
+ auto *SubOME =
+ dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParens());
+ if (!SubOME)
+ return None;
+ Selector SubSel = SubOME->getSelector();
+ if (SubOME->getReceiverKind() != ObjCMessageExpr::Class ||
+ !SubOME->getType()->isObjCObjectPointerType() ||
+ !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
+ return None;
+
+ QualType ReceiverType = SubOME->getClassReceiver();
+ const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
+ const ObjCInterfaceDecl *ID = ObjTy->getInterface();
+ assert(ID && "null interface should be impossible here");
+ llvm::Value *Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
+ return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
+}
+
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
@@ -448,6 +484,9 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
}
}
+ if (Optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
+ return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));
+
// We don't retain the receiver in delegate init calls, and this is
// safe because the receiver value is always loaded from 'self',
// which we zero out. We don't want to Block_copy block receivers,
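[Illustration, not part of the patch: the two hunks above collapse the two message sends behind '[[MyClass alloc] init]' into one runtime call. The declarations below are simplified stand-ins for the Objective-C runtime's types and entry points.]

    // Simplified stand-ins for the Objective-C runtime (illustration only).
    typedef void *id;
    typedef void *Class;
    typedef void *SEL;
    extern "C" SEL sel_registerName(const char *name);
    extern "C" id objc_msgSend(id self, SEL op);
    extern "C" id objc_alloc_init(Class cls);

    // Without the specialization, '[[MyClass alloc] init]' lowers to roughly:
    id allocInitViaMessageSends(Class cls) {
      id obj = objc_msgSend((id)cls, sel_registerName("alloc"));
      return objc_msgSend(obj, sel_registerName("init"));
    }

    // With tryEmitSpecializedAllocInit, the same expression becomes a single,
    // smaller call that the runtime can also service more cheaply:
    id allocInitViaCombinedEntryPoint(Class cls) {
      return objc_alloc_init(cls);
    }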
@@ -685,7 +724,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
- llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
callee, ReturnValueSlot(), args);
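[Background for the many llvm::Constant* to llvm::FunctionCallee changes in this patch: FunctionCallee bundles the callee pointer with its FunctionType, which is why call sites now go through getCallee() to reach the underlying declaration. A minimal sketch against the plain LLVM C++ API; the function names are illustrative.]

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void functionCalleeSketch() {
      LLVMContext Ctx;
      Module M("sketch", Ctx);

      // getOrInsertFunction returns a FunctionCallee: the callee Value plus its
      // FunctionType, instead of a bare llvm::Constant*.
      Type *I8Ptr = Type::getInt8PtrTy(Ctx);
      FunctionType *FTy = FunctionType::get(I8Ptr, {I8Ptr}, /*isVarArg=*/false);
      FunctionCallee FC = M.getOrInsertFunction("objc_retain", FTy);

      // Reaching the declaration now goes through getCallee(), which is why the
      // patch writes dyn_cast<llvm::Function>(fn.getCallee()) before adding
      // attributes such as NonLazyBind.
      if (auto *F = dyn_cast<Function>(FC.getCallee()))
        F->addFnAttr(Attribute::NonLazyBind);

      // FunctionCallee is accepted directly by IRBuilder::CreateCall.
      Function *Caller =
          Function::Create(FTy, Function::ExternalLinkage, "caller", &M);
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", Caller);
      IRBuilder<> Builder(BB);
      Value *Ret = Builder.CreateCall(FC, {&*Caller->arg_begin()});
      Builder.CreateRet(Ret);
    }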
@@ -949,8 +988,8 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Constant *copyCppAtomicObjectFn =
- CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
+ llvm::FunctionCallee copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
@@ -1026,8 +1065,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
case PropertyImplStrategy::GetSetProperty: {
- llvm::Constant *getPropertyFn =
- CGM.getObjCRuntime().GetPropertyGetFunction();
+ llvm::FunctionCallee getPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
if (!getPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
return;
@@ -1052,10 +1091,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- llvm::Instruction *CallInstruction;
- RValue RV = EmitCall(
- getTypes().arrangeBuiltinFunctionCall(propType, args),
- callee, ReturnValueSlot(), args, &CallInstruction);
+ llvm::CallBase *CallInstruction;
+ RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
+ getContext().getObjCIdType(), args),
+ callee, ReturnValueSlot(), args, &CallInstruction);
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
call->setTailCall();
@@ -1170,7 +1209,7 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// FIXME: should this really always be false?
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
- llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
@@ -1207,8 +1246,8 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Constant *fn =
- CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
+ llvm::FunctionCallee fn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
@@ -1302,14 +1341,13 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
case PropertyImplStrategy::GetSetProperty:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
- llvm::Constant *setOptimizedPropertyFn = nullptr;
- llvm::Constant *setPropertyFn = nullptr;
+ llvm::FunctionCallee setOptimizedPropertyFn = nullptr;
+ llvm::FunctionCallee setPropertyFn = nullptr;
if (UseOptimizedSetter(CGM)) {
// 10.8 and iOS 6.0 code and GC is off
setOptimizedPropertyFn =
- CGM.getObjCRuntime()
- .GetOptimizedPropertySetFunction(strategy.isAtomic(),
- strategy.isCopy());
+ CGM.getObjCRuntime().GetOptimizedPropertySetFunction(
+ strategy.isAtomic(), strategy.isCopy());
if (!setOptimizedPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
return;
@@ -1560,8 +1598,8 @@ QualType CodeGenFunction::TypeOfSelfObject() {
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
- llvm::Constant *EnumerationMutationFnPtr =
- CGM.getObjCRuntime().EnumerationMutationFunction();
+ llvm::FunctionCallee EnumerationMutationFnPtr =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
if (!EnumerationMutationFnPtr) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
@@ -1669,8 +1707,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Save the initial mutations value. This is the value at an
// address that was written into the state object by
// countByEnumeratingWithState:objects:count:.
- Address StateMutationsPtrPtr = Builder.CreateStructGEP(
- StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
+ Address StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
llvm::Value *StateMutationsPtr
= Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
@@ -1751,8 +1789,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fetch the buffer out of the enumeration state.
// TODO: this pointer should actually be invariant between
// refreshes, which would help us do certain loop optimizations.
- Address StateItemsPtr = Builder.CreateStructGEP(
- StatePtr, 1, getPointerSize(), "stateitems.ptr");
+ Address StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
llvm::Value *EnumStateItems =
Builder.CreateLoad(StateItemsPtr, "stateitems");
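[The CreateStructGEP calls above no longer pass a byte offset because the offset follows from the struct layout. A standalone sketch with the raw IRBuilder and DataLayout; the struct type is a made-up stand-in for the enumeration state.]

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void structGEPSketch() {
      LLVMContext Ctx;
      Module M("sketch", Ctx);

      // Made-up stand-in for the fast-enumeration state: { i64, i8**, i64* }.
      Type *I64 = Type::getInt64Ty(Ctx);
      Type *I8PtrPtr = Type::getInt8PtrTy(Ctx)->getPointerTo();
      StructType *StateTy =
          StructType::create(Ctx, {I64, I8PtrPtr, I64->getPointerTo()}, "state");

      Function *F = Function::Create(
          FunctionType::get(Type::getVoidTy(Ctx), {StateTy->getPointerTo()},
                            false),
          Function::ExternalLinkage, "use_state", &M);
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
      IRBuilder<> Builder(BB);

      // Field index 2 ('mutationsptr' in the hunk above); only the index is
      // passed, no byte offset.
      Value *StatePtr = &*F->arg_begin();
      Builder.CreateStructGEP(StateTy, StatePtr, 2, "mutationsptr.ptr");

      // The offset the old CGBuilder API took as an argument is recoverable
      // from the layout, which is roughly what it now computes internally.
      const StructLayout *SL = M.getDataLayout().getStructLayout(StateTy);
      (void)SL->getElementOffset(2);

      Builder.CreateRetVoid();
    }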
@@ -1891,7 +1929,7 @@ llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use;
if (!fn)
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use);
@@ -1900,8 +1938,7 @@ void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
EmitNounwindRuntimeCall(fn, values);
}
-static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
- llvm::Constant *RTF) {
+static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
if (auto *F = dyn_cast<llvm::Function>(RTF)) {
// If the target runtime doesn't naturally support ARC, emit weak
// references to the runtime support library. We don't really
@@ -1913,15 +1950,18 @@ static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
}
}
+static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
+ llvm::FunctionCallee RTF) {
+ setARCRuntimeFunctionLinkage(CGM, RTF.getCallee());
+}
+
/// Perform an operation having the signature
/// i8* (i8*)
/// where a null input causes a no-op and returns null.
-static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
- llvm::Value *value,
- llvm::Type *returnType,
- llvm::Constant *&fn,
- llvm::Intrinsic::ID IntID,
- bool isTailCall = false) {
+static llvm::Value *emitARCValueOperation(
+ CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType,
+ llvm::Function *&fn, llvm::Intrinsic::ID IntID,
+ llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) {
if (isa<llvm::ConstantPointerNull>(value))
return value;
@@ -1936,8 +1976,7 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
// Call the function.
llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
- if (isTailCall)
- call->setTailCall();
+ call->setTailCallKind(tailKind);
// Cast the result back to the original type.
return CGF.Builder.CreateBitCast(call, origType);
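[emitARCValueOperation now takes a llvm::CallInst::TailCallKind instead of a bool, so callers can request 'notail' as well as 'tail'. A small sketch of what the kinds mean at the IR level; the callee name is only an example.]

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void tailCallKindSketch() {
      LLVMContext Ctx;
      Module M("sketch", Ctx);
      Type *I8Ptr = Type::getInt8PtrTy(Ctx);
      FunctionType *FTy = FunctionType::get(I8Ptr, {I8Ptr}, false);
      FunctionCallee Callee =
          M.getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy);

      Function *F = Function::Create(FTy, Function::ExternalLinkage, "caller", &M);
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
      IRBuilder<> Builder(BB);

      CallInst *Call = Builder.CreateCall(Callee, {&*F->arg_begin()});

      // TCK_None leaves the call unmarked, TCK_Tail emits 'tail call', and
      // TCK_NoTail emits 'notail call', the kind the ARC retain path above
      // selects when the target wants tail calls suppressed.
      Call->setTailCallKind(CallInst::TCK_NoTail);

      Builder.CreateRet(Call);
    }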
@@ -1945,9 +1984,8 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**)
-static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
- Address addr,
- llvm::Constant *&fn,
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
+ llvm::Function *&fn,
llvm::Intrinsic::ID IntID) {
if (!fn) {
fn = CGF.CGM.getIntrinsic(IntID);
@@ -1970,10 +2008,9 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**, i8*)
-static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
- Address addr,
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
llvm::Value *value,
- llvm::Constant *&fn,
+ llvm::Function *&fn,
llvm::Intrinsic::ID IntID,
bool ignored) {
assert(addr.getElementType() == value->getType());
@@ -1998,10 +2035,8 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// void (i8**, i8**)
-static void emitARCCopyOperation(CodeGenFunction &CGF,
- Address dst,
- Address src,
- llvm::Constant *&fn,
+static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
+ llvm::Function *&fn,
llvm::Intrinsic::ID IntID) {
assert(dst.getType() == src.getType());
@@ -2023,8 +2058,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
llvm::Value *value,
llvm::Type *returnType,
- llvm::Constant *&fn,
- StringRef fnName) {
+ llvm::FunctionCallee &fn,
+ StringRef fnName, bool MayThrow) {
if (isa<llvm::ConstantPointerNull>(value))
return value;
@@ -2034,7 +2069,7 @@ static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName);
// We have Native ARC, so set nonlazybind attribute for performance
- if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
if (fnName == "objc_retain")
f->addFnAttr(llvm::Attribute::NonLazyBind);
}
@@ -2044,10 +2079,14 @@ static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
// Call the function.
- llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
+ llvm::CallBase *Inst = nullptr;
+ if (MayThrow)
+ Inst = CGF.EmitCallOrInvoke(fn, value);
+ else
+ Inst = CGF.EmitNounwindRuntimeCall(fn, value);
// Cast the result back to the original type.
- return CGF.Builder.CreateBitCast(call, origType);
+ return CGF.Builder.CreateBitCast(Inst, origType);
}
/// Produce the code to do a retain. Based on the type, calls one of:
@@ -2122,14 +2161,10 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
// with this marker yet, so leave a breadcrumb for the ARC
// optimizer to pick up.
} else {
- llvm::NamedMDNode *metadata =
- CGF.CGM.getModule().getOrInsertNamedMetadata(
- "clang.arc.retainAutoreleasedReturnValueMarker");
- assert(metadata->getNumOperands() <= 1);
- if (metadata->getNumOperands() == 0) {
- auto &ctx = CGF.getLLVMContext();
- metadata->addOperand(llvm::MDNode::get(ctx,
- llvm::MDString::get(ctx, assembly)));
+ const char *markerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
+ if (!CGF.CGM.getModule().getModuleFlag(markerKey)) {
+ auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly);
+ CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, markerKey, str);
}
}
}
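[The retainAutoreleasedReturnValue marker string now lives in a module flag rather than named metadata; a minimal sketch of the two Module calls involved. The marker string below is only an example, the real one is target-specific.]

    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void markerModuleFlagSketch(Module &M) {
      const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
      // getModuleFlag returns null until the flag has been added, so the flag
      // is only ever set once per module.
      if (!M.getModuleFlag(MarkerKey)) {
        // Example marker string only; the real string is target-specific.
        auto *Str = MDString::get(M.getContext(), "mov\tfp, fp");
        // Module::Error turns conflicting values into a hard error when
        // modules are linked together.
        M.addModuleFlag(Module::Error, MarkerKey, Str);
      }
    }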
@@ -2147,9 +2182,15 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
- return emitARCValueOperation(*this, value, nullptr,
- CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
- llvm::Intrinsic::objc_retainAutoreleasedReturnValue);
+ llvm::CallInst::TailCallKind tailKind =
+ CGM.getTargetCodeGenInfo()
+ .shouldSuppressTailCallsOfRetainAutoreleasedReturnValue()
+ ? llvm::CallInst::TCK_NoTail
+ : llvm::CallInst::TCK_None;
+ return emitARCValueOperation(
+ *this, value, nullptr,
+ CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
+ llvm::Intrinsic::objc_retainAutoreleasedReturnValue, tailKind);
}
/// Claim a possibly-autoreleased return value at +0. This is only
@@ -2173,7 +2214,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
ARCPreciseLifetime_t precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2219,7 +2260,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
bool ignored) {
assert(addr.getElementType() == value->getType());
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2286,7 +2327,7 @@ CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
llvm::Intrinsic::objc_autoreleaseReturnValue,
- /*isTailCall*/ true);
+ llvm::CallInst::TCK_Tail);
}
/// Do a fused retain/autorelease of the given object.
@@ -2296,7 +2337,7 @@ CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
- /*isTailCall*/ true);
+ llvm::CallInst::TCK_Tail);
}
/// Do a fused retain/autorelease of the given object.
@@ -2375,7 +2416,7 @@ void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2423,7 +2464,7 @@ void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
/// Produce the code to do a objc_autoreleasepool_push.
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
+ llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2439,8 +2480,8 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
if (getInvokeDest()) {
// Call the runtime method not the intrinsic if we are handling exceptions
- llvm::Constant *&fn =
- CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
+ llvm::FunctionCallee &fn =
+ CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
@@ -2451,7 +2492,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
// objc_autoreleasePoolPop can throw.
EmitRuntimeCallOrInvoke(fn, value);
} else {
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
+ llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
if (!fn) {
fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
setARCRuntimeFunctionLinkage(CGM, fn);
@@ -2495,7 +2536,7 @@ llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
llvm::Type *resultType) {
return emitObjCValueOperation(*this, value, resultType,
CGM.getObjCEntrypoints().objc_alloc,
- "objc_alloc");
+ "objc_alloc", /*MayThrow=*/true);
}
/// Allocate the given objc object.
@@ -2504,7 +2545,14 @@ llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
llvm::Type *resultType) {
return emitObjCValueOperation(*this, value, resultType,
CGM.getObjCEntrypoints().objc_allocWithZone,
- "objc_allocWithZone");
+ "objc_allocWithZone", /*MayThrow=*/true);
+}
+
+llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
+ llvm::Type *resultType) {
+ return emitObjCValueOperation(*this, value, resultType,
+ CGM.getObjCEntrypoints().objc_alloc_init,
+ "objc_alloc_init", /*MayThrow=*/true);
}
/// Produce the code to do a primitive release.
@@ -2545,18 +2593,20 @@ void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
llvm::Type *returnType) {
- return emitObjCValueOperation(*this, value, returnType,
- CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
- "objc_autorelease");
+ return emitObjCValueOperation(
+ *this, value, returnType,
+ CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
+ "objc_autorelease", /*MayThrow=*/false);
}
/// Retain the given object, with normal retain semantics.
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
llvm::Type *returnType) {
- return emitObjCValueOperation(*this, value, returnType,
- CGM.getObjCEntrypoints().objc_retainRuntimeFunction,
- "objc_retain");
+ return emitObjCValueOperation(
+ *this, value, returnType,
+ CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain",
+ /*MayThrow=*/false);
}
/// Release the given object.
@@ -2565,17 +2615,16 @@ void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
ARCPreciseLifetime_t precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
+ llvm::FunctionCallee &fn =
+ CGM.getObjCEntrypoints().objc_releaseRuntimeFunction;
if (!fn) {
- if (!fn) {
- llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
- fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
- setARCRuntimeFunctionLinkage(CGM, fn);
- // We have Native ARC, so set nonlazybind attribute for performance
- if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
- f->addFnAttr(llvm::Attribute::NonLazyBind);
- }
+ fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
+ setARCRuntimeFunctionLinkage(CGM, fn);
+ // We have Native ARC, so set nonlazybind attribute for performance
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
+ f->addFnAttr(llvm::Attribute::NonLazyBind);
}
// Cast the argument to 'id'.
@@ -2829,6 +2878,7 @@ public:
Result visit(const Expr *e);
Result visitCastExpr(const CastExpr *e);
Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
+ Result visitBlockExpr(const BlockExpr *e);
Result visitBinaryOperator(const BinaryOperator *e);
Result visitBinAssign(const BinaryOperator *e);
Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
@@ -2905,6 +2955,12 @@ ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
}
template <typename Impl, typename Result>
+Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
+ // The default implementation just forwards the expression to visitExpr.
+ return asImpl().visitExpr(e);
+}
+
+template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
switch (e->getCastKind()) {
@@ -3047,7 +3103,8 @@ Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
// Look through pseudo-object expressions.
} else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
return asImpl().visitPseudoObjectExpr(pseudo);
- }
+ } else if (auto *be = dyn_cast<BlockExpr>(e))
+ return asImpl().visitBlockExpr(be);
return asImpl().visitExpr(e);
}
@@ -3082,6 +3139,15 @@ struct ARCRetainExprEmitter :
return TryEmitResult(result, true);
}
+ TryEmitResult visitBlockExpr(const BlockExpr *e) {
+ TryEmitResult result = visitExpr(e);
+ // Avoid the block-retain if this is a block literal that doesn't need to be
+ // copied to the heap.
+ if (e->getBlockDecl()->canAvoidCopyToHeap())
+ result.setInt(true);
+ return result;
+ }
+
/// Block extends are net +0. Naively, we could just recurse on
/// the subexpression, but actually we need to ensure that the
/// value is copied as a block, so there's a little filter here.
@@ -3384,11 +3450,10 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
// We just use an inline assembly.
llvm::FunctionType *extenderType
= llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
- llvm::Value *extender
- = llvm::InlineAsm::get(extenderType,
- /* assembly */ "",
- /* constraints */ "r",
- /* side effects */ true);
+ llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType,
+ /* assembly */ "",
+ /* constraints */ "r",
+ /* side effects */ true);
object = Builder.CreateBitCast(object, VoidPtrTy);
EmitNounwindRuntimeCall(extender, object);
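[Only the declared type of 'extender' changes here, but for context this is the shape of the empty, side-effecting inline asm the function emits; a self-contained sketch with the raw LLVM API.]

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/InlineAsm.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void gcLifetimeExtenderSketch() {
      LLVMContext Ctx;
      Module M("sketch", Ctx);
      Type *VoidTy = Type::getVoidTy(Ctx);
      Type *I8Ptr = Type::getInt8PtrTy(Ctx);

      // Empty asm body, one register input, marked as having side effects, so
      // the optimizer must keep the pointer alive up to the call.
      FunctionType *ExtenderTy = FunctionType::get(VoidTy, {I8Ptr}, false);
      InlineAsm *Extender = InlineAsm::get(ExtenderTy, /*AsmString=*/"",
                                           /*Constraints=*/"r",
                                           /*hasSideEffects=*/true);

      Function *F = Function::Create(ExtenderTy, Function::ExternalLinkage,
                                     "extend_lifetime", &M);
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
      IRBuilder<> Builder(BB);
      Builder.CreateCall(ExtenderTy, Extender, {&*F->arg_begin()});
      Builder.CreateRetVoid();
    }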
@@ -3647,19 +3712,25 @@ void CodeGenModule::emitAtAvailableLinkGuard() {
// CoreFoundation is linked into the final binary.
llvm::FunctionType *FTy =
llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
- llvm::Constant *CFFunc =
+ llvm::FunctionCallee CFFunc =
CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");
llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
- llvm::Function *CFLinkCheckFunc = cast<llvm::Function>(CreateBuiltinFunction(
- CheckFTy, "__clang_at_available_requires_core_foundation_framework"));
- CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
- CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CodeGenFunction CGF(*this);
- CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
- CGF.EmitNounwindRuntimeCall(CFFunc, llvm::Constant::getNullValue(VoidPtrTy));
- CGF.Builder.CreateUnreachable();
- addCompilerUsedGlobal(CFLinkCheckFunc);
+ llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
+ CheckFTy, "__clang_at_available_requires_core_foundation_framework",
+ llvm::AttributeList(), /*IsLocal=*/true);
+ llvm::Function *CFLinkCheckFunc =
+ cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
+ if (CFLinkCheckFunc->empty()) {
+ CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
+ CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CodeGenFunction CGF(*this);
+ CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
+ CGF.EmitNounwindRuntimeCall(CFFunc,
+ llvm::Constant::getNullValue(VoidPtrTy));
+ CGF.Builder.CreateUnreachable();
+ addCompilerUsedGlobal(CFLinkCheckFunc);
+ }
}
CGObjCRuntime::~CGObjCRuntime() {}
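[For reference, the guarded emission pattern introduced in the last hunk (fill in the link-check function only if it is still empty, with link-once hidden linkage) looks roughly like this outside clang's CodeGen wrappers; '__link_guard' is a hypothetical symbol name used only for the sketch.]

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void emitLinkGuardSketch(Module &M) {
      LLVMContext &Ctx = M.getContext();

      // '__link_guard' stands in for
      // __clang_at_available_requires_core_foundation_framework.
      FunctionType *GuardTy = FunctionType::get(Type::getVoidTy(Ctx), false);
      FunctionCallee Ref = M.getOrInsertFunction("__link_guard", GuardTy);
      auto *Guard = cast<Function>(Ref.getCallee()->stripPointerCasts());

      // Only emit the body once; a second call finds the function non-empty.
      if (!Guard->empty())
        return;
      Guard->setLinkage(GlobalValue::LinkOnceAnyLinkage);
      Guard->setVisibility(GlobalValue::HiddenVisibility);

      // Force a reference to the library symbol so the linker pulls it in.
      Type *I8Ptr = Type::getInt8PtrTy(Ctx);
      FunctionCallee Forced = M.getOrInsertFunction(
          "CFBundleGetVersionNumber",
          FunctionType::get(Type::getInt32Ty(Ctx), {I8Ptr}, false));
      IRBuilder<> Builder(BasicBlock::Create(Ctx, "", Guard));
      Builder.CreateCall(Forced, {Constant::getNullValue(I8Ptr)});
      Builder.CreateUnreachable();
    }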