Diffstat (limited to 'lib/CodeGen/CGCall.cpp')
-rw-r--r--  lib/CodeGen/CGCall.cpp  361
1 file changed, 180 insertions(+), 181 deletions(-)
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 7d494bb1f1..bc9be14ede 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1,9 +1,8 @@
//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -32,7 +31,6 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
@@ -69,12 +67,19 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
}
/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
-/// qualification.
-static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD,
- const CXXMethodDecl *MD) {
- QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+/// qualification. Either or both of RD and MD may be null. A null RD indicates
+/// that there is no meaningful 'this' type, and a null MD can occur when
+/// calling a method pointer.
+CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD) {
+ QualType RecTy;
+ if (RD)
+ RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ else
+ RecTy = Context.VoidTy;
+
if (MD)
- RecTy = Context.getAddrSpaceQualType(RecTy, MD->getTypeQualifiers().getAddressSpace());
+ RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
@@ -169,11 +174,9 @@ static void appendParameterTypes(const CodeGenTypes &CGT,
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP,
- const FunctionDecl *FD) {
+ CanQual<FunctionProtoType> FTP) {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- RequiredArgs Required =
- RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
+ RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
appendParameterTypes(CGT, prefix, paramInfos, FTP);
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
@@ -187,11 +190,10 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
-CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
- const FunctionDecl *FD) {
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
- FTP, FD);
+ FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
@@ -240,7 +242,7 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
-/// (Zero value of RD means we don't have any meaningful "this" argument type,
+/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
@@ -251,14 +253,11 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
- if (RD)
- argTypes.push_back(GetThisType(Context, RD, MD));
- else
- argTypes.push_back(Context.VoidPtrTy);
+ argTypes.push_back(DeriveThisType(RD, MD));
return ::arrangeLLVMFunctionInfo(
*this, true, argTypes,
- FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
/// Set calling convention for CUDA/HIP kernel.
@@ -290,7 +289,7 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
}
- return arrangeFreeFunctionType(prototype, MD);
+ return arrangeFreeFunctionType(prototype);
}
bool CodeGenTypes::inheritingCtorHasParams(
@@ -300,29 +299,23 @@ bool CodeGenTypes::inheritingCtorHasParams(
return Type == Ctor_Complete ||
!Inherited.getShadowDecl()->constructsVirtualBase() ||
!Target.getCXXABI().hasConstructorVariants();
- }
+}
const CGFunctionInfo &
-CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
- StructorType Type) {
+CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
SmallVector<CanQualType, 16> argTypes;
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- argTypes.push_back(GetThisType(Context, MD->getParent(), MD));
+ argTypes.push_back(DeriveThisType(MD->getParent(), MD));
bool PassParams = true;
- GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
- GD = GlobalDecl(CD, toCXXCtorType(Type));
-
// A base class inheriting constructor doesn't get forwarded arguments
// needed to construct a virtual base (or base class thereof).
if (auto Inherited = CD->getInheritedConstructor())
- PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
- } else {
- auto *DD = dyn_cast<CXXDestructorDecl>(MD);
- GD = GlobalDecl(DD, toCXXDtorType(Type));
+ PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
}
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
@@ -332,7 +325,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
appendParameterTypes(*this, argTypes, paramInfos, FTP);
CGCXXABI::AddedStructorArgs AddedArgs =
- TheCXXABI.buildStructorSignature(MD, Type, argTypes);
+ TheCXXABI.buildStructorSignature(GD, argTypes);
if (!paramInfos.empty()) {
// Note: prefix implies after the first param.
if (AddedArgs.Prefix)
@@ -408,8 +401,11 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
CanQual<FunctionProtoType> FPT = GetFormalType(D);
- RequiredArgs Required =
- RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
+ RequiredArgs Required = PassProtoArgs
+ ? RequiredArgs::forPrototypePlus(
+ FPT, TotalPrefixArgs + ExtraSuffixArgs)
+ : RequiredArgs::All;
+
GlobalDecl GD(D, CtorKind);
CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
? ArgTypes.front()
@@ -452,7 +448,7 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
/*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
}
- return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
+ return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}
/// Arrange the argument and result information for the declaration or
@@ -517,11 +513,9 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
-
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
+ if (isa<CXXConstructorDecl>(GD.getDecl()) ||
+ isa<CXXDestructorDecl>(GD.getDecl()))
+ return arrangeCXXStructorDeclaration(GD);
return arrangeFunctionDeclaration(FD);
}
@@ -535,7 +529,7 @@ const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
assert(MD->isVirtual() && "only methods have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
- CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) };
+ CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
/*chainCall=*/false, ArgTys,
FTP->getExtInfo(), {}, RequiredArgs(1));
@@ -549,7 +543,7 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CanQual<FunctionProtoType> FTP = GetFormalType(CD);
SmallVector<CanQualType, 2> ArgTys;
const CXXRecordDecl *RD = CD->getParent();
- ArgTys.push_back(GetThisType(Context, RD, CD));
+ ArgTys.push_back(DeriveThisType(RD, CD));
if (CT == Ctor_CopyingClosure)
ArgTys.push_back(*FTP->param_type_begin());
if (RD->getNumVBases() > 0)
@@ -582,7 +576,7 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
// extra prefix plus the arguments in the prototype.
if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
if (proto->isVariadic())
- required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
+ required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
if (proto->hasExtParameterInfos())
addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
@@ -635,11 +629,10 @@ CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
auto argTypes = getArgTypesForDeclaration(Context, params);
- return arrangeLLVMFunctionInfo(
- GetReturnType(proto->getReturnType()),
- /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
- proto->getExtInfo(), paramInfos,
- RequiredArgs::forPrototypePlus(proto, 1, nullptr));
+ return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
+ /*instanceMethod*/ false, /*chainCall*/ false,
+ argTypes, proto->getExtInfo(), paramInfos,
+ RequiredArgs::forPrototypePlus(proto, 1));
}
const CGFunctionInfo &
@@ -808,6 +801,8 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
ArrayRef<CanQualType> argTypes,
RequiredArgs required) {
assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
+ assert(!required.allowsOptionalArgs() ||
+ required.getNumRequiredArgs() <= argTypes.size());
void *buffer =
operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
@@ -1148,7 +1143,7 @@ EnterStructPointerForCoercedAccess(Address SrcPtr,
return SrcPtr;
// GEP into the first element.
- SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
+ SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
// If the first element is a struct, recurse.
llvm::Type *SrcTy = SrcPtr.getElementType();
@@ -1276,12 +1271,8 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
// Prefer scalar stores to first-class aggregate stores.
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(Val->getType())) {
- const llvm::StructLayout *Layout =
- CGF.CGM.getDataLayout().getStructLayout(STy);
-
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
- Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
+ Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
}
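
Note on the CreateStructGEP changes above (and the similar ones later in this diff): the explicit StructLayout/CharUnits offset arguments are dropped because the builder can derive each field's offset from the struct type itself. A minimal, standalone LLVM-level sketch of the same idea, under the assumption that an illustrative helper name is acceptable (storeStructElements is not part of this patch):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Store each element of a first-class struct value into the matching field
    // of a destination pointer. No byte offsets are passed: the offset that the
    // removed overload took explicitly is implied by the struct layout
    // (DataLayout::getStructLayout(STy)->getElementOffset(i)) when the GEP is
    // lowered.
    static void storeStructElements(IRBuilder<> &B, StructType *STy, Value *Val,
                                    Value *DestPtr) {
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Value *FieldPtr = B.CreateStructGEP(STy, DestPtr, i, "field");
        B.CreateStore(B.CreateExtractValue(Val, i), FieldPtr);
      }
    }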
@@ -1682,13 +1673,7 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
if (!isFuncTypeConvertible(FPT))
return llvm::StructType::get(getLLVMContext());
- const CGFunctionInfo *Info;
- if (isa<CXXDestructorDecl>(MD))
- Info =
- &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
- else
- Info = &arrangeCXXMethodDeclaration(MD);
- return GetFunctionType(*Info);
+ return GetFunctionType(GD);
}
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
@@ -1793,8 +1778,6 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (CodeGenOpts.Backchain)
FuncAttrs.addAttribute("backchain");
- // FIXME: The interaction of this attribute with the SLH command line flag
- // has not been determined.
if (CodeGenOpts.SpeculativeLoadHardening)
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
}
@@ -1826,8 +1809,7 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
llvm::AttrBuilder FuncAttrs;
- ConstructDefaultFnAttrList(F.getName(),
- F.hasFnAttribute(llvm::Attribute::OptimizeNone),
+ ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
/* AttrOnCallsite = */ false, FuncAttrs);
F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}
@@ -1864,8 +1846,6 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
if (TargetDecl->hasAttr<ConvergentAttr>())
FuncAttrs.addAttribute(llvm::Attribute::Convergent);
- if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
- FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
@@ -1910,6 +1890,16 @@ void CodeGenModule::ConstructAttributeList(
ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
+ // This must run after constructing the default function attribute list
+ // to ensure that the speculative load hardening attribute is removed
+ // in the case where the -mspeculative-load-hardening flag was passed.
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
+ FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
+ if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
+ }
+
if (CodeGenOpts.EnableSegmentedStacks &&
!(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
FuncAttrs.addAttribute("split-stack");
@@ -2009,8 +1999,7 @@ void CodeGenModule::ConstructAttributeList(
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
llvm::AttrBuilder SRETAttrs;
- if (!RetAI.getSuppressSRet())
- SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ SRETAttrs.addAttribute(llvm::Attribute::StructRet);
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
@@ -2262,9 +2251,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
Address ArgStruct = Address::invalid();
- const llvm::StructLayout *ArgStructLayout = nullptr;
if (IRFunctionArgs.hasInallocaArg()) {
- ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
FI.getArgStructAlignment());
@@ -2313,10 +2300,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
auto FieldIndex = ArgI.getInAllocaFieldIndex();
- CharUnits FieldOffset =
- CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
- Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
- Arg->getName());
+ Address V =
+ Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
ArgVals.push_back(ParamValue::forIndirect(V));
break;
}
@@ -2476,7 +2461,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
- auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy = Ptr.getElementType();
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
@@ -2493,9 +2477,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
- auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
- Address EltPtr =
- Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
+ Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
Builder.CreateStore(AI, EltPtr);
}
@@ -2531,7 +2513,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
auto coercionType = ArgI.getCoerceAndExpandType();
alloca = Builder.CreateElementBitCast(alloca, coercionType);
- auto layout = CGM.getDataLayout().getStructLayout(coercionType);
unsigned argIndex = FirstIRArg;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
@@ -2539,7 +2520,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
continue;
- auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
+ auto eltAddr = Builder.CreateStructGEP(alloca, i);
auto elt = FnArgs[argIndex++];
Builder.CreateStore(elt, eltAddr);
}
@@ -2891,15 +2872,6 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
RV = SI->getValueOperand();
SI->eraseFromParent();
- // If that was the only use of the return value, nuke it as well now.
- auto returnValueInst = ReturnValue.getPointer();
- if (returnValueInst->use_empty()) {
- if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
- alloca->eraseFromParent();
- ReturnValue = Address::invalid();
- }
- }
-
// Otherwise, we have to do a simple load.
} else {
RV = Builder.CreateLoad(ReturnValue);
@@ -2944,7 +2916,6 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
case ABIArgInfo::CoerceAndExpand: {
auto coercionType = RetAI.getCoerceAndExpandType();
- auto layout = CGM.getDataLayout().getStructLayout(coercionType);
// Load all of the coerced elements out into results.
llvm::SmallVector<llvm::Value*, 4> results;
@@ -2954,7 +2925,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
continue;
- auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
+ auto eltAddr = Builder.CreateStructGEP(addr, i);
auto elt = Builder.CreateLoad(eltAddr);
results.push_back(elt);
}
@@ -3368,7 +3339,7 @@ void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
if (StackBase) {
// Restore the stack after the call.
- llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
CGF.Builder.CreateCall(F, StackBase);
}
}
@@ -3455,7 +3426,8 @@ void CodeGenFunction::EmitCallArgs(
auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
- EmittedArg.getScalarVal());
+ EmittedArg.getScalarVal(),
+ PS->isDynamic());
Args.add(RValue::get(V), SizeTy);
// If we're emitting args in reverse, be sure to do so with
// pass_object_size, as well.
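
The new PS->isDynamic() argument distinguishes pass_object_size from its dynamic variant, pass_dynamic_object_size, which is evaluated like __builtin_dynamic_object_size and may therefore yield a run-time value instead of a compile-time constant. A source-level illustration with hypothetical function names:

    #include <cstddef>

    // With pass_object_size, the compiler evaluates __builtin_object_size(buf, 0)
    // at each call site and passes it as a hidden argument; the dynamic variant
    // evaluates __builtin_dynamic_object_size instead, which may produce a
    // run-time computation (or -1 when nothing is known).
    extern "C" int fill_static(char *buf __attribute__((pass_object_size(0))), char c);
    extern "C" int fill_dynamic(char *buf __attribute__((pass_dynamic_object_size(0))), char c);

    int demo(char *heap_buf) {
      char local[32];
      int a = fill_static(local, 'x');     // hidden size argument: constant 32
      int b = fill_dynamic(heap_buf, 'y'); // hidden size argument may be computed at run time
      return a + b;
    }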
@@ -3678,15 +3650,15 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
-CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const llvm::Twine &name) {
return EmitNounwindRuntimeCall(callee, None, name);
}
/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
-CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
const llvm::Twine &name) {
llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
call->setDoesNotThrow();
@@ -3695,9 +3667,8 @@ CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
-llvm::CallInst *
-CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
- const llvm::Twine &name) {
+llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
+ const llvm::Twine &name) {
return EmitRuntimeCall(callee, None, name);
}
@@ -3721,21 +3692,20 @@ CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
}
/// Emits a simple call (never an invoke) to the given runtime function.
-llvm::CallInst *
-CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
- const llvm::Twine &name) {
- llvm::CallInst *call =
- Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
+llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
+ const llvm::Twine &name) {
+ llvm::CallInst *call = Builder.CreateCall(
+ callee, args, getBundlesForFunclet(callee.getCallee()), name);
call->setCallingConv(getRuntimeCC());
return call;
}
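
The runtime-call helpers above now take llvm::FunctionCallee rather than a raw llvm::Value *, so the callee's FunctionType travels with the pointer and call emission no longer needs to read the pointer's pointee type. A minimal standalone sketch of declaring and calling a runtime helper through that interface (the helper name is made up for illustration):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Declare (or find) a runtime helper and call it. FunctionCallee bundles the
    // callee pointer with its FunctionType, so CreateCall does not have to look
    // through the pointer type.
    static CallInst *emitRuntimeHelperCall(Module &M, IRBuilder<> &B, Value *Arg) {
      FunctionType *FTy =
          FunctionType::get(Type::getVoidTy(M.getContext()),
                            {Type::getInt8PtrTy(M.getContext())}, /*isVarArg=*/false);
      FunctionCallee Helper = M.getOrInsertFunction("__example_runtime_helper", FTy);
      CallInst *Call = B.CreateCall(Helper, {Arg});
      Call->setDoesNotThrow(); // mirrors EmitNounwindRuntimeCall
      return Call;
    }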
/// Emits a call or invoke to the given noreturn runtime function.
-void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
- ArrayRef<llvm::Value*> args) {
+void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
+ llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
SmallVector<llvm::OperandBundleDef, 1> BundleList =
- getBundlesForFunclet(callee);
+ getBundlesForFunclet(callee.getCallee());
if (getInvokeDest()) {
llvm::InvokeInst *invoke =
@@ -3755,33 +3725,32 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
}
/// Emits a call or invoke instruction to the given nullary runtime function.
-llvm::CallSite
-CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
+llvm::CallBase *
+CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
const Twine &name) {
return EmitRuntimeCallOrInvoke(callee, None, name);
}
/// Emits a call or invoke instruction to the given runtime function.
-llvm::CallSite
-CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
- ArrayRef<llvm::Value*> args,
+llvm::CallBase *
+CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
+ ArrayRef<llvm::Value *> args,
const Twine &name) {
- llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
- callSite.setCallingConv(getRuntimeCC());
- return callSite;
+ llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
+ call->setCallingConv(getRuntimeCC());
+ return call;
}
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
-llvm::CallSite
-CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
- ArrayRef<llvm::Value *> Args,
- const Twine &Name) {
+llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
SmallVector<llvm::OperandBundleDef, 1> BundleList =
- getBundlesForFunclet(Callee);
+ getBundlesForFunclet(Callee.getCallee());
- llvm::Instruction *Inst;
+ llvm::CallBase *Inst;
if (!InvokeDest)
Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
else {
@@ -3796,7 +3765,7 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
if (CGM.getLangOpts().ObjCAutoRefCount)
AddObjCARCExceptionMetadata(Inst);
- return llvm::CallSite(Inst);
+ return Inst;
}
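
EmitCallOrInvoke and its callers now traffic in llvm::CallBase *, the common base class of CallInst and InvokeInst that supersedes the old llvm::CallSite wrapper; attributes, calling convention, and callee queries are applied uniformly through it. A small illustrative sketch (finishRuntimeCall is not part of this patch):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstrTypes.h" // llvm::CallBase covers CallInst and InvokeInst

    using namespace llvm;

    // The same post-processing applies whether a call or an invoke was emitted.
    // Returns true for indirect call sites (no statically known callee), the
    // condition EmitCall uses to decide whether to value-profile the target.
    static bool finishRuntimeCall(CallBase *Call, CallingConv::ID CC,
                                  AttributeList Attrs) {
      Call->setCallingConv(CC);
      Call->setAttributes(Attrs);
      return Call->getCalledFunction() == nullptr;
    }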
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
@@ -3808,7 +3777,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- llvm::Instruction **callOrInvoke,
+ llvm::CallBase **callOrInvoke,
SourceLocation Loc) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
@@ -3819,17 +3788,36 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
- llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
+ llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
+
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
+
+#ifndef NDEBUG
+ if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
+ // For an inalloca varargs function, we don't expect CallInfo to match the
+ // function pointer's type, because the inalloca struct a will have extra
+ // fields in it for the varargs parameters. Code later in this function
+ // bitcasts the function pointer to the type derived from CallInfo.
+ //
+ // In other cases, we assert that the types match up (until pointers stop
+ // having pointee types).
+ llvm::Type *TypeFromVal;
+ if (Callee.isVirtual())
+ TypeFromVal = Callee.getVirtualFunctionType();
+ else
+ TypeFromVal =
+ Callee.getFunctionPointer()->getType()->getPointerElementType();
+ assert(IRFuncTy == TypeFromVal);
+ }
+#endif
// 1. Set up the arguments.
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
Address ArgMemory = Address::invalid();
- const llvm::StructLayout *ArgMemoryLayout = nullptr;
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
const llvm::DataLayout &DL = CGM.getDataLayout();
- ArgMemoryLayout = DL.getStructLayout(ArgStruct);
llvm::Instruction *IP = CallArgs.getStackBase();
llvm::AllocaInst *AI;
if (IP) {
@@ -3846,13 +3834,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ArgMemory = Address(AI, Align);
}
- // Helper function to drill into the inalloca allocation.
- auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
- auto FieldOffset =
- CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
- return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
- };
-
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
@@ -3875,7 +3856,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (IRFunctionArgs.hasSRetArg()) {
IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
} else if (RetAI.isInAlloca()) {
- Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
+ Address Addr =
+ Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
Builder.CreateStore(SRetPtr.getPointer(), Addr);
}
}
@@ -3913,12 +3895,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
cast<llvm::Instruction>(Addr.getPointer());
CGBuilderTy::InsertPoint IP = Builder.saveIP();
Builder.SetInsertPoint(Placeholder);
- Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ Addr =
+ Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
Builder.restoreIP(IP);
deferPlaceholderReplacement(Placeholder, Addr.getPointer());
} else {
// Store the RValue into the argument struct.
- Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ Address Addr =
+ Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
unsigned AS = Addr.getType()->getPointerAddressSpace();
llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
// There are some cases where a trivial bitcast is not avoidable. The
@@ -4099,11 +4083,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
STy->getPointerTo(Src.getAddressSpace()));
}
- auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
- Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
+ Address EltPtr = Builder.CreateStructGEP(Src, i);
llvm::Value *LI = Builder.CreateLoad(EltPtr);
IRCallArgs[FirstIRArg + i] = LI;
}
@@ -4153,7 +4135,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
llvm::Type *eltType = coercionType->getElementType(i);
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
- Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
+ Address eltAddr = Builder.CreateStructGEP(addr, i);
llvm::Value *elt = Builder.CreateLoad(eltAddr);
IRCallArgs[IRArgPos++] = elt;
}
@@ -4186,8 +4168,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// cases, we can't do any parameter mismatch checks. Give up and bitcast
// the callee.
unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
- auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
- CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
+ CalleePtr =
+ Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
} else {
llvm::Type *LastParamTy =
IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
@@ -4219,19 +4201,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
//
// This makes the IR nicer, but more importantly it ensures that we
// can inline the function at -O0 if it is marked always_inline.
- auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
- llvm::FunctionType *CalleeFT =
- cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
+ auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
+ llvm::Value *Ptr) -> llvm::Function * {
if (!CalleeFT->isVarArg())
- return Ptr;
+ return nullptr;
- llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
- if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
- return Ptr;
+ // Get underlying value if it's a bitcast
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
+ if (CE->getOpcode() == llvm::Instruction::BitCast)
+ Ptr = CE->getOperand(0);
+ }
- llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
+ llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
if (!OrigFn)
- return Ptr;
+ return nullptr;
llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
@@ -4240,15 +4223,19 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (OrigFT->isVarArg() ||
OrigFT->getNumParams() != CalleeFT->getNumParams() ||
OrigFT->getReturnType() != CalleeFT->getReturnType())
- return Ptr;
+ return nullptr;
for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
- return Ptr;
+ return nullptr;
return OrigFn;
};
- CalleePtr = simplifyVariadicCallee(CalleePtr);
+
+ if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
+ CalleePtr = OrigFn;
+ IRFuncTy = OrigFn->getFunctionType();
+ }
// 3. Perform the actual call.
@@ -4293,11 +4280,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Apply always_inline to all calls within flatten functions.
// FIXME: should this really take priority over __try, below?
if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
- !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
- Callee.getAbstractInfo()
- .getCalleeDecl()
- .getDecl()
- ->hasAttr<NoInlineAttr>())) {
+ !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::AlwaysInline);
@@ -4341,22 +4324,21 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
getBundlesForFunclet(CalleePtr);
// Emit the actual call/invoke instruction.
- llvm::CallSite CS;
+ llvm::CallBase *CI;
if (!InvokeDest) {
- CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
+ CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
+ CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
BundleList);
EmitBlock(Cont);
}
- llvm::Instruction *CI = CS.getInstruction();
if (callOrInvoke)
*callOrInvoke = CI;
// Apply the attributes and calling convention.
- CS.setAttributes(Attrs);
- CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+ CI->setAttributes(Attrs);
+ CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
// Apply various metadata.
@@ -4371,7 +4353,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
// IPVK_IndirectCallTarget in InstrProfData.inc.
- if (!CS.getCalledFunction())
+ if (!CI->getCalledFunction())
PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
CI, CalleePtr);
@@ -4382,26 +4364,45 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Suppress tail calls if requested.
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
}
+ // Add metadata for calls to MSAllocator functions
+ if (getDebugInfo() && TargetDecl &&
+ TargetDecl->hasAttr<MSAllocatorAttr>())
+ getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
+
// 4. Finish the call.
// If the call doesn't return, finish the basic block and clear the
// insertion point; this allows the rest of IRGen to discard
// unreachable code.
- if (CS.doesNotReturn()) {
+ if (CI->doesNotReturn()) {
if (UnusedReturnSizePtr)
PopCleanupBlock();
// Strip away the noreturn attribute to better diagnose unreachable UB.
if (SanOpts.has(SanitizerKind::Unreachable)) {
- if (auto *F = CS.getCalledFunction())
+ // Also remove from function since CallBase::hasFnAttr additionally checks
+ // attributes of the called function.
+ if (auto *F = CI->getCalledFunction())
F->removeFnAttr(llvm::Attribute::NoReturn);
- CS.removeAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoReturn);
+ CI->removeAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoReturn);
+
+ // Avoid incompatibility with ASan which relies on the `noreturn`
+ // attribute to insert handler calls.
+ if (SanOpts.hasOneOf(SanitizerKind::Address |
+ SanitizerKind::KernelAddress)) {
+ SanitizerScope SanScope(this);
+ llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
+ Builder.SetInsertPoint(CI);
+ auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::FunctionCallee Fn =
+ CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
+ EmitNounwindRuntimeCall(Fn);
+ }
}
EmitUnreachable(Loc);
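
Context for the added __asan_handle_no_return call: once UBSan's unreachable handling strips noreturn from the call site (and the callee), ASan can no longer key off that attribute to unpoison the stack before a function that never returns, so the runtime call is emitted explicitly just before the call. A standalone LLVM-level sketch of the same insertion, assuming an existing call instruction (insertAsanNoReturnHandler is illustrative):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Insert a call to __asan_handle_no_return immediately before a call whose
    // noreturn attribute is about to be stripped, mirroring what ASan would
    // otherwise have done based on that attribute.
    static void insertAsanNoReturnHandler(CallBase *NoReturnCall) {
      Module *M = NoReturnCall->getModule();
      IRBuilder<> B(NoReturnCall); // insertion point: right before the call
      FunctionType *FnTy =
          FunctionType::get(Type::getVoidTy(M->getContext()), /*isVarArg=*/false);
      FunctionCallee Fn = M->getOrInsertFunction("__asan_handle_no_return", FnTy);
      CallInst *Handler = B.CreateCall(Fn);
      Handler->setDoesNotThrow();
    }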
@@ -4436,7 +4437,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
switch (RetAI.getKind()) {
case ABIArgInfo::CoerceAndExpand: {
auto coercionType = RetAI.getCoerceAndExpandType();
- auto layout = CGM.getDataLayout().getStructLayout(coercionType);
Address addr = SRetPtr;
addr = Builder.CreateElementBitCast(addr, coercionType);
@@ -4448,7 +4448,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
llvm::Type *eltType = coercionType->getElementType(i);
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
- Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
+ Address eltAddr = Builder.CreateStructGEP(addr, i);
llvm::Value *elt = CI;
if (requiresExtract)
elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
@@ -4529,7 +4529,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} ();
// Emit the assume_aligned check on the return value.
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
@@ -4556,7 +4555,7 @@ CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
if (isVirtual()) {
const CallExpr *CE = getVirtualCallExpr();
return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
- CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
+ CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
CE ? CE->getBeginLoc() : SourceLocation());
}