diff options
author | Artem Belevich <tra@google.com> | 2015-11-17 22:28:40 +0000 |
---|---|---|
committer | Artem Belevich <tra@google.com> | 2015-11-17 22:28:40 +0000 |
commit | 556fea86b474f5910814ebd1b643fefcd3064500 (patch) | |
tree | cc531183c239e78925def91f4ac5380a7439fd3b /lib/Driver/Action.cpp | |
parent | f418a27719a23e48e656f21488728f5c638bad72 (diff) |
[CUDA] use -aux-triple to pass target triple of opposite side of compilation
Clang needs to know target triple for both sides of compilation so that
preprocessor macros and target builtins from both sides are available.
This change augments Compilation class to carry information about
toolchains used during different CUDA compilation passes and refactors
BuildActions to use it when it constructs CUDA jobs.
Removed DeviceTriple from CudaHostAction/CudaDeviceAction as it's no
longer needed.
Differential Revision: http://reviews.llvm.org/D13144
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@253385 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Driver/Action.cpp')
-rw-r--r-- | lib/Driver/Action.cpp | 11 |
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index fdbae113ff..49dccd224b 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -58,18 +58,15 @@ BindArchAction::BindArchAction(std::unique_ptr<Action> Input,

 void CudaDeviceAction::anchor() {}

 CudaDeviceAction::CudaDeviceAction(std::unique_ptr<Action> Input,
-                                   const char *ArchName,
-                                   const char *DeviceTriple, bool AtTopLevel)
+                                   const char *ArchName, bool AtTopLevel)
     : Action(CudaDeviceClass, std::move(Input)), GpuArchName(ArchName),
-      DeviceTriple(DeviceTriple), AtTopLevel(AtTopLevel) {}
+      AtTopLevel(AtTopLevel) {}

 void CudaHostAction::anchor() {}

 CudaHostAction::CudaHostAction(std::unique_ptr<Action> Input,
-                               const ActionList &DeviceActions,
-                               const char *DeviceTriple)
-    : Action(CudaHostClass, std::move(Input)), DeviceActions(DeviceActions),
-      DeviceTriple(DeviceTriple) {}
+                               const ActionList &DeviceActions)
+    : Action(CudaHostClass, std::move(Input)), DeviceActions(DeviceActions) {}

 CudaHostAction::~CudaHostAction() {
   for (auto &DA : DeviceActions)