Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
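In C++, shifting a uint64_t by 64 bits or more is undefined behaviour, and that is the defect class this warning describes: on the path the analyzer follows, it assumes the shift amount at the flagged line can be 255. A minimal, self-contained sketch of the pattern (illustrative only, not the LLVM sources; the helper name is made up):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for a "power-of-two alignment" accessor that
    // returns 1 << ShiftValue as a uint64_t.
    static uint64_t alignmentValue(uint8_t ShiftValue) {
      // Well-defined only while ShiftValue < 64; a shift amount such as 255
      // (>= the width of uint64_t) is undefined behaviour.
      return uint64_t(1) << ShiftValue;
    }

    int main() {
      std::printf("%llu\n", (unsigned long long)alignmentValue(4)); // prints 16
      // alignmentValue(255) would perform the undefined shift the analyzer warns about.
      return 0;
    }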

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CoroFrame.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Coroutines/CoroFrame.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Transforms/Coroutines/CoroFrame.cpp

1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This file contains classes used to discover if for a particular value
9// there is a path from its definition to a use that crosses a suspend block.
10//
11// Using the information discovered we form a Coroutine Frame structure to
12// contain those values. All uses of those values are replaced with appropriate
13// GEP + load from the coroutine frame. At the point of the definition we spill
14// the value into the coroutine frame.
15//===----------------------------------------------------------------------===//
16
17#include "CoroInternal.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/SmallString.h"
20#include "llvm/Analysis/PtrUseVisitor.h"
21#include "llvm/Analysis/StackLifetime.h"
22#include "llvm/Config/llvm-config.h"
23#include "llvm/IR/CFG.h"
24#include "llvm/IR/DIBuilder.h"
25#include "llvm/IR/Dominators.h"
26#include "llvm/IR/IRBuilder.h"
27#include "llvm/IR/InstIterator.h"
28#include "llvm/Support/CommandLine.h"
29#include "llvm/Support/Debug.h"
30#include "llvm/Support/MathExtras.h"
31#include "llvm/Support/OptimizedStructLayout.h"
32#include "llvm/Support/circular_raw_ostream.h"
33#include "llvm/Support/raw_ostream.h"
34#include "llvm/Transforms/Utils/BasicBlockUtils.h"
35#include "llvm/Transforms/Utils/Local.h"
36#include "llvm/Transforms/Utils/PromoteMemToReg.h"
37#include <algorithm>
38
39using namespace llvm;
40
41// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
42// "coro-frame", which results in leaner debug spew.
43#define DEBUG_TYPE "coro-suspend-crossing"
44
45static cl::opt<bool> EnableReuseStorageInFrame(
46 "reuse-storage-in-coroutine-frame", cl::Hidden,
47 cl::desc(
48 "Enable the optimization which would reuse the storage in the coroutine \
49 frame for allocas whose liferanges are not overlapped, for testing purposes"),
50 llvm::cl::init(false));
51
52enum { SmallVectorThreshold = 32 };
53
54// Provides a two-way mapping between blocks and numbers.
55namespace {
56class BlockToIndexMapping {
57 SmallVector<BasicBlock *, SmallVectorThreshold> V;
58
59public:
60 size_t size() const { return V.size(); }
61
62 BlockToIndexMapping(Function &F) {
63 for (BasicBlock &BB : F)
64 V.push_back(&BB);
65 llvm::sort(V);
66 }
67
68 size_t blockToIndex(BasicBlock *BB) const {
69 auto *I = llvm::lower_bound(V, BB);
70 assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block")((void)0);
71 return I - V.begin();
72 }
73
74 BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
75};
76} // end anonymous namespace
77
78// The SuspendCrossingInfo maintains data that allows answering the question
79// whether given two BasicBlocks A and B there is a path from A to B that
80// passes through a suspend point.
81//
82// For every basic block 'i' it maintains a BlockData that consists of:
83// Consumes: a bit vector which contains a set of indices of blocks that can
84// reach block 'i'
85// Kills: a bit vector which contains a set of indices of blocks that can
86// reach block 'i', but at least one of those paths crosses a suspend point
87// Suspend: a boolean indicating whether block 'i' contains a suspend point.
88// End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
89//
90namespace {
91struct SuspendCrossingInfo {
92 BlockToIndexMapping Mapping;
93
94 struct BlockData {
95 BitVector Consumes;
96 BitVector Kills;
97 bool Suspend = false;
98 bool End = false;
99 };
100 SmallVector<BlockData, SmallVectorThreshold> Block;
101
102 iterator_range<succ_iterator> successors(BlockData const &BD) const {
103 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
104 return llvm::successors(BB);
105 }
106
107 BlockData &getBlockData(BasicBlock *BB) {
108 return Block[Mapping.blockToIndex(BB)];
109 }
110
111 void dump() const;
112 void dump(StringRef Label, BitVector const &BV) const;
113
114 SuspendCrossingInfo(Function &F, coro::Shape &Shape);
115
116 bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
117 size_t const DefIndex = Mapping.blockToIndex(DefBB);
118 size_t const UseIndex = Mapping.blockToIndex(UseBB);
119
120 bool const Result = Block[UseIndex].Kills[DefIndex];
121 LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()do { } while (false)
122 << " answer is " << Result << "\n")do { } while (false);
123 return Result;
124 }
125
126 bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
127 auto *I = cast<Instruction>(U);
128
129 // We rewrote PHINodes, so that only the ones with exactly one incoming
130 // value need to be analyzed.
131 if (auto *PN = dyn_cast<PHINode>(I))
132 if (PN->getNumIncomingValues() > 1)
133 return false;
134
135 BasicBlock *UseBB = I->getParent();
136
137 // As a special case, treat uses by an llvm.coro.suspend.retcon or an
138 // llvm.coro.suspend.async as if they were uses in the suspend's single
139 // predecessor: the uses conceptually occur before the suspend.
140 if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
141 UseBB = UseBB->getSinglePredecessor();
142 assert(UseBB && "should have split coro.suspend into its own block")((void)0);
143 }
144
145 return hasPathCrossingSuspendPoint(DefBB, UseBB);
146 }
147
148 bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
149 return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
150 }
151
152 bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
153 auto *DefBB = I.getParent();
154
155 // As a special case, treat values produced by an llvm.coro.suspend.*
156 // as if they were defined in the single successor: the uses
157 // conceptually occur after the suspend.
158 if (isa<AnyCoroSuspendInst>(I)) {
159 DefBB = DefBB->getSingleSuccessor();
160 assert(DefBB && "should have split coro.suspend into its own block")((void)0);
161 }
162
163 return isDefinitionAcrossSuspend(DefBB, U);
164 }
165
166 bool isDefinitionAcrossSuspend(Value &V, User *U) const {
167 if (auto *Arg = dyn_cast<Argument>(&V))
168 return isDefinitionAcrossSuspend(*Arg, U);
169 if (auto *Inst = dyn_cast<Instruction>(&V))
170 return isDefinitionAcrossSuspend(*Inst, U);
171
172 llvm_unreachable(
173 "Coroutine could only collect Argument and Instruction now.");
174 }
175};
176} // end anonymous namespace
177
178#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
179LLVM_DUMP_METHOD__attribute__((noinline)) void SuspendCrossingInfo::dump(StringRef Label,
180 BitVector const &BV) const {
181 dbgs() << Label << ":";
182 for (size_t I = 0, N = BV.size(); I < N; ++I)
183 if (BV[I])
184 dbgs() << " " << Mapping.indexToBlock(I)->getName();
185 dbgs() << "\n";
186}
187
188LLVM_DUMP_METHOD__attribute__((noinline)) void SuspendCrossingInfo::dump() const {
189 for (size_t I = 0, N = Block.size(); I < N; ++I) {
190 BasicBlock *const B = Mapping.indexToBlock(I);
191 dbgs() << B->getName() << ":\n";
192 dump(" Consumes", Block[I].Consumes);
193 dump(" Kills", Block[I].Kills);
194 }
195 dbgs() << "\n";
196}
197#endif
198
199SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
200 : Mapping(F) {
201 const size_t N = Mapping.size();
202 Block.resize(N);
203
204 // Initialize every block so that it consumes itself
205 for (size_t I = 0; I < N; ++I) {
206 auto &B = Block[I];
207 B.Consumes.resize(N);
208 B.Kills.resize(N);
209 B.Consumes.set(I);
210 }
211
212 // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
213 // the code beyond coro.end is reachable during initial invocation of the
214 // coroutine.
215 for (auto *CE : Shape.CoroEnds)
216 getBlockData(CE->getParent()).End = true;
217
218 // Mark all suspend blocks and indicate that they kill everything they
219 // consume. Note that crossing coro.save also requires a spill, as any code
220 // between coro.save and coro.suspend may resume the coroutine and all of the
221 // state needs to be saved by that time.
222 auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
223 BasicBlock *SuspendBlock = BarrierInst->getParent();
224 auto &B = getBlockData(SuspendBlock);
225 B.Suspend = true;
226 B.Kills |= B.Consumes;
227 };
228 for (auto *CSI : Shape.CoroSuspends) {
229 markSuspendBlock(CSI);
230 if (auto *Save = CSI->getCoroSave())
231 markSuspendBlock(Save);
232 }
233
234 // Iterate propagating consumes and kills until they stop changing.
235 int Iteration = 0;
236 (void)Iteration;
237
238 bool Changed;
239 do {
240 LLVM_DEBUG(dbgs() << "iteration " << ++Iteration)do { } while (false);
241 LLVM_DEBUG(dbgs() << "==============\n")do { } while (false);
242
243 Changed = false;
244 for (size_t I = 0; I < N; ++I) {
245 auto &B = Block[I];
246 for (BasicBlock *SI : successors(B)) {
247
248 auto SuccNo = Mapping.blockToIndex(SI);
249
250 // Save the Consumes and Kills bitsets so that it is easy to see
251 // if anything changed after propagation.
252 auto &S = Block[SuccNo];
253 auto SavedConsumes = S.Consumes;
254 auto SavedKills = S.Kills;
255
256 // Propagate Kills and Consumes from block B into its successor S.
257 S.Consumes |= B.Consumes;
258 S.Kills |= B.Kills;
259
260 // If block B is a suspend block, it should propagate kills into
261 // its successor for every block B consumes.
262 if (B.Suspend) {
263 S.Kills |= B.Consumes;
264 }
265 if (S.Suspend) {
266 // If block S is a suspend block, it should kill all of the blocks it
267 // consumes.
268 S.Kills |= S.Consumes;
269 } else if (S.End) {
270 // If block S is an end block, it should not propagate kills as the
271 // blocks following coro.end() are reached during initial invocation
272 // of the coroutine while all the data are still available on the
273 // stack or in the registers.
274 S.Kills.reset();
275 } else {
276 // This is reached when block S is neither a suspend nor a coro.end block,
277 // and we need to make sure that it is not in the kill set.
278 S.Kills.reset(SuccNo);
279 }
280
281 // See if anything changed.
282 Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
283
284 if (S.Kills != SavedKills) {
285 LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()do { } while (false)
286 << "\n")do { } while (false);
287 LLVM_DEBUG(dump("S.Kills", S.Kills))do { } while (false);
288 LLVM_DEBUG(dump("SavedKills", SavedKills))do { } while (false);
289 }
290 if (S.Consumes != SavedConsumes) {
291 LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n")do { } while (false);
292 LLVM_DEBUG(dump("S.Consume", S.Consumes))do { } while (false);
293 LLVM_DEBUG(dump("SavedCons", SavedConsumes))do { } while (false);
294 }
295 }
296 }
297 } while (Changed);
298 LLVM_DEBUG(dump())do { } while (false);
299}
300
301#undef DEBUG_TYPE // "coro-suspend-crossing"
302#define DEBUG_TYPE "coro-frame"
303
304namespace {
305class FrameTypeBuilder;
306// Mapping from the to-be-spilled value to all the users that need reload.
307using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
308struct AllocaInfo {
309 AllocaInst *Alloca;
310 DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
311 bool MayWriteBeforeCoroBegin;
312 AllocaInfo(AllocaInst *Alloca,
313 DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
314 bool MayWriteBeforeCoroBegin)
315 : Alloca(Alloca), Aliases(std::move(Aliases)),
316 MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
317};
318struct FrameDataInfo {
319 // All the values (that are not allocas) that need to be spilled to the
320 // frame.
321 SpillInfo Spills;
322 // Allocas contains all values defined as allocas that need to live in the
323 // frame.
324 SmallVector<AllocaInfo, 8> Allocas;
325
326 SmallVector<Value *, 8> getAllDefs() const {
327 SmallVector<Value *, 8> Defs;
328 for (const auto &P : Spills)
329 Defs.push_back(P.first);
330 for (const auto &A : Allocas)
331 Defs.push_back(A.Alloca);
332 return Defs;
333 }
334
335 uint32_t getFieldIndex(Value *V) const {
336 auto Itr = FieldIndexMap.find(V);
337 assert(Itr != FieldIndexMap.end() &&((void)0)
338 "Value does not have a frame field index")((void)0);
339 return Itr->second;
340 }
341
342 void setFieldIndex(Value *V, uint32_t Index) {
343 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&((void)0)
344 "Cannot set the index for the same field twice.")((void)0);
345 FieldIndexMap[V] = Index;
346 }
347
348 uint64_t getAlign(Value *V) const {
349 auto Iter = FieldAlignMap.find(V);
350 assert(Iter != FieldAlignMap.end())((void)0);
351 return Iter->second;
352 }
353
354 void setAlign(Value *V, uint64_t Align) {
355 assert(FieldAlignMap.count(V) == 0)((void)0);
356 FieldAlignMap.insert({V, Align});
357 }
358
359 uint64_t getOffset(Value *V) const {
360 auto Iter = FieldOffsetMap.find(V);
361 assert(Iter != FieldOffsetMap.end())((void)0);
362 return Iter->second;
363 }
364
365 void setOffset(Value *V, uint64_t Offset) {
366 assert(FieldOffsetMap.count(V) == 0)((void)0);
367 FieldOffsetMap.insert({V, Offset});
368 }
369
370 // Remap the index of every field in the frame, using the final layout index.
371 void updateLayoutIndex(FrameTypeBuilder &B);
372
373private:
374 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
375 // twice by mistake.
376 bool LayoutIndexUpdateStarted = false;
377 // Map from values to their slot indexes on the frame. They will be first set
378 // with their original insertion field index. After the frame is built, their
379 // indexes will be updated into the final layout index.
380 DenseMap<Value *, uint32_t> FieldIndexMap;
381 // Map from values to their alignment on the frame. They would be set after
382 // the frame is built.
383 DenseMap<Value *, uint64_t> FieldAlignMap;
384 // Map from values to their offset on the frame. They would be set after
385 // the frame is built.
386 DenseMap<Value *, uint64_t> FieldOffsetMap;
387};
388} // namespace
389
390#ifndef NDEBUG
391static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
392 dbgs() << "------------- " << Title << "--------------\n";
393 for (const auto &E : Spills) {
394 E.first->dump();
395 dbgs() << " user: ";
396 for (auto *I : E.second)
397 I->dump();
398 }
399}
400
401static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
402 dbgs() << "------------- Allocas --------------\n";
403 for (const auto &A : Allocas) {
404 A.Alloca->dump();
405 }
406}
407#endif
408
409namespace {
410using FieldIDType = size_t;
411// We cannot rely solely on the natural alignment of a type when building a
412// coroutine frame: if the alignment specified on the Alloca instruction
413// differs from the natural alignment of the alloca type, we will need to insert
414// padding.
415class FrameTypeBuilder {
416private:
417 struct Field {
418 uint64_t Size;
419 uint64_t Offset;
420 Type *Ty;
421 FieldIDType LayoutFieldIndex;
422 Align Alignment;
423 Align TyAlignment;
424 };
425
426 const DataLayout &DL;
427 LLVMContext &Context;
428 uint64_t StructSize = 0;
429 Align StructAlign;
430 bool IsFinished = false;
431
432 Optional<Align> MaxFrameAlignment;
433
434 SmallVector<Field, 8> Fields;
435 DenseMap<Value*, unsigned> FieldIndexByKey;
436
437public:
438 FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL,
439 Optional<Align> MaxFrameAlignment)
440 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
441
442 /// Add a field to this structure for the storage of an `alloca`
443 /// instruction.
444 LLVM_NODISCARD[[clang::warn_unused_result]] FieldIDType addFieldForAlloca(AllocaInst *AI,
445 bool IsHeader = false) {
446 Type *Ty = AI->getAllocatedType();
447
448 // Make an array type if this is a static array allocation.
449 if (AI->isArrayAllocation()) {
1
Assuming the condition is false
2
Taking false branch
450 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
451 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
452 else
453 report_fatal_error("Coroutines cannot handle non static allocas yet");
454 }
455
456 return addField(Ty, AI->getAlign(), IsHeader);
3
Calling 'FrameTypeBuilder::addField'
457 }
458
459 /// We want to put the allocas whose lifetime-ranges are not overlapped
460 /// into one slot of coroutine frame.
461 /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
462 ///
463 /// cppcoro::task<void> alternative_paths(bool cond) {
464 /// if (cond) {
465 /// big_structure a;
466 /// process(a);
467 /// co_await something();
468 /// } else {
469 /// big_structure b;
470 /// process2(b);
471 /// co_await something();
472 /// }
473 /// }
474 ///
475 /// We want to put variable a and variable b in the same slot to
476 /// reduce the size of coroutine frame.
477 ///
478 /// This function uses the StackLifetime algorithm to partition the AllocaInsts in
479 /// Spills into non-overlapping sets in order to put the allocas in the same
480 /// non-overlapping set into the same slot of the coroutine frame. It then adds a
481 /// field for the allocas in the same non-overlapping set, using the largest
482 /// type as the field type.
483 ///
484 /// Side Effects: Because we sort the allocas, the order of allocas in the
485 /// frame may differ from the order in the source code.
486 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
487 coro::Shape &Shape);
488
489 /// Add a field to this structure.
490 LLVM_NODISCARD[[clang::warn_unused_result]] FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
491 bool IsHeader = false,
492 bool IsSpillOfValue = false) {
493 assert(!IsFinished && "adding fields to a finished builder")((void)0);
494 assert(Ty && "must provide a type for a field")((void)0);
495
496 // The field size is always the alloc size of the type.
497 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
498
499 // For an alloca with size=0, we don't need to add a field and they
500 // can just point to any index in the frame. Use index 0.
501 if (FieldSize == 0) {
4
Assuming 'FieldSize' is not equal to 0
5
Taking false branch
502 return 0;
503 }
504
505 // The field alignment might not be the type alignment, but we need
506 // to remember the type alignment anyway to build the type.
507 // If we are spilling values we don't need to worry about ABI alignment
508 // concerns.
509 auto ABIAlign = DL.getABITypeAlign(Ty);
510 Align TyAlignment =
511 (IsSpillOfValue
5.1
'IsSpillOfValue' is false
&& MaxFrameAlignment)
512 ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
513 : ABIAlign;
514 if (!FieldAlignment) {
6
Taking false branch
515 FieldAlignment = TyAlignment;
516 }
517
518 // Lay out header fields immediately.
519 uint64_t Offset;
520 if (IsHeader) {
7
Assuming 'IsHeader' is true
8
Taking true branch
521 Offset = alignTo(StructSize, FieldAlignment);
9
Calling 'alignTo'
522 StructSize = Offset + FieldSize;
523
524 // Everything else has a flexible offset.
525 } else {
526 Offset = OptimizedStructLayoutField::FlexibleOffset;
527 }
528
529 Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
530 return Fields.size() - 1;
531 }
532
533 /// Finish the layout and set the body on the given type.
534 void finish(StructType *Ty);
535
536 uint64_t getStructSize() const {
537 assert(IsFinished && "not yet finished!")((void)0);
538 return StructSize;
539 }
540
541 Align getStructAlign() const {
542 assert(IsFinished && "not yet finished!")((void)0);
543 return StructAlign;
544 }
545
546 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
547 assert(IsFinished && "not yet finished!")((void)0);
548 return Fields[Id].LayoutFieldIndex;
549 }
550
551 Field getLayoutField(FieldIDType Id) const {
552 assert(IsFinished && "not yet finished!")((void)0);
553 return Fields[Id];
554 }
555};
556} // namespace
557
558void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
559 auto Updater = [&](Value *I) {
560 auto Field = B.getLayoutField(getFieldIndex(I));
561 setFieldIndex(I, Field.LayoutFieldIndex);
562 setAlign(I, Field.Alignment.value());
563 setOffset(I, Field.Offset);
564 };
565 LayoutIndexUpdateStarted = true;
566 for (auto &S : Spills)
567 Updater(S.first);
568 for (const auto &A : Allocas)
569 Updater(A.Alloca);
570 LayoutIndexUpdateStarted = false;
571}
572
573void FrameTypeBuilder::addFieldForAllocas(const Function &F,
574 FrameDataInfo &FrameData,
575 coro::Shape &Shape) {
576 using AllocaSetType = SmallVector<AllocaInst *, 4>;
577 SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
578
579 // We need to add field for allocas at the end of this function. However, this
580 // function has multiple exits, so we use this helper to avoid redundant code.
581 struct RTTIHelper {
582 std::function<void()> func;
583 RTTIHelper(std::function<void()> &&func) : func(func) {}
584 ~RTTIHelper() { func(); }
585 } Helper([&]() {
586 for (auto AllocaList : NonOverlapedAllocas) {
587 auto *LargestAI = *AllocaList.begin();
588 FieldIDType Id = addFieldForAlloca(LargestAI);
589 for (auto *Alloca : AllocaList)
590 FrameData.setFieldIndex(Alloca, Id);
591 }
592 });
593
594 if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
595 for (const auto &A : FrameData.Allocas) {
596 AllocaInst *Alloca = A.Alloca;
597 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
598 }
599 return;
600 }
601
602 // Because there are paths from the lifetime.start to coro.end
603 // for each alloca, the live ranges of all allocas overlap
604 // in the blocks that contain coro.end and in their successor blocks.
605 // So we choose to skip those blocks when we calculate the live range
606 // for each alloca. This should be reasonable since there shouldn't be uses
607 // in these blocks and the coroutine frame shouldn't be used outside the
608 // coroutine body.
609 //
610 // Note that the user of coro.suspend may not be a SwitchInst. However, this
611 // case seems too complex to handle, and it is harmless to skip these
612 // patterns since it only prevents putting the allocas into the same
613 // slot.
614 DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
615 for (auto CoroSuspendInst : Shape.CoroSuspends) {
616 for (auto U : CoroSuspendInst->users()) {
617 if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
618 auto *SWI = const_cast<SwitchInst *>(ConstSWI);
619 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
620 SWI->setDefaultDest(SWI->getSuccessor(1));
621 }
622 }
623 }
624
625 auto ExtractAllocas = [&]() {
626 AllocaSetType Allocas;
627 Allocas.reserve(FrameData.Allocas.size());
628 for (const auto &A : FrameData.Allocas)
629 Allocas.push_back(A.Alloca);
630 return Allocas;
631 };
632 StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
633 StackLifetime::LivenessType::May);
634 StackLifetimeAnalyzer.run();
635 auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
636 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
637 StackLifetimeAnalyzer.getLiveRange(AI2));
638 };
639 auto GetAllocaSize = [&](const AllocaInfo &A) {
640 Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
641 assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n")((void)0);
642 assert(!RetSize->isScalable() && "Scalable vectors are not yet supported")((void)0);
643 return RetSize->getFixedSize();
644 };
645 // Put larger allocas at the front, so that larger allocas have higher
646 // priority to merge, which can potentially save more space. Also, each
647 // AllocaSet will be ordered, so we can get the largest Alloca in an
648 // AllocaSet easily.
649 sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
650 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
651 });
652 for (const auto &A : FrameData.Allocas) {
653 AllocaInst *Alloca = A.Alloca;
654 bool Merged = false;
655 // Try to find whether the Alloca does not interfere with any existing
656 // NonOverlappedAllocaSet. If so, insert the alloca into that
657 // NonOverlappedAllocaSet.
658 for (auto &AllocaSet : NonOverlapedAllocas) {
659 assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n")((void)0);
660 bool NoInference = none_of(AllocaSet, [&](auto Iter) {
661 return IsAllocaInferenre(Alloca, Iter);
662 });
663 // If the alignment of A is a multiple of the alignment of B, the address
664 // of A should satisfy the alignment requirement of B.
665 //
666 // There may be other, more fine-grained strategies to handle the alignment
667 // information during the merging process, but they seem hard to implement
668 // and would benefit little.
669 bool Alignable = [&]() -> bool {
670 auto *LargestAlloca = *AllocaSet.begin();
671 return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
672 0;
673 }();
674 bool CouldMerge = NoInference && Alignable;
675 if (!CouldMerge)
676 continue;
677 AllocaSet.push_back(Alloca);
678 Merged = true;
679 break;
680 }
681 if (!Merged) {
682 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
683 }
684 }
685 // Restore the default target destination recorded earlier for each
686 // switch statement.
687 for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
688 SwitchInst *SWI = SwitchAndDefaultDest.first;
689 BasicBlock *DestBB = SwitchAndDefaultDest.second;
690 SWI->setDefaultDest(DestBB);
691 }
692 // This Debug Info could tell us which allocas are merged into one slot.
693 LLVM_DEBUG(for (auto &AllocaSetdo { } while (false)
694 : NonOverlapedAllocas) {do { } while (false)
695 if (AllocaSet.size() > 1) {do { } while (false)
696 dbgs() << "In Function:" << F.getName() << "\n";do { } while (false)
697 dbgs() << "Find Union Set "do { } while (false)
698 << "\n";do { } while (false)
699 dbgs() << "\tAllocas are \n";do { } while (false)
700 for (auto Alloca : AllocaSet)do { } while (false)
701 dbgs() << "\t\t" << *Alloca << "\n";do { } while (false)
702 }do { } while (false)
703 })do { } while (false);
704}
705
706void FrameTypeBuilder::finish(StructType *Ty) {
707 assert(!IsFinished && "already finished!")((void)0);
708
709 // Prepare the optimal-layout field array.
710 // The Id in the layout field is a pointer to our Field for it.
711 SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
712 LayoutFields.reserve(Fields.size());
713 for (auto &Field : Fields) {
714 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
715 Field.Offset);
716 }
717
718 // Perform layout.
719 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
720 StructSize = SizeAndAlign.first;
721 StructAlign = SizeAndAlign.second;
722
723 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
724 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
725 };
726
727 // We need to produce a packed struct type if there's a field whose
728 // assigned offset isn't a multiple of its natural type alignment.
729 bool Packed = [&] {
730 for (auto &LayoutField : LayoutFields) {
731 auto &F = getField(LayoutField);
732 if (!isAligned(F.TyAlignment, LayoutField.Offset))
733 return true;
734 }
735 return false;
736 }();
737
738 // Build the struct body.
739 SmallVector<Type*, 16> FieldTypes;
740 FieldTypes.reserve(LayoutFields.size() * 3 / 2);
741 uint64_t LastOffset = 0;
742 for (auto &LayoutField : LayoutFields) {
743 auto &F = getField(LayoutField);
744
745 auto Offset = LayoutField.Offset;
746
747 // Add a padding field if there's a padding gap and we're either
748 // building a packed struct or the padding gap is more than we'd
749 // get from aligning to the field type's natural alignment.
750 assert(Offset >= LastOffset)((void)0);
751 if (Offset != LastOffset) {
752 if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
753 FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
754 Offset - LastOffset));
755 }
756
757 F.Offset = Offset;
758 F.LayoutFieldIndex = FieldTypes.size();
759
760 FieldTypes.push_back(F.Ty);
761 LastOffset = Offset + F.Size;
762 }
763
764 Ty->setBody(FieldTypes, Packed);
765
766#ifndef NDEBUG
767 // Check that the IR layout matches the offsets we expect.
768 auto Layout = DL.getStructLayout(Ty);
769 for (auto &F : Fields) {
770 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty)((void)0);
771 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset)((void)0);
772 }
773#endif
774
775 IsFinished = true;
776}
777
778static void cacheDIVar(FrameDataInfo &FrameData,
779 DenseMap<Value *, DILocalVariable *> &DIVarCache) {
780 for (auto *V : FrameData.getAllDefs()) {
781 if (DIVarCache.find(V) != DIVarCache.end())
782 continue;
783
784 auto DDIs = FindDbgDeclareUses(V);
785 auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
786 return DDI->getExpression()->getNumElements() == 0;
787 });
788 if (I != DDIs.end())
789 DIVarCache.insert({V, (*I)->getVariable()});
790 }
791}
792
793/// Create a name for Type. It uses MDString to store the newly created string to
794/// avoid a memory leak.
795static StringRef solveTypeName(Type *Ty) {
796 if (Ty->isIntegerTy()) {
797 // The longest name in common use may be '__int_128', which has 9 characters.
798 SmallString<16> Buffer;
799 raw_svector_ostream OS(Buffer);
800 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
801 auto *MDName = MDString::get(Ty->getContext(), OS.str());
802 return MDName->getString();
803 }
804
805 if (Ty->isFloatingPointTy()) {
806 if (Ty->isFloatTy())
807 return "__float_";
808 if (Ty->isDoubleTy())
809 return "__double_";
810 return "__floating_type_";
811 }
812
813 if (Ty->isPointerTy()) {
814 auto *PtrTy = cast<PointerType>(Ty);
815 Type *PointeeTy = PtrTy->getElementType();
816 auto Name = solveTypeName(PointeeTy);
817 if (Name == "UnknownType")
818 return "PointerType";
819 SmallString<16> Buffer;
820 Twine(Name + "_Ptr").toStringRef(Buffer);
821 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
822 return MDName->getString();
823 }
824
825 if (Ty->isStructTy()) {
826 if (!cast<StructType>(Ty)->hasName())
827 return "__LiteralStructType_";
828
829 auto Name = Ty->getStructName();
830
831 SmallString<16> Buffer(Name);
832 for_each(Buffer, [](auto &Iter) {
833 if (Iter == '.' || Iter == ':')
834 Iter = '_';
835 });
836 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
837 return MDName->getString();
838 }
839
840 return "UnknownType";
841}
842
843static DIType *solveDIType(DIBuilder &Builder, Type *Ty, DataLayout &Layout,
844 DIScope *Scope, unsigned LineNum,
845 DenseMap<Type *, DIType *> &DITypeCache) {
846 if (DIType *DT = DITypeCache.lookup(Ty))
847 return DT;
848
849 StringRef Name = solveTypeName(Ty);
850
851 DIType *RetType = nullptr;
852
853 if (Ty->isIntegerTy()) {
854 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
855 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
856 llvm::DINode::FlagArtificial);
857 } else if (Ty->isFloatingPointTy()) {
858 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
859 dwarf::DW_ATE_float,
860 llvm::DINode::FlagArtificial);
861 } else if (Ty->isPointerTy()) {
862 // Construct BasicType instead of PointerType to avoid infinite
863 // search problem.
864 // For example, we would be in trouble if we traverse recursively:
865 //
866 // struct Node {
867 // Node* ptr;
868 // };
869 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
870 dwarf::DW_ATE_address,
871 llvm::DINode::FlagArtificial);
872 } else if (Ty->isStructTy()) {
873 auto *DIStruct = Builder.createStructType(
874 Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
875 Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
876 llvm::DINodeArray());
877
878 auto *StructTy = cast<StructType>(Ty);
879 SmallVector<Metadata *, 16> Elements;
880 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
881 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
882 Scope, LineNum, DITypeCache);
883 assert(DITy)((void)0);
884 Elements.push_back(Builder.createMemberType(
885 Scope, DITy->getName(), Scope->getFile(), LineNum,
886 DITy->getSizeInBits(), DITy->getAlignInBits(),
887 Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
888 llvm::DINode::FlagArtificial, DITy));
889 }
890
891 Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
892
893 RetType = DIStruct;
894 } else {
895 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";)do { } while (false);
896 SmallString<32> Buffer;
897 raw_svector_ostream OS(Buffer);
898 OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
899 RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
900 dwarf::DW_ATE_address,
901 llvm::DINode::FlagArtificial);
902 }
903
904 DITypeCache.insert({Ty, RetType});
905 return RetType;
906}
907
908/// Build artificial debug info for C++ coroutine frames to allow users to
909/// inspect the contents of the frame directly.
910///
911/// Create debug information for the coroutine frame with the debug name "__coro_frame".
912/// The debug information for the fields of the coroutine frame is constructed in
913/// the following way:
914/// 1. For every value in the frame, we search the uses of dbg.declare to find
915/// the corresponding debug variable for the value. If we can find the
916/// debug variable, we can get full and accurate debug information.
917/// 2. If we can't get debug information in step 1, we can only try to
918/// build the DIType from the Type. We do this in solveDIType. We only handle
919/// integer, floating-point, pointer and struct types for now.
920static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
921 FrameDataInfo &FrameData) {
922 DISubprogram *DIS = F.getSubprogram();
923 // If there is no DISubprogram for F, it implies the function was not compiled
924 // with debug info, so we don't need to generate debug info for the frame
925 // either.
926 if (!DIS || !DIS->getUnit() ||
927 !dwarf::isCPlusPlus(
928 (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
929 return;
930
931 assert(Shape.ABI == coro::ABI::Switch &&((void)0)
932 "We could only build debug infomation for C++ coroutine now.\n")((void)0);
933
934 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
935
936 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
937 assert(PromiseAlloca &&((void)0)
938 "Coroutine with switch ABI should own Promise alloca")((void)0);
939
940 TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
941 if (DIs.empty())
942 return;
943
944 DbgDeclareInst *PromiseDDI = DIs.front();
945 DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
946 DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
947 DIFile *DFile = PromiseDIScope->getFile();
948 DILocation *DILoc = PromiseDDI->getDebugLoc().get();
949 unsigned LineNum = PromiseDIVariable->getLine();
950
951 DICompositeType *FrameDITy = DBuilder.createStructType(
952 DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
953 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
954 llvm::DINodeArray());
955 StructType *FrameTy = Shape.FrameTy;
956 SmallVector<Metadata *, 16> Elements;
957 DataLayout Layout = F.getParent()->getDataLayout();
958
959 DenseMap<Value *, DILocalVariable *> DIVarCache;
960 cacheDIVar(FrameData, DIVarCache);
961
962 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
963 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
964 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
965
966 DenseMap<unsigned, StringRef> NameCache;
967 NameCache.insert({ResumeIndex, "__resume_fn"});
968 NameCache.insert({DestroyIndex, "__destroy_fn"});
969 NameCache.insert({IndexIndex, "__coro_index"});
970
971 Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
972 *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
973 *IndexTy = FrameTy->getElementType(IndexIndex);
974
975 DenseMap<unsigned, DIType *> TyCache;
976 TyCache.insert({ResumeIndex,
977 DBuilder.createBasicType("__resume_fn",
978 Layout.getTypeSizeInBits(ResumeFnTy),
979 dwarf::DW_ATE_address)});
980 TyCache.insert(
981 {DestroyIndex, DBuilder.createBasicType(
982 "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
983 dwarf::DW_ATE_address)});
984
985 /// FIXME: If we fill the field `SizeInBits` with the actual size of
986 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
987 TyCache.insert({IndexIndex, DBuilder.createBasicType(
988 "__coro_index",
989 (Layout.getTypeSizeInBits(IndexTy) < 8)
990 ? 8
991 : Layout.getTypeSizeInBits(IndexTy),
992 dwarf::DW_ATE_unsigned_char)});
993
994 for (auto *V : FrameData.getAllDefs()) {
995 if (DIVarCache.find(V) == DIVarCache.end())
996 continue;
997
998 auto Index = FrameData.getFieldIndex(V);
999
1000 NameCache.insert({Index, DIVarCache[V]->getName()});
1001 TyCache.insert({Index, DIVarCache[V]->getType()});
1002 }
1003
1004 // Cache from index to (Align, Offset Pair)
1005 DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
1006 // The Align and Offset of Resume function and Destroy function are fixed.
1007 OffsetCache.insert({ResumeIndex, {8, 0}});
1008 OffsetCache.insert({DestroyIndex, {8, 8}});
1009 OffsetCache.insert(
1010 {IndexIndex,
1011 {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1012
1013 for (auto *V : FrameData.getAllDefs()) {
1014 auto Index = FrameData.getFieldIndex(V);
1015
1016 OffsetCache.insert(
1017 {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
1018 }
1019
1020 DenseMap<Type *, DIType *> DITypeCache;
1021 // This counter is used to avoid duplicate type names; e.g., there could be
1022 // many i32 and i64 types in one coroutine, and we would use i32_0 and
1023 // i32_1 to distinguish them, since it makes no sense for the names of the
1024 // fields to conflict with each other.
1025 unsigned UnknownTypeNum = 0;
1026 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1027 if (OffsetCache.find(Index) == OffsetCache.end())
1028 continue;
1029
1030 std::string Name;
1031 uint64_t SizeInBits;
1032 uint32_t AlignInBits;
1033 uint64_t OffsetInBits;
1034 DIType *DITy = nullptr;
1035
1036 Type *Ty = FrameTy->getElementType(Index);
1037 assert(Ty->isSized() && "We can't handle type which is not sized.\n")((void)0);
1038 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
1039 AlignInBits = OffsetCache[Index].first * 8;
1040 OffsetInBits = OffsetCache[Index].second * 8;
1041
1042 if (NameCache.find(Index) != NameCache.end()) {
1043 Name = NameCache[Index].str();
1044 DITy = TyCache[Index];
1045 } else {
1046 DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1047 assert(DITy && "SolveDIType shouldn't return nullptr.\n")((void)0);
1048 Name = DITy->getName().str();
1049 Name += "_" + std::to_string(UnknownTypeNum);
1050 UnknownTypeNum++;
1051 }
1052
1053 Elements.push_back(DBuilder.createMemberType(
1054 FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1055 llvm::DINode::FlagArtificial, DITy));
1056 }
1057
1058 DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1059
1060 auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1061 DFile, LineNum, FrameDITy,
1062 true, DINode::FlagArtificial);
1063 assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()))((void)0);
1064
1065 // The subprogram has a RetainedNodes field which records the debug
1066 // variables it contains, so we need to add __coro_frame to its
1067 // RetainedNodes.
1068 //
1069 // If we don't add __coro_frame to the RetainedNodes, the user may get
1070 // `no symbol __coro_frame in context` rather than the more precise
1071 // `__coro_frame is optimized out`.
1072 if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1073 auto RetainedNodes = SubProgram->getRetainedNodes();
1074 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1075 RetainedNodes.end());
1076 RetainedNodesVec.push_back(FrameDIVar);
1077 SubProgram->replaceOperandWith(
1078 7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1079 }
1080
1081 DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1082 DBuilder.createExpression(), DILoc,
1083 Shape.FramePtr->getNextNode());
1084}
1085
1086// Build a struct that will keep state for an active coroutine.
1087// struct f.frame {
1088// ResumeFnTy ResumeFnAddr;
1089// ResumeFnTy DestroyFnAddr;
1090// int ResumeIndex;
1091// ... promise (if present) ...
1092// ... spills ...
1093// };
1094static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1095 FrameDataInfo &FrameData) {
1096 LLVMContext &C = F.getContext();
1097 const DataLayout &DL = F.getParent()->getDataLayout();
1098 StructType *FrameTy = [&] {
1099 SmallString<32> Name(F.getName());
1100 Name.append(".Frame");
1101 return StructType::create(C, Name);
1102 }();
1103
1104 // We will use this value to cap the alignment of spilled values.
1105 Optional<Align> MaxFrameAlignment;
1106 if (Shape.ABI == coro::ABI::Async)
1107 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1108 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1109
1110 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1111 Optional<FieldIDType> SwitchIndexFieldId;
1112
1113 if (Shape.ABI == coro::ABI::Switch) {
1114 auto *FramePtrTy = FrameTy->getPointerTo();
1115 auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
1116 /*IsVarArg=*/false);
1117 auto *FnPtrTy = FnTy->getPointerTo();
1118
1119 // Add header fields for the resume and destroy functions.
1120 // We can rely on these being perfectly packed.
1121 (void)B.addField(FnPtrTy, None, /*header*/ true);
1122 (void)B.addField(FnPtrTy, None, /*header*/ true);
1123
1124 // PromiseAlloca field needs to be explicitly added here because it's
1125 // a header field with a fixed offset based on its alignment. Hence it
1126 // needs special handling and cannot be added to FrameData.Allocas.
1127 if (PromiseAlloca)
1128 FrameData.setFieldIndex(
1129 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1130
1131 // Add a field to store the suspend index. This doesn't need to
1132 // be in the header.
1133 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1134 Type *IndexType = Type::getIntNTy(C, IndexBits);
1135
1136 SwitchIndexFieldId = B.addField(IndexType, None);
1137 } else {
1138 assert(PromiseAlloca == nullptr && "lowering doesn't support promises")((void)0);
1139 }
1140
1141 // Because multiple allocas may own the same field slot,
1142 // we add allocas to field here.
1143 B.addFieldForAllocas(F, FrameData, Shape);
1144 // Add PromiseAlloca to Allocas list so that
1145 // 1. updateLayoutIndex could update its index after
1146 // `performOptimizedStructLayout`
1147 // 2. it is processed in insertSpills.
1148 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
1149 // We assume that the promise alloca won't be modified before
1150 // CoroBegin and no alias will be created before CoroBegin.
1151 FrameData.Allocas.emplace_back(
1152 PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
1153 // Create an entry for every spilled value.
1154 for (auto &S : FrameData.Spills) {
1155 Type *FieldType = S.first->getType();
1156 // For byval arguments, we need to store the pointed value in the frame,
1157 // instead of the pointer itself.
1158 if (const Argument *A = dyn_cast<Argument>(S.first))
1159 if (A->hasByValAttr())
1160 FieldType = A->getParamByValType();
1161 FieldIDType Id =
1162 B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
1163 FrameData.setFieldIndex(S.first, Id);
1164 }
1165
1166 B.finish(FrameTy);
1167 FrameData.updateLayoutIndex(B);
1168 Shape.FrameAlign = B.getStructAlign();
1169 Shape.FrameSize = B.getStructSize();
1170
1171 switch (Shape.ABI) {
1172 case coro::ABI::Switch: {
1173 // In the switch ABI, remember the switch-index field.
1174 auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1175 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1176 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1177 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1178
1179 // Also round the frame size up to a multiple of its alignment, as is
1180 // generally expected in C/C++.
1181 Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1182 break;
1183 }
1184
1185 // In the retcon ABI, remember whether the frame is inline in the storage.
1186 case coro::ABI::Retcon:
1187 case coro::ABI::RetconOnce: {
1188 auto Id = Shape.getRetconCoroId();
1189 Shape.RetconLowering.IsFrameInlineInStorage
1190 = (B.getStructSize() <= Id->getStorageSize() &&
1191 B.getStructAlign() <= Id->getStorageAlignment());
1192 break;
1193 }
1194 case coro::ABI::Async: {
1195 Shape.AsyncLowering.FrameOffset =
1196 alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1197 // Also make the final context size a multiple of the context alignment to
1198 // make allocation easier for allocators.
1199 Shape.AsyncLowering.ContextSize =
1200 alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1201 Shape.AsyncLowering.getContextAlignment());
1202 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1203 report_fatal_error(
1204 "The alignment requirment of frame variables cannot be higher than "
1205 "the alignment of the async function context");
1206 }
1207 break;
1208 }
1209 }
1210
1211 return FrameTy;
1212}
1213
1214// We use a pointer use visitor to track how an alloca is being used.
1215// The goal is to be able to answer the following three questions:
1216// 1. Should this alloca be allocated on the frame instead?
1217// 2. Could the content of the alloca be modified prior to CoroBegin, which would
1218// require copying the data from the alloca to the frame after CoroBegin?
1219// 3. Is there any alias created for this alloca prior to CoroBegin, but used
1220// after CoroBegin. In that case, we will need to recreate the alias after
1221// CoroBegin based off the frame. To answer question 1, we track two things:
1222// a. List of all BasicBlocks that use this alloca or any of the aliases of
1223// the alloca. In the end, we check if there exists any two basic blocks that
1224// cross suspension points. If so, this alloca must be put on the frame.
1225// b. Whether the alloca or any alias of the alloca is escaped at some point,
1226// either by storing the address somewhere, or the address is used in a
1227// function call that might capture. If it's ever escaped, this alloca must be
1228// put on the frame conservatively.
1229// To answer question 2, we track it through the variable MayWriteBeforeCoroBegin.
1230// Whenever a potential write happens, either through a store instruction, a
1231// function call or any of the memory intrinsics, we check whether this
1232// instruction is prior to CoroBegin. To answer question 3, we track the offsets
1233// of all aliases created for the alloca prior to CoroBegin but used after
1234// CoroBegin. llvm::Optional is used to be able to represent the case when the
1235// offset is unknown (e.g. when you have a PHINode that takes in different
1236// offset values). We cannot handle unknown offsets and will assert. This is the
1237// potential issue left out. An ideal solution would likely require a
1238// significant redesign.
1239namespace {
1240struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1241 using Base = PtrUseVisitor<AllocaUseVisitor>;
1242 AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1243 const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
1244 : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}
1245
1246 void visit(Instruction &I) {
1247 Users.insert(&I);
1248 Base::visit(I);
1249 // If the pointer is escaped prior to CoroBegin, we have to assume it would
1250 // be written into before CoroBegin as well.
1251 if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1252 MayWriteBeforeCoroBegin = true;
1253 }
1254 }
1255 // We need to provide this overload as PtrUseVisitor uses a pointer based
1256 // visiting function.
1257 void visit(Instruction *I) { return visit(*I); }
1258
1259 void visitPHINode(PHINode &I) {
1260 enqueueUsers(I);
1261 handleAlias(I);
1262 }
1263
1264 void visitSelectInst(SelectInst &I) {
1265 enqueueUsers(I);
1266 handleAlias(I);
1267 }
1268
1269 void visitStoreInst(StoreInst &SI) {
1270 // Regardless of whether the alias of the alloca is the value operand or the
1271 // pointer operand, we need to assume the alloca is being written to.
1272 handleMayWrite(SI);
1273
1274 if (SI.getValueOperand() != U->get())
1275 return;
1276
1277 // We are storing the pointer into a memory location, potentially escaping.
1278 // As an optimization, we try to detect simple cases where it doesn't
1279 // actually escape, for example:
1280 // %ptr = alloca ..
1281 // %addr = alloca ..
1282 // store %ptr, %addr
1283 // %x = load %addr
1284 // ..
1285 // If %addr is only used by loading from it, we can simply treat %x as
1286 // another alias of %ptr, without considering %ptr as escaped.
1287 auto IsSimpleStoreThenLoad = [&]() {
1288 auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1289 // If the memory location we are storing to is not an alloca, it
1290 // could be an alias of some other memory locations, which is difficult
1291 // to analyze.
1292 if (!AI)
1293 return false;
1294 // StoreAliases contains aliases of the memory location stored into.
1295 SmallVector<Instruction *, 4> StoreAliases = {AI};
1296 while (!StoreAliases.empty()) {
1297 Instruction *I = StoreAliases.pop_back_val();
1298 for (User *U : I->users()) {
1299 // If we are loading from the memory location, we are creating an
1300 // alias of the original pointer.
1301 if (auto *LI = dyn_cast<LoadInst>(U)) {
1302 enqueueUsers(*LI);
1303 handleAlias(*LI);
1304 continue;
1305 }
1306 // If we are overwriting the memory location, the pointer certainly
1307 // won't escape.
1308 if (auto *S = dyn_cast<StoreInst>(U))
1309 if (S->getPointerOperand() == I)
1310 continue;
1311 if (auto *II = dyn_cast<IntrinsicInst>(U))
1312 if (II->isLifetimeStartOrEnd())
1313 continue;
1314 // BitCastInst creates aliases of the memory location being stored
1315 // into.
1316 if (auto *BI = dyn_cast<BitCastInst>(U)) {
1317 StoreAliases.push_back(BI);
1318 continue;
1319 }
1320 return false;
1321 }
1322 }
1323
1324 return true;
1325 };
1326
1327 if (!IsSimpleStoreThenLoad())
1328 PI.setEscaped(&SI);
1329 }
1330
1331 // All mem intrinsics modify the data.
1332 void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1333
1334 void visitBitCastInst(BitCastInst &BC) {
1335 Base::visitBitCastInst(BC);
1336 handleAlias(BC);
1337 }
1338
1339 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1340 Base::visitAddrSpaceCastInst(ASC);
1341 handleAlias(ASC);
1342 }
1343
1344 void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1345 // The base visitor will adjust Offset accordingly.
1346 Base::visitGetElementPtrInst(GEPI);
1347 handleAlias(GEPI);
1348 }
1349
1350 void visitIntrinsicInst(IntrinsicInst &II) {
1351 if (II.getIntrinsicID() != Intrinsic::lifetime_start)
1352 return Base::visitIntrinsicInst(II);
1353 LifetimeStarts.insert(&II);
1354 }
1355
1356 void visitCallBase(CallBase &CB) {
1357 for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
1358 if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1359 PI.setEscaped(&CB);
1360 handleMayWrite(CB);
1361 }
1362
1363 bool getShouldLiveOnFrame() const {
1364 if (!ShouldLiveOnFrame)
1365 ShouldLiveOnFrame = computeShouldLiveOnFrame();
1366 return ShouldLiveOnFrame.getValue();
1367 }
1368
1369 bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1370
1371 DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1372 assert(getShouldLiveOnFrame() && "This method should only be called if the "((void)0)
1373 "alloca needs to live on the frame.")((void)0);
1374 for (const auto &P : AliasOffetMap)
1375 if (!P.second)
1376 report_fatal_error("Unable to handle an alias with unknown offset "
1377 "created before CoroBegin.");
1378 return AliasOffetMap;
1379 }
1380
1381private:
1382 const DominatorTree &DT;
1383 const CoroBeginInst &CoroBegin;
1384 const SuspendCrossingInfo &Checker;
1385 // All aliases of the original AllocaInst, created before CoroBegin and used
1386 // after CoroBegin. Each entry contains the instruction and the offset into the
1387 // original Alloca. They need to be recreated after CoroBegin from the frame.
1388 DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1389 SmallPtrSet<Instruction *, 4> Users{};
1390 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1391 bool MayWriteBeforeCoroBegin{false};
1392
1393 mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1394
1395 bool computeShouldLiveOnFrame() const {
1396 // If lifetime information is available, we check it first since it's
1397 // more precise. We look at every pair of lifetime.start intrinsic and
1398 // every basic block that uses the pointer to see if they cross suspension
1399 // points. The uses cover both direct uses as well as indirect uses.
1400 if (!LifetimeStarts.empty()) {
1401 for (auto *I : Users)
1402 for (auto *S : LifetimeStarts)
1403 if (Checker.isDefinitionAcrossSuspend(*S, I))
1404 return true;
1405 return false;
1406 }
1407 // FIXME: Ideally the isEscaped check should come at the beginning.
1408 // However there are a few loose ends that need to be fixed first before
1409 // we can do that. We need to make sure we are not over-conservative, so
1410 // that the data accessed in-between await_suspend and symmetric transfer
1411 // is always put on the stack, and also data accessed after coro.end is
1412 // always put on the stack (esp the return object). To fix that, we need
1413 // to:
1414 // 1) Potentially treat sret as nocapture in calls
1415 // 2) Specially handle the return object and put it on the stack
1416 // 3) Utilize lifetime.end intrinsic
1417 if (PI.isEscaped())
1418 return true;
1419
1420 for (auto *U1 : Users)
1421 for (auto *U2 : Users)
1422 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1423 return true;
1424
1425 return false;
1426 }
1427
1428 void handleMayWrite(const Instruction &I) {
1429 if (!DT.dominates(&CoroBegin, &I))
1430 MayWriteBeforeCoroBegin = true;
1431 }
1432
1433 bool usedAfterCoroBegin(Instruction &I) {
1434 for (auto &U : I.uses())
1435 if (DT.dominates(&CoroBegin, U))
1436 return true;
1437 return false;
1438 }
1439
1440 void handleAlias(Instruction &I) {
1441 // We track all aliases created prior to CoroBegin but used after.
1442 // These aliases may need to be recreated after CoroBegin if the alloca
1443 // needs to live on the frame.
1444 if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1445 return;
1446
1447 if (!IsOffsetKnown) {
1448 AliasOffetMap[&I].reset();
1449 } else {
1450 auto Itr = AliasOffetMap.find(&I);
1451 if (Itr == AliasOffetMap.end()) {
1452 AliasOffetMap[&I] = Offset;
1453 } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1454 // If we have seen two different possible values for this alias, we set
1455 // it to empty.
1456 AliasOffetMap[&I].reset();
1457 }
1458 }
1459 }
1460};
1461} // namespace
1462
1463 // We need to make room to insert a spill after the initial PHIs, but before
1464 // the catchswitch instruction. Placing it before would violate the requirement
1465 // that a catchswitch, like all other EH pads, must be the first non-PHI in a block.
1466//
1467// Split away catchswitch into a separate block and insert in its place:
1468//
1469// cleanuppad <InsertPt> cleanupret.
1470//
1471 // The cleanupret instruction will act as an insertion point for the spill.
1472static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1473 BasicBlock *CurrentBlock = CatchSwitch->getParent();
1474 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1475 CurrentBlock->getTerminator()->eraseFromParent();
1476
1477 auto *CleanupPad =
1478 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1479 auto *CleanupRet =
1480 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1481 return CleanupRet;
1482}
1483
1484static void createFramePtr(coro::Shape &Shape) {
1485 auto *CB = Shape.CoroBegin;
1486 IRBuilder<> Builder(CB->getNextNode());
1487 StructType *FrameTy = Shape.FrameTy;
1488 PointerType *FramePtrTy = FrameTy->getPointerTo();
1489 Shape.FramePtr =
1490 cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1491}
1492
1493// Replace all alloca and SSA values that are accessed across suspend points
1494// with GetElementPointer from coroutine frame + loads and stores. Create an
1495// AllocaSpillBB that will become the new entry block for the resume parts of
1496// the coroutine:
1497//
1498// %hdl = coro.begin(...)
1499// whatever
1500//
1501// becomes:
1502//
1503// %hdl = coro.begin(...)
1504// %FramePtr = bitcast i8* hdl to %f.frame*
1505// br label %AllocaSpillBB
1506//
1507// AllocaSpillBB:
1508// ; geps corresponding to allocas that were moved to coroutine frame
1509// br label PostSpill
1510//
1511// PostSpill:
1512// whatever
1513//
1514//
1515static Instruction *insertSpills(const FrameDataInfo &FrameData,
1516 coro::Shape &Shape) {
1517 auto *CB = Shape.CoroBegin;
1518 LLVMContext &C = CB->getContext();
1519 IRBuilder<> Builder(C);
1520 StructType *FrameTy = Shape.FrameTy;
1521 Instruction *FramePtr = Shape.FramePtr;
1522 DominatorTree DT(*CB->getFunction());
1523 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1524
1525 // Create a GEP with the given index into the coroutine frame for the original
1526 // value Orig. Appends an extra 0 index for array-allocas, preserving the
1527 // original type.
1528 auto GetFramePointer = [&](Value *Orig) -> Value * {
1529 FieldIDType Index = FrameData.getFieldIndex(Orig);
1530 SmallVector<Value *, 3> Indices = {
1531 ConstantInt::get(Type::getInt32Ty(C), 0),
1532 ConstantInt::get(Type::getInt32Ty(C), Index),
1533 };
1534
1535 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1536 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1537 auto Count = CI->getValue().getZExtValue();
1538 if (Count > 1) {
1539 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1540 }
1541 } else {
1542 report_fatal_error("Coroutines cannot handle non static allocas yet");
1543 }
1544 }
1545
1546 auto GEP = cast<GetElementPtrInst>(
1547 Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1548 if (isa<AllocaInst>(Orig)) {
1549 // If the type of the GEP is not equal to the type of the AllocaInst, it implies
1550 // that the AllocaInst may be sharing a Frame slot with another
1551 // AllocaInst. So we cast the GEP to the AllocaInst type here to reuse
1552 // the Frame storage.
1553 //
1554 // Note: If we change the strategy for dealing with alignment, we need to refine
1555 // this casting.
1556 if (GEP->getResultElementType() != Orig->getType())
1557 return Builder.CreateBitCast(GEP, Orig->getType(),
1558 Orig->getName() + Twine(".cast"));
1559 }
1560 return GEP;
1561 };
1562
1563 for (auto const &E : FrameData.Spills) {
1564 Value *Def = E.first;
1565 auto SpillAlignment = Align(FrameData.getAlign(Def));
1566 // Create a store instruction storing the value into the
1567 // coroutine frame.
1568 Instruction *InsertPt = nullptr;
1569 bool NeedToCopyArgPtrValue = false;
1570 if (auto *Arg = dyn_cast<Argument>(Def)) {
1571 // For arguments, we will place the store instruction right after
1572 // the coroutine frame pointer instruction, i.e. bitcast of
1573 // coro.begin from i8* to %f.frame*.
1574 InsertPt = FramePtr->getNextNode();
1575
1576 // If we're spilling an Argument, make sure we clear 'nocapture'
1577 // from the coroutine function.
1578 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1579
1580 if (Arg->hasByValAttr())
1581 NeedToCopyArgPtrValue = true;
1582
1583 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1584 // Don't spill immediately after a suspend; splitting assumes
1585 // that the suspend will be followed by a branch.
1586 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1587 } else {
1588 auto *I = cast<Instruction>(Def);
1589 if (!DT.dominates(CB, I)) {
1590 // If it is not dominated by CoroBegin, then spill should be
1591 // inserted immediately after CoroFrame is computed.
1592 InsertPt = FramePtr->getNextNode();
1593 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1594 // If we are spilling the result of the invoke instruction, split
1595 // the normal edge and insert the spill in the new block.
1596 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1597 InsertPt = NewBB->getTerminator();
1598 } else if (isa<PHINode>(I)) {
1599 // Skip the PHINodes and EH pads instructions.
1600 BasicBlock *DefBlock = I->getParent();
1601 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1602 InsertPt = splitBeforeCatchSwitch(CSI);
1603 else
1604 InsertPt = &*DefBlock->getFirstInsertionPt();
1605 } else {
1606 assert(!I->isTerminator() && "unexpected terminator")((void)0);
1607 // For all other values, the spill is placed immediately after
1608 // the definition.
1609 InsertPt = I->getNextNode();
1610 }
1611 }
1612
1613 auto Index = FrameData.getFieldIndex(Def);
1614 Builder.SetInsertPoint(InsertPt);
1615 auto *G = Builder.CreateConstInBoundsGEP2_32(
1616 FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1617 if (NeedToCopyArgPtrValue) {
1618 // For byval arguments, we need to store the pointed value in the frame,
1619 // instead of the pointer itself.
1620 auto *Value =
1621 Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
1622 Builder.CreateAlignedStore(Value, G, SpillAlignment);
1623 } else {
1624 Builder.CreateAlignedStore(Def, G, SpillAlignment);
1625 }
1626
1627 BasicBlock *CurrentBlock = nullptr;
1628 Value *CurrentReload = nullptr;
1629 for (auto *U : E.second) {
1630 // If we have not seen the use block, create a load instruction to reload
1631 // the spilled value from the coroutine frame. Populates the Value pointer
1632 // reference provided with the frame GEP.
1633 if (CurrentBlock != U->getParent()) {
1634 CurrentBlock = U->getParent();
1635 Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1636
1637 auto *GEP = GetFramePointer(E.first);
1638 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1639 if (NeedToCopyArgPtrValue)
1640 CurrentReload = GEP;
1641 else
1642 CurrentReload = Builder.CreateAlignedLoad(
1643 FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1644 SpillAlignment, E.first->getName() + Twine(".reload"));
1645
1646 TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1647 for (DbgDeclareInst *DDI : DIs) {
1648 bool AllowUnresolved = false;
1649 // This dbg.declare is preserved for all coro-split function
1650 // fragments. It will be unreachable in the main function, and
1651 // processed by coro::salvageDebugInfo() by CoroCloner.
1652 DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1653 .insertDeclare(CurrentReload, DDI->getVariable(),
1654 DDI->getExpression(), DDI->getDebugLoc(),
1655 &*Builder.GetInsertPoint());
1656 // This dbg.declare is for the main function entry point. It
1657 // will be deleted in all coro-split functions.
1658 coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
1659 }
1660 }
1661
1662 // If we have a single edge PHINode, remove it and replace it with a
1663 // reload from the coroutine frame. (We already took care of multi edge
1664 // PHINodes by rewriting them in the rewritePHIs function).
1665 if (auto *PN = dyn_cast<PHINode>(U)) {
1666 assert(PN->getNumIncomingValues() == 1 &&((void)0)
1667 "unexpected number of incoming "((void)0)
1668 "values in the PHINode")((void)0);
1669 PN->replaceAllUsesWith(CurrentReload);
1670 PN->eraseFromParent();
1671 continue;
1672 }
1673
1674 // Replace all uses of CurrentValue in the current instruction with
1675 // reload.
1676 U->replaceUsesOfWith(Def, CurrentReload);
1677 }
1678 }
1679
1680 BasicBlock *FramePtrBB = FramePtr->getParent();
1681
1682 auto SpillBlock =
1683 FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1684 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1685 Shape.AllocaSpillBlock = SpillBlock;
1686
1687 // retcon and retcon.once lowering assumes all uses have been sunk.
1688 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1689 Shape.ABI == coro::ABI::Async) {
1690 // If we found any allocas, replace all of their remaining uses with Geps.
1691 Builder.SetInsertPoint(&SpillBlock->front());
1692 for (const auto &P : FrameData.Allocas) {
1693 AllocaInst *Alloca = P.Alloca;
1694 auto *G = GetFramePointer(Alloca);
1695
1696 // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1697 // here, as we are changing location of the instruction.
1698 G->takeName(Alloca);
1699 Alloca->replaceAllUsesWith(G);
1700 Alloca->eraseFromParent();
1701 }
1702 return FramePtr;
1703 }
1704
1705 // If we found any allocas, replace all of their remaining uses with GEP
1706 // instructions. To retain debuggability, we replace the uses of allocas in
1707 // dbg.declares and dbg.values with the reload from the frame.
1708 // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1709 // as some of the uses may not be dominated by CoroBegin.
1710 Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1711 SmallVector<Instruction *, 4> UsersToUpdate;
1712 for (const auto &A : FrameData.Allocas) {
1713 AllocaInst *Alloca = A.Alloca;
1714 UsersToUpdate.clear();
1715 for (User *U : Alloca->users()) {
1716 auto *I = cast<Instruction>(U);
1717 if (DT.dominates(CB, I))
1718 UsersToUpdate.push_back(I);
1719 }
1720 if (UsersToUpdate.empty())
1721 continue;
1722 auto *G = GetFramePointer(Alloca);
1723 G->setName(Alloca->getName() + Twine(".reload.addr"));
1724
1725 SmallVector<DbgVariableIntrinsic *, 4> DIs;
1726 findDbgUsers(DIs, Alloca);
1727 for (auto *DVI : DIs)
1728 DVI->replaceUsesOfWith(Alloca, G);
1729
1730 for (Instruction *I : UsersToUpdate)
1731 I->replaceUsesOfWith(Alloca, G);
1732 }
1733 Builder.SetInsertPoint(FramePtr->getNextNode());
1734 for (const auto &A : FrameData.Allocas) {
1735 AllocaInst *Alloca = A.Alloca;
1736 if (A.MayWriteBeforeCoroBegin) {
1737 // isEscaped really means potentially modified before CoroBegin.
1738 if (Alloca->isArrayAllocation())
1739 report_fatal_error(
1740 "Coroutines cannot handle copying of array allocas yet");
1741
1742 auto *G = GetFramePointer(Alloca);
1743 auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1744 Builder.CreateStore(Value, G);
1745 }
1746 // For each alias of Alloca created before CoroBegin but used after
1747 // CoroBegin, we recreate it after CoroBegin by applying the offset
1748 // to the pointer in the frame.
1749 for (const auto &Alias : A.Aliases) {
1750 auto *FramePtr = GetFramePointer(Alloca);
1751 auto *FramePtrRaw =
1752 Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1753 auto *AliasPtr = Builder.CreateGEP(
1754 Type::getInt8Ty(C), FramePtrRaw,
1755 ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1756 auto *AliasPtrTyped =
1757 Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1758 Alias.first->replaceUsesWithIf(
1759 AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1760 }
1761 }
1762 return FramePtr;
1763}
1764
1765 // Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
1766// PHI in InsertedBB.
1767static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1768 BasicBlock *InsertedBB,
1769 BasicBlock *PredBB,
1770 PHINode *UntilPHI = nullptr) {
1771 auto *PN = cast<PHINode>(&SuccBB->front());
1772 do {
1773 int Index = PN->getBasicBlockIndex(InsertedBB);
1774 Value *V = PN->getIncomingValue(Index);
1775 PHINode *InputV = PHINode::Create(
1776 V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1777 &InsertedBB->front());
1778 InputV->addIncoming(V, PredBB);
1779 PN->setIncomingValue(Index, InputV);
1780 PN = dyn_cast<PHINode>(PN->getNextNode());
1781 } while (PN != UntilPHI);
1782}
1783
1784// Rewrites the PHI Nodes in a cleanuppad.
1785static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1786 CleanupPadInst *CleanupPad) {
1787 // For every incoming edge to a CleanupPad we will create a new block holding
1788 // all incoming values in single-value PHI nodes. We will then create another
1789 // block to act as a dispatcher (as all unwind edges for related EH blocks
1790 // must be the same).
1791 //
1792 // cleanuppad:
1793 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1794 // %3 = cleanuppad within none []
1795 //
1796 // It will create:
1797 //
1798 // cleanuppad.corodispatch
1799 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
1800 // %3 = cleanuppad within none []
1801 // switch i8 %2, label %unreachable
1802 // [i8 0, label %cleanuppad.from.catchswitch
1803 // i8 1, label %cleanuppad.from.catch.1]
1804 // cleanuppad.from.catchswitch:
1805 // %4 = phi i32 [%0, %catchswitch]
1806 // br %label cleanuppad
1807 // cleanuppad.from.catch.1:
1808 // %6 = phi i32 [%1, %catch.1]
1809 // br %label cleanuppad
1810 // cleanuppad:
1811 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1812 // [%6, %cleanuppad.from.catch.1]
1813
1814 // Unreachable BB, in case switching on an invalid value in the dispatcher.
1815 auto *UnreachBB = BasicBlock::Create(
1816 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1817 IRBuilder<> Builder(UnreachBB);
1818 Builder.CreateUnreachable();
1819
1820 // Create a new cleanuppad which will be the dispatcher.
1821 auto *NewCleanupPadBB =
1822 BasicBlock::Create(CleanupPadBB->getContext(),
1823 CleanupPadBB->getName() + Twine(".corodispatch"),
1824 CleanupPadBB->getParent(), CleanupPadBB);
1825 Builder.SetInsertPoint(NewCleanupPadBB);
1826 auto *SwitchType = Builder.getInt8Ty();
1827 auto *SetDispatchValuePN =
1828 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1829 CleanupPad->removeFromParent();
1830 CleanupPad->insertAfter(SetDispatchValuePN);
1831 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1832 pred_size(CleanupPadBB));
1833
1834 int SwitchIndex = 0;
1835 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1836 for (BasicBlock *Pred : Preds) {
1837 // Create a new cleanuppad and move the PHI values to there.
1838 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1839 CleanupPadBB->getName() +
1840 Twine(".from.") + Pred->getName(),
1841 CleanupPadBB->getParent(), CleanupPadBB);
1842 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1843 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1844 Pred->getName());
1845 Builder.SetInsertPoint(CaseBB);
1846 Builder.CreateBr(CleanupPadBB);
1847 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1848
1849 // Update this Pred to the new unwind point.
1850 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1851
1852 // Setup the switch in the dispatcher.
1853 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1854 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1855 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1856 SwitchIndex++;
1857 }
1858}
1859
1860static void cleanupSinglePredPHIs(Function &F) {
1861 SmallVector<PHINode *, 32> Worklist;
1862 for (auto &BB : F) {
1863 for (auto &Phi : BB.phis()) {
1864 if (Phi.getNumIncomingValues() == 1) {
1865 Worklist.push_back(&Phi);
1866 } else
1867 break;
1868 }
1869 }
1870 while (!Worklist.empty()) {
1871 auto *Phi = Worklist.back();
1872 Worklist.pop_back();
1873 auto *OriginalValue = Phi->getIncomingValue(0);
1874 Phi->replaceAllUsesWith(OriginalValue);
1875 }
1876}
1877
1878static void rewritePHIs(BasicBlock &BB) {
1879 // For every incoming edge we will create a block holding all
1880 // incoming values in single-value PHI nodes.
1881 //
1882 // loop:
1883 // %n.val = phi i32[%n, %entry], [%inc, %loop]
1884 //
1885 // It will create:
1886 //
1887 // loop.from.entry:
1888 // %n.loop.pre = phi i32 [%n, %entry]
1889 // br %label loop
1890 // loop.from.loop:
1891 // %inc.loop.pre = phi i32 [%inc, %loop]
1892 // br %label loop
1893 //
1894 // After this rewrite, further analysis will ignore any phi nodes with more
1895 // than one incoming edge.
1896
1897 // TODO: Simplify PHINodes in the basic block to remove duplicate
1898 // predecessors.
1899
1900 // Special case for CleanupPad: all EH blocks must have the same unwind edge
1901 // so we need to create an additional "dispatcher" block.
1902 if (auto *CleanupPad =
1903 dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1904 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1905 for (BasicBlock *Pred : Preds) {
1906 if (CatchSwitchInst *CS =
1907 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1908 // CleanupPad with a CatchSwitch predecessor: therefore this is an
1909 // unwind destination that needs to be handled specially.
1910 assert(CS->getUnwindDest() == &BB)((void)0);
1911 (void)CS;
1912 rewritePHIsForCleanupPad(&BB, CleanupPad);
1913 return;
1914 }
1915 }
1916 }
1917
1918 LandingPadInst *LandingPad = nullptr;
1919 PHINode *ReplPHI = nullptr;
1920 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1921 // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1922 // We replace the original landing pad with a PHINode that will collect the
1923 // results from all of them.
1924 ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1925 ReplPHI->takeName(LandingPad);
1926 LandingPad->replaceAllUsesWith(ReplPHI);
1927 // We will erase the original landing pad at the end of this function after
1928 // ehAwareSplitEdge cloned it in the transition blocks.
1929 }
1930
1931 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1932 for (BasicBlock *Pred : Preds) {
1933 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1934 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1935
1936 // Stop the moving of values at ReplPHI, as this is either null or the PHI
1937 // that replaced the landing pad.
1938 movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1939 }
1940
1941 if (LandingPad) {
1942 // Calls to the ehAwareSplitEdge function cloned the original landing pad.
1943 // We no longer need it.
1944 LandingPad->eraseFromParent();
1945 }
1946}
1947
1948static void rewritePHIs(Function &F) {
1949 SmallVector<BasicBlock *, 8> WorkList;
1950
1951 for (BasicBlock &BB : F)
1952 if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1953 if (PN->getNumIncomingValues() > 1)
1954 WorkList.push_back(&BB);
1955
1956 for (BasicBlock *BB : WorkList)
1957 rewritePHIs(*BB);
1958}
1959
1960 // Check for instructions that we can recreate on resume, as opposed to spilling
1961 // the result into the coroutine frame.
1962static bool materializable(Instruction &V) {
1963 return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1964 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1965}
1966
1967// Check for structural coroutine intrinsics that should not be spilled into
1968// the coroutine frame.
1969static bool isCoroutineStructureIntrinsic(Instruction &I) {
1970 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1971 isa<CoroSuspendInst>(&I);
1972}
1973
1974 // For every use of the value that is across a suspend point, recreate that value
1975// after a suspend point.
1976static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1977 const SpillInfo &Spills) {
1978 for (const auto &E : Spills) {
1979 Value *Def = E.first;
1980 BasicBlock *CurrentBlock = nullptr;
1981 Instruction *CurrentMaterialization = nullptr;
1982 for (Instruction *U : E.second) {
1983 // If we have not seen this block, materialize the value.
1984 if (CurrentBlock != U->getParent()) {
1985
1986 bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
1987 CurrentBlock = IsInCoroSuspendBlock
1988 ? U->getParent()->getSinglePredecessor()
1989 : U->getParent();
1990 CurrentMaterialization = cast<Instruction>(Def)->clone();
1991 CurrentMaterialization->setName(Def->getName());
1992 CurrentMaterialization->insertBefore(
1993 IsInCoroSuspendBlock ? CurrentBlock->getTerminator()
1994 : &*CurrentBlock->getFirstInsertionPt());
1995 }
1996 if (auto *PN = dyn_cast<PHINode>(U)) {
1997 assert(PN->getNumIncomingValues() == 1 &&((void)0)
1998 "unexpected number of incoming "((void)0)
1999 "values in the PHINode")((void)0);
2000 PN->replaceAllUsesWith(CurrentMaterialization);
2001 PN->eraseFromParent();
2002 continue;
2003 }
2004 // Replace all uses of Def in the current instruction with the
2005 // CurrentMaterialization for the block.
2006 U->replaceUsesOfWith(Def, CurrentMaterialization);
2007 }
2008 }
2009}
2010
2011// Splits the block at a particular instruction unless it is the first
2012// instruction in the block with a single predecessor.
2013static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2014 auto *BB = I->getParent();
2015 if (&BB->front() == I) {
2016 if (BB->getSinglePredecessor()) {
2017 BB->setName(Name);
2018 return BB;
2019 }
2020 }
2021 return BB->splitBasicBlock(I, Name);
2022}
2023
2024// Split above and below a particular instruction so that it
2025 // ends up alone in its own block.
2026static void splitAround(Instruction *I, const Twine &Name) {
2027 splitBlockIfNotFirst(I, Name);
2028 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2029}
2030
2031static bool isSuspendBlock(BasicBlock *BB) {
2032 return isa<AnyCoroSuspendInst>(BB->front());
2033}
2034
2035typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
2036
2037/// Does control flow starting at the given block ever reach a suspend
2038/// instruction before reaching a block in VisitedOrFreeBBs?
2039static bool isSuspendReachableFrom(BasicBlock *From,
2040 VisitedBlocksSet &VisitedOrFreeBBs) {
2041 // Eagerly try to add this block to the visited set. If it's already
2042 // there, stop recursing; this path doesn't reach a suspend before
2043 // either looping or reaching a freeing block.
2044 if (!VisitedOrFreeBBs.insert(From).second)
2045 return false;
2046
2047 // We assume that we'll already have split suspends into their own blocks.
2048 if (isSuspendBlock(From))
2049 return true;
2050
2051 // Recurse on the successors.
2052 for (auto Succ : successors(From)) {
2053 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2054 return true;
2055 }
2056
2057 return false;
2058}
2059
2060/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2061/// suspend point?
2062static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2063 // Seed the visited set with all the basic blocks containing a free
2064 // so that the search will not continue past them.
2065 VisitedBlocksSet VisitedOrFreeBBs;
2066 for (auto User : AI->users()) {
2067 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2068 VisitedOrFreeBBs.insert(FI->getParent());
2069 }
2070
2071 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2072}
2073
2074/// After we split the coroutine, will the given basic block be along
2075/// an obvious exit path for the resumption function?
2076static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2077 unsigned depth = 3) {
2078 // If we've bottomed out our depth count, stop searching and assume
2079 // that the path might loop back.
2080 if (depth == 0) return false;
2081
2082 // If this is a suspend block, we're about to exit the resumption function.
2083 if (isSuspendBlock(BB)) return true;
2084
2085 // Recurse into the successors.
2086 for (auto Succ : successors(BB)) {
2087 if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2088 return false;
2089 }
2090
2091 // If none of the successors leads back in a loop, we're on an exit/abort.
2092 return true;
2093}
2094
2095static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2096 // Look for a free that isn't sufficiently obviously followed by
2097 // either a suspend or a termination, i.e. something that will leave
2098 // the coro resumption frame.
2099 for (auto U : AI->users()) {
2100 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2101 if (!FI) continue;
2102
2103 if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2104 return true;
2105 }
2106
2107 // If we never found one, we don't need a stack save.
2108 return false;
2109}
2110
2111/// Turn each of the given local allocas into a normal (dynamic) alloca
2112/// instruction.
2113static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2114 SmallVectorImpl<Instruction*> &DeadInsts) {
2115 for (auto AI : LocalAllocas) {
2116 auto M = AI->getModule();
2117 IRBuilder<> Builder(AI);
2118
2119 // Save the stack depth. Try to avoid doing this if the stackrestore
2120 // is going to immediately precede a return or something.
2121 Value *StackSave = nullptr;
2122 if (localAllocaNeedsStackSave(AI))
2123 StackSave = Builder.CreateCall(
2124 Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2125
2126 // Allocate memory.
2127 auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2128 Alloca->setAlignment(Align(AI->getAlignment()));
2129
2130 for (auto U : AI->users()) {
2131 // Replace gets with the allocation.
2132 if (isa<CoroAllocaGetInst>(U)) {
2133 U->replaceAllUsesWith(Alloca);
2134
2135 // Replace frees with stackrestores. This is safe because
2136 // alloca.alloc is required to obey a stack discipline, although we
2137 // don't enforce that structurally.
2138 } else {
2139 auto FI = cast<CoroAllocaFreeInst>(U);
2140 if (StackSave) {
2141 Builder.SetInsertPoint(FI);
2142 Builder.CreateCall(
2143 Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2144 StackSave);
2145 }
2146 }
2147 DeadInsts.push_back(cast<Instruction>(U));
2148 }
2149
2150 DeadInsts.push_back(AI);
2151 }
2152}
2153
2154/// Turn the given coro.alloca.alloc call into a dynamic allocation.
2155/// This happens during the all-instructions iteration, so it must not
2156/// delete the call.
2157static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2158 coro::Shape &Shape,
2159 SmallVectorImpl<Instruction*> &DeadInsts) {
2160 IRBuilder<> Builder(AI);
2161 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2162
2163 for (User *U : AI->users()) {
2164 if (isa<CoroAllocaGetInst>(U)) {
2165 U->replaceAllUsesWith(Alloc);
2166 } else {
2167 auto FI = cast<CoroAllocaFreeInst>(U);
2168 Builder.SetInsertPoint(FI);
2169 Shape.emitDealloc(Builder, Alloc, nullptr);
2170 }
2171 DeadInsts.push_back(cast<Instruction>(U));
2172 }
2173
2174 // Push this on last so that it gets deleted after all the others.
2175 DeadInsts.push_back(AI);
2176
2177 // Return the new allocation value so that we can check for needed spills.
2178 return cast<Instruction>(Alloc);
2179}
2180
2181/// Get the current swifterror value.
2182static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2183 coro::Shape &Shape) {
2184 // Make a fake function pointer as a sort of intrinsic.
2185 auto FnTy = FunctionType::get(ValueTy, {}, false);
2186 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2187
2188 auto Call = Builder.CreateCall(FnTy, Fn, {});
2189 Shape.SwiftErrorOps.push_back(Call);
2190
2191 return Call;
2192}
2193
2194/// Set the given value as the current swifterror value.
2195///
2196/// Returns a slot that can be used as a swifterror slot.
2197static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2198 coro::Shape &Shape) {
2199 // Make a fake function pointer as a sort of intrinsic.
2200 auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2201 {V->getType()}, false);
2202 auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2203
2204 auto Call = Builder.CreateCall(FnTy, Fn, { V });
2205 Shape.SwiftErrorOps.push_back(Call);
2206
2207 return Call;
2208}
2209
2210/// Set the swifterror value from the given alloca before a call,
2211 /// then put it back in the alloca afterwards.
2212///
2213/// Returns an address that will stand in for the swifterror slot
2214/// until splitting.
2215static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2216 AllocaInst *Alloca,
2217 coro::Shape &Shape) {
2218 auto ValueTy = Alloca->getAllocatedType();
2219 IRBuilder<> Builder(Call);
2220
2221 // Load the current value from the alloca and set it as the
2222 // swifterror value.
2223 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2224 auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2225
2226 // Move to after the call. Since swifterror only has a guaranteed
2227 // value on normal exits, we can ignore implicit and explicit unwind
2228 // edges.
2229 if (isa<CallInst>(Call)) {
2230 Builder.SetInsertPoint(Call->getNextNode());
2231 } else {
2232 auto Invoke = cast<InvokeInst>(Call);
2233 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2234 }
2235
2236 // Get the current swifterror value and store it to the alloca.
2237 auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2238 Builder.CreateStore(ValueAfterCall, Alloca);
2239
2240 return Addr;
2241}
2242
2243/// Eliminate a formerly-swifterror alloca by inserting the get/set
2244/// intrinsics and attempting to MemToReg the alloca away.
2245static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2246 coro::Shape &Shape) {
2247 for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
2248 // We're likely changing the use list, so use a mutation-safe
2249 // iteration pattern.
2250 auto &Use = *UI;
2251 ++UI;
2252
2253 // swifterror values can only be used in very specific ways.
2254 // We take advantage of that here.
2255 auto User = Use.getUser();
2256 if (isa<LoadInst>(User) || isa<StoreInst>(User))
2257 continue;
2258
2259 assert(isa<CallInst>(User) || isa<InvokeInst>(User))((void)0);
2260 auto Call = cast<Instruction>(User);
2261
2262 auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2263
2264 // Use the returned slot address as the call argument.
2265 Use.set(Addr);
2266 }
2267
2268 // All the uses should be loads and stores now.
2269 assert(isAllocaPromotable(Alloca))((void)0);
2270}
2271
2272/// "Eliminate" a swifterror argument by reducing it to the alloca case
2273 /// and then loading and storing in the prologue and epilogue.
2274///
2275/// The argument keeps the swifterror flag.
2276static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2277 coro::Shape &Shape,
2278 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2279 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2280
2281 auto ArgTy = cast<PointerType>(Arg.getType());
2282 auto ValueTy = ArgTy->getElementType();
2283
2284 // Reduce to the alloca case:
2285
2286 // Create an alloca and replace all uses of the arg with it.
2287 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2288 Arg.replaceAllUsesWith(Alloca);
2289
2290 // Set an initial value in the alloca. swifterror is always null on entry.
2291 auto InitialValue = Constant::getNullValue(ValueTy);
2292 Builder.CreateStore(InitialValue, Alloca);
2293
2294 // Find all the suspends in the function and save and restore around them.
2295 for (auto Suspend : Shape.CoroSuspends) {
2296 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2297 }
2298
2299 // Find all the coro.ends in the function and restore the error value.
2300 for (auto End : Shape.CoroEnds) {
2301 Builder.SetInsertPoint(End);
2302 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2303 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2304 }
2305
2306 // Now we can use the alloca logic.
2307 AllocasToPromote.push_back(Alloca);
2308 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2309}
2310
2311/// Eliminate all problematic uses of swifterror arguments and allocas
2312/// from the function. We'll fix them up later when splitting the function.
2313static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2314 SmallVector<AllocaInst*, 4> AllocasToPromote;
2315
2316 // Look for a swifterror argument.
2317 for (auto &Arg : F.args()) {
2318 if (!Arg.hasSwiftErrorAttr()) continue;
2319
2320 eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2321 break;
2322 }
2323
2324 // Look for swifterror allocas.
2325 for (auto &Inst : F.getEntryBlock()) {
2326 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2327 if (!Alloca || !Alloca->isSwiftError()) continue;
2328
2329 // Clear the swifterror flag.
2330 Alloca->setSwiftError(false);
2331
2332 AllocasToPromote.push_back(Alloca);
2333 eliminateSwiftErrorAlloca(F, Alloca, Shape);
2334 }
2335
2336 // If we have any allocas to promote, compute a dominator tree and
2337 // promote them en masse.
2338 if (!AllocasToPromote.empty()) {
2339 DominatorTree DT(F);
2340 PromoteMemToReg(AllocasToPromote, DT);
2341 }
2342}
2343
2344/// retcon and retcon.once conventions assume that all spill uses can be sunk
2345/// after the coro.begin intrinsic.
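///
/// For instance (a sketch only, with invented value names): if a value that
/// must live on the frame has a user that currently sits before coro.begin,
///
///   %v    = call i8* @some.call()
///   %cast = bitcast i8* %v to i32*          ; user currently before coro.begin
///   %hdl  = call i8* @llvm.coro.begin(...)
///
/// that user (and, transitively, its own users) is moved to just after
/// coro.begin, so that the later spill/reload rewrite can assume every use is
/// dominated by coro.begin.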
2346static void sinkSpillUsesAfterCoroBegin(Function &F,
2347 const FrameDataInfo &FrameData,
2348 CoroBeginInst *CoroBegin) {
2349 DominatorTree Dom(F);
2350
2351 SmallSetVector<Instruction *, 32> ToMove;
2352 SmallVector<Instruction *, 32> Worklist;
2353
2354 // Collect all users that precede coro.begin.
2355 for (auto *Def : FrameData.getAllDefs()) {
2356 for (User *U : Def->users()) {
2357 auto Inst = cast<Instruction>(U);
2358 if (Inst->getParent() != CoroBegin->getParent() ||
2359 Dom.dominates(CoroBegin, Inst))
2360 continue;
2361 if (ToMove.insert(Inst))
2362 Worklist.push_back(Inst);
2363 }
2364 }
2365 // Recursively collect users before coro.begin.
2366 while (!Worklist.empty()) {
2367 auto *Def = Worklist.pop_back_val();
2368 for (User *U : Def->users()) {
2369 auto Inst = cast<Instruction>(U);
2370 if (Dom.dominates(CoroBegin, Inst))
2371 continue;
2372 if (ToMove.insert(Inst))
2373 Worklist.push_back(Inst);
2374 }
2375 }
2376
2377 // Sort by dominance.
2378 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2379 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2380 // If A dominates B, it should precede (<) B.
2381 return Dom.dominates(A, B);
2382 });
2383
2384 Instruction *InsertPt = CoroBegin->getNextNode();
2385 for (Instruction *Inst : InsertionList)
2386 Inst->moveBefore(InsertPt);
2387}
2388
2389 /// For each local variable whose users all live inside a single region
2390 /// delimited by suspend points, we sink its lifetime.start markers to the block
2391 /// just after the suspend block. Doing so minimizes the lifetime of each variable,
2392 /// hence minimizing the amount of data we end up putting on the frame.
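///
/// A minimal sketch of the intent (block and value names are illustrative
/// only):
///
///   entry:
///     %x = alloca i32
///     %x.i8 = bitcast i32* %x to i8*
///     call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.i8)
///     ...
///   await.suspend:
///     %sp = call i8 @llvm.coro.suspend(token none, i1 false)
///     ...
///   await.ready:                    ; all real uses of %x live here
///     ...
///
/// If every use of %x other than the lifetime markers is dominated by
/// await.ready and does not cross another suspend point, the lifetime.start
/// is sunk into await.ready and %x no longer needs a slot on the frame.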
2393static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2394 SuspendCrossingInfo &Checker) {
2395 DominatorTree DT(F);
2396
2397 // Collect all possible basic blocks which may dominate all uses of allocas.
2398 SmallPtrSet<BasicBlock *, 4> DomSet;
2399 DomSet.insert(&F.getEntryBlock());
2400 for (auto *CSI : Shape.CoroSuspends) {
2401 BasicBlock *SuspendBlock = CSI->getParent();
2402 assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&((void)0)
2403 "should have split coro.suspend into its own block")((void)0);
2404 DomSet.insert(SuspendBlock->getSingleSuccessor());
2405 }
2406
2407 for (Instruction &I : instructions(F)) {
2408 AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2409 if (!AI)
2410 continue;
2411
2412 for (BasicBlock *DomBB : DomSet) {
2413 bool Valid = true;
2414 SmallVector<Instruction *, 1> Lifetimes;
2415
2416 auto isLifetimeStart = [](Instruction* I) {
2417 if (auto* II = dyn_cast<IntrinsicInst>(I))
2418 return II->getIntrinsicID() == Intrinsic::lifetime_start;
2419 return false;
2420 };
2421
2422 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2423 if (isLifetimeStart(U)) {
2424 Lifetimes.push_back(U);
2425 return true;
2426 }
2427 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2428 return false;
2429 if (isLifetimeStart(U->user_back())) {
2430 Lifetimes.push_back(U->user_back());
2431 return true;
2432 }
2433 return false;
2434 };
2435
2436 for (User *U : AI->users()) {
2437 Instruction *UI = cast<Instruction>(U);
2438 // For all users except lifetime.start markers, if they are all
2439 // dominated by one of the basic blocks and do not cross
2440 // suspend points as well, then there is no need to spill the
2441 // instruction.
2442 if (!DT.dominates(DomBB, UI->getParent()) ||
2443 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2444 // Skip lifetime.start, GEP and bitcast used by lifetime.start
2445 // markers.
2446 if (collectLifetimeStart(UI, AI))
2447 continue;
2448 Valid = false;
2449 break;
2450 }
2451 }
2452 // Sink lifetime.start markers to the dominating block when they are
2453 // the only uses outside the region.
2454 if (Valid && Lifetimes.size() != 0) {
2455 // May be AI itself, when the type of AI is i8*
2456 auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2457 if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2458 return AI;
2459 auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2460 return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2461 DomBB->getTerminator());
2462 }(AI);
2463
2464 auto *NewLifetime = Lifetimes[0]->clone();
2465 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2466 NewLifetime->insertBefore(DomBB->getTerminator());
2467
2468 // All the original lifetime.start markers outside the region are no longer necessary.
2469 for (Instruction *S : Lifetimes)
2470 S->eraseFromParent();
2471
2472 break;
2473 }
2474 }
2475 }
2476}
2477
2478static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2479 const SuspendCrossingInfo &Checker,
2480 SmallVectorImpl<AllocaInfo> &Allocas) {
2481 for (Instruction &I : instructions(F)) {
2482 auto *AI = dyn_cast<AllocaInst>(&I);
2483 if (!AI)
2484 continue;
2485 // The PromiseAlloca will be specially handled since it needs to be in a
2486 // fixed position in the frame.
2487 if (AI == Shape.SwitchLowering.PromiseAlloca) {
2488 continue;
2489 }
2490 DominatorTree DT(F);
2491 AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2492 *Shape.CoroBegin, Checker};
2493 Visitor.visitPtr(*AI);
2494 if (!Visitor.getShouldLiveOnFrame())
2495 continue;
2496 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2497 Visitor.getMayWriteBeforeCoroBegin());
2498 }
2499}
2500
2501void coro::salvageDebugInfo(
2502 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2503 DbgVariableIntrinsic *DVI, bool ReuseFrameSlot) {
2504 Function *F = DVI->getFunction();
2505 IRBuilder<> Builder(F->getContext());
2506 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2507 while (isa<IntrinsicInst>(InsertPt))
2508 ++InsertPt;
2509 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2510 DIExpression *Expr = DVI->getExpression();
2511 // Follow the pointer arithmetic all the way to the incoming
2512 // function argument and convert into a DIExpression.
2513 bool OutermostLoad = true;
2514 Value *Storage = DVI->getVariableLocationOp(0);
2515 Value *OriginalStorage = Storage;
2516 while (Storage) {
2517 if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
2518 Storage = LdInst->getOperand(0);
2519 // FIXME: This is a heuristic that works around the fact that
2520 // LLVM IR debug intrinsics cannot yet distinguish between
2521 // memory and value locations: Because a dbg.declare(alloca) is
2522 // implicitly a memory location no DW_OP_deref operation for the
2523 // last direct load from an alloca is necessary. This condition
2524 // effectively drops the *last* DW_OP_deref in the expression.
2525 if (!OutermostLoad)
2526 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2527 OutermostLoad = false;
2528 } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
2529 Storage = StInst->getOperand(0);
2530 } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
2531 SmallVector<Value *> AdditionalValues;
2532 DIExpression *SalvagedExpr = llvm::salvageDebugInfoImpl(
2533 *GEPInst, Expr,
2534 /*WithStackValue=*/false, 0, AdditionalValues);
2535 // Debug declares cannot currently handle additional location
2536 // operands.
2537 if (!SalvagedExpr || !AdditionalValues.empty())
2538 break;
2539 Expr = SalvagedExpr;
2540 Storage = GEPInst->getOperand(0);
2541 } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
2542 Storage = BCInst->getOperand(0);
2543 else
2544 break;
2545 }
2546 if (!Storage)
2547 return;
2548
2549 // Store a pointer to the coroutine frame object in an alloca so it
2550 // is available throughout the function when producing unoptimized
2551 // code. Extending the lifetime this way is correct because the
2552 // variable has been declared by a dbg.declare intrinsic.
2553 //
2554 // Avoid creating an alloca that would be eliminated by optimization
2555 // passes, which would leave the corresponding dbg.declares invalid.
2556 if (!ReuseFrameSlot && !EnableReuseStorageInFrame)
2557 if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2558 auto &Cached = DbgPtrAllocaCache[Storage];
2559 if (!Cached) {
2560 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2561 Arg->getName() + ".debug");
2562 Builder.CreateStore(Storage, Cached);
2563 }
2564 Storage = Cached;
2565 // FIXME: LLVM lacks nuanced semantics to differentiate between
2566 // memory and direct locations at the IR level. The backend will
2567 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2568 // location. Thus, if there are deref and offset operations in the
2569 // expression, we need to add a DW_OP_deref at the *start* of the
2570 // expression to first load the contents of the alloca before
2571 // adjusting it with the expression.
2572 if (Expr && Expr->isComplex())
2573 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2574 }
2575
2576 DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2577 DVI->setExpression(Expr);
2578 /// It makes no sense to move the dbg.value intrinsic.
2579 if (!isa<DbgValueInst>(DVI)) {
2580 if (auto *InsertPt = dyn_cast<Instruction>(Storage))
2581 DVI->moveAfter(InsertPt);
2582 else if (isa<Argument>(Storage))
2583 DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2584 }
2585}
2586
2587void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2588 // Don't eliminate swifterror in async functions that won't be split.
2589 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2590 eliminateSwiftError(F, Shape);
2591
2592 if (Shape.ABI == coro::ABI::Switch &&
2593 Shape.SwitchLowering.PromiseAlloca) {
2594 Shape.getSwitchCoroId()->clearPromise();
2595 }
2596
2597 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2598 // intrinsics are in their own blocks to simplify the logic of building up
2599 // SuspendCrossing data.
2600 for (auto *CSI : Shape.CoroSuspends) {
2601 if (auto *Save = CSI->getCoroSave())
2602 splitAround(Save, "CoroSave");
2603 splitAround(CSI, "CoroSuspend");
2604 }
2605
2606 // Put CoroEnds into their own blocks.
2607 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2608 splitAround(CE, "CoroEnd");
2609
2610 // Emit the musttail call function in a new block before the CoroEnd.
2611 // We do this here so that the right suspend crossing info is computed for
2612 // the uses of the musttail call. (Arguments to the coro.end
2613 // instruction would otherwise be ignored.)
2614 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2615 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2616 if (!MustTailCallFn)
2617 continue;
2618 IRBuilder<> Builder(AsyncEnd);
2619 SmallVector<Value *, 8> Args(AsyncEnd->args());
2620 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2621 auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2622 Arguments, Builder);
2623 splitAround(Call, "MustTailCall.Before.CoroEnd");
2624 }
2625 }
2626
2627 // Later code makes structural assumptions about single-predecessor PHIs, e.g.
2628 // that they are not live across a suspend point.
2629 cleanupSinglePredPHIs(F);
2630
2631 // Transform multi-edge PHI Nodes, so that any value feeding into a PHI will
2632 // never have its definition separated from the PHI by a suspend point.
2633 rewritePHIs(F);
2634
2635 // Build suspend crossing info.
2636 SuspendCrossingInfo Checker(F, Shape);
2637
2638 IRBuilder<> Builder(F.getContext());
2639 FrameDataInfo FrameData;
2640 SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2641 SmallVector<Instruction*, 4> DeadInstructions;
2642
2643 {
2644 SpillInfo Spills;
2645 for (int Repeat = 0; Repeat < 4; ++Repeat) {
2646 // See if there are materializable instructions across suspend points.
2647 for (Instruction &I : instructions(F))
2648 if (materializable(I)) {
2649 for (User *U : I.users())
2650 if (Checker.isDefinitionAcrossSuspend(I, U))
2651 Spills[&I].push_back(cast<Instruction>(U));
2652
2653 // Manually add dbg.value metadata uses of I.
2654 SmallVector<DbgValueInst *, 16> DVIs;
2655 findDbgValues(DVIs, &I);
2656 for (auto *DVI : DVIs)
2657 if (Checker.isDefinitionAcrossSuspend(I, DVI))
2658 Spills[&I].push_back(DVI);
2659 }
2660
2661 if (Spills.empty())
2662 break;
2663
2664 // Rewrite materializable instructions to be materialized at the use
2665 // point.
2666 LLVM_DEBUG(dumpSpills("Materializations", Spills))do { } while (false);
2667 rewriteMaterializableInstructions(Builder, Spills);
2668 Spills.clear();
2669 }
2670 }
2671
2672 sinkLifetimeStartMarkers(F, Shape, Checker);
2673 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2674 collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2675 LLVM_DEBUG(dumpAllocas(FrameData.Allocas))do { } while (false);
2676
2677 // Collect the spills for arguments and other not-materializable values.
2678 for (Argument &A : F.args())
2679 for (User *U : A.users())
2680 if (Checker.isDefinitionAcrossSuspend(A, U))
2681 FrameData.Spills[&A].push_back(cast<Instruction>(U));
2682
2683 for (Instruction &I : instructions(F)) {
2684 // Values returned from coroutine structure intrinsics should not be part
2685 // of the Coroutine Frame.
2686 if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2687 continue;
2688
2689 // The Coroutine Promise is always included in the coroutine frame, so there is
2690 // no need to check for suspend crossing.
2691 if (Shape.ABI == coro::ABI::Switch &&
2692 Shape.SwitchLowering.PromiseAlloca == &I)
2693 continue;
2694
2695 // Handle alloca.alloc specially here.
2696 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2697 // Check whether the alloca's lifetime is bounded by suspend points.
2698 if (isLocalAlloca(AI)) {
2699 LocalAllocas.push_back(AI);
2700 continue;
2701 }
2702
2703 // If not, do a quick rewrite of the alloca and then add spills of
2704 // the rewritten value. The rewrite doesn't invalidate anything in
2705 // Spills because the other alloca intrinsics have no other operands
2706 // besides AI, and it doesn't invalidate the iteration because we delay
2707 // erasing AI.
2708 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2709
2710 for (User *U : Alloc->users()) {
2711 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2712 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2713 }
2714 continue;
2715 }
2716
2717 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2718 if (isa<CoroAllocaGetInst>(I))
2719 continue;
2720
2721 if (isa<AllocaInst>(I))
2722 continue;
2723
2724 for (User *U : I.users())
2725 if (Checker.isDefinitionAcrossSuspend(I, U)) {
2726 // We cannot spill a token.
2727 if (I.getType()->isTokenTy())
2728 report_fatal_error(
2729 "token definition is separated from the use by a suspend point");
2730 FrameData.Spills[&I].push_back(cast<Instruction>(U));
2731 }
2732 }
2733
2734 // We don't want the layout of the coroutine frame to be affected
2735 // by debug information, so we only choose to salvage DbgValueInsts
2736 // whose values are already in the frame.
2737 // The dbg.values for allocas are handled specially.
2738 for (auto &Iter : FrameData.Spills) {
2739 auto *V = Iter.first;
2740 SmallVector<DbgValueInst *, 16> DVIs;
2741 findDbgValues(DVIs, V);
2742 llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
2743 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
2744 FrameData.Spills[V].push_back(DVI);
2745 });
2746 }
2747
2748 LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills))do { } while (false);
2749 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2750 Shape.ABI == coro::ABI::Async)
2751 sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2752 Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2753 createFramePtr(Shape);
2754 // For now, this works for C++ programs only.
2755 buildFrameDebugInfo(F, Shape, FrameData);
2756 insertSpills(FrameData, Shape);
2757 lowerLocalAllocas(LocalAllocas, DeadInstructions);
2758
2759 for (auto I : DeadInstructions)
2760 I->eraseFromParent();
2761}

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h

1//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains types to represent alignments.
10// They are instrumented to guarantee some invariants are preserved and prevent
11// invalid manipulations.
12//
13// - Align represents an alignment in bytes; it is always set and always a valid
14// power of two. Its minimum value is 1, which means no alignment requirement.
15//
16// - MaybeAlign is an optional type; it may be undefined or set. When it is set,
17// you can get the underlying Align type by using the getValue() method.
18//
19//===----------------------------------------------------------------------===//
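A brief usage sketch of the two types described above, restricted to the API that appears in this header; the function name is illustrative only.

#include "llvm/Support/Alignment.h"
#include <cstdint>

void sketch() {
  llvm::Align A(16);               // always a valid power of two, >= 1
  uint64_t Bytes = A.value();      // 16
  unsigned Lg = llvm::Log2(A);     // 4

  llvm::MaybeAlign M;              // unset ("undefined") alignment
  llvm::Align B = M.valueOrOne();  // falls back to byte alignment (1)
  (void)Bytes; (void)Lg; (void)B;
}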
20
21#ifndef LLVM_SUPPORT_ALIGNMENT_H_
22#define LLVM_SUPPORT_ALIGNMENT_H_
23
24#include "llvm/ADT/Optional.h"
25#include "llvm/Support/MathExtras.h"
26#include <cassert>
27#ifndef NDEBUG1
28#include <string>
29#endif // NDEBUG
30
31namespace llvm {
32
33#define ALIGN_CHECK_ISPOSITIVE(decl) \
34 assert(decl > 0 && (#decl " should be defined"))((void)0)
35
36/// This struct is a compact representation of a valid (non-zero power of two)
37/// alignment.
38/// It is suitable for use as static global constants.
39struct Align {
40private:
41 uint8_t ShiftValue = 0; /// The log2 of the required alignment.
42 /// ShiftValue is less than 64 by construction.
43
44 friend struct MaybeAlign;
45 friend unsigned Log2(Align);
46 friend bool operator==(Align Lhs, Align Rhs);
47 friend bool operator!=(Align Lhs, Align Rhs);
48 friend bool operator<=(Align Lhs, Align Rhs);
49 friend bool operator>=(Align Lhs, Align Rhs);
50 friend bool operator<(Align Lhs, Align Rhs);
51 friend bool operator>(Align Lhs, Align Rhs);
52 friend unsigned encode(struct MaybeAlign A);
53 friend struct MaybeAlign decodeMaybeAlign(unsigned Value);
54
55 /// A trivial type to allow construction of constexpr Align.
56 /// This is currently needed to work around a bug in GCC 5.3 which prevents
57 /// definition of constexpr assign operators.
58 /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
59 /// FIXME: Remove this, make all assign operators constexpr and introduce user
60 /// defined literals when we don't have to support GCC 5.3 anymore.
61 /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
62 struct LogValue {
63 uint8_t Log;
64 };
65
66public:
67 /// Default is byte-aligned.
68 constexpr Align() = default;
69 /// Do not perform checks in case of copy/move construct/assign, because the
70 /// checks have been performed when building `Other`.
71 constexpr Align(const Align &Other) = default;
72 constexpr Align(Align &&Other) = default;
73 Align &operator=(const Align &Other) = default;
74 Align &operator=(Align &&Other) = default;
75
76 explicit Align(uint64_t Value) {
77 assert(Value > 0 && "Value must not be 0")((void)0);
78 assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2")((void)0);
79 ShiftValue = Log2_64(Value);
80 assert(ShiftValue < 64 && "Broken invariant")((void)0);
81 }
82
83 /// This is a hole in the type system and should not be abused.
84 /// Needed to interact with C, for instance.
85 uint64_t value() const { return uint64_t(1) << ShiftValue; }
Step 14: The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
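This is the core of the report: with the assertions compiled out (note the ((void)0) expansions above), nothing constrains ShiftValue at this point, and shifting a 64-bit value by 64 or more is undefined behaviour in C++. A minimal, self-contained illustration of the flagged operation; the function name is mine, not LLVM's.

#include <cstdint>

// Well-defined only when ShiftValue < 64; for ShiftValue == 255 (the value the
// analyzer reports) the shift is undefined behaviour.
uint64_t valueFromShift(uint8_t ShiftValue) {
  return uint64_t(1) << ShiftValue;
}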
86
87 /// Allow constructions of constexpr Align.
88 template <size_t kValue> constexpr static LogValue Constant() {
89 return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
90 }
91
92 /// Allow constructions of constexpr Align from types.
93 /// Compile time equivalent to Align(alignof(T)).
94 template <typename T> constexpr static LogValue Of() {
95 return Constant<std::alignment_of<T>::value>();
96 }
97
98 /// Constexpr constructor from LogValue type.
99 constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
100};
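An example of the constexpr construction path described above: Constant<N>() and Of<T>() produce a LogValue, which the constexpr Align(LogValue) constructor accepts, so Align values can be formed at compile time despite the GCC 5.3 workaround. The constant names are illustrative only.

#include "llvm/Support/Alignment.h"

// Both are usable as static constants; Constant<8>() stores log2(8) == 3
// internally, and Of<double>() is equivalent to Align(alignof(double)).
constexpr llvm::Align kWordAlign   = llvm::Align::Constant<8>();
constexpr llvm::Align kDoubleAlign = llvm::Align::Of<double>();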
101
102/// Treats the value 0 as a 1, so Align is always at least 1.
103inline Align assumeAligned(uint64_t Value) {
104 return Value ? Align(Value) : Align();
105}
106
107/// This struct is a compact representation of a valid (power of two) or
108/// undefined (0) alignment.
109struct MaybeAlign : public llvm::Optional<Align> {
110private:
111 using UP = llvm::Optional<Align>;
112
113public:
114 /// Default is undefined.
115 MaybeAlign() = default;
116 /// Do not perform checks in case of copy/move construct/assign, because the
117 /// checks have been performed when building `Other`.
118 MaybeAlign(const MaybeAlign &Other) = default;
119 MaybeAlign &operator=(const MaybeAlign &Other) = default;
120 MaybeAlign(MaybeAlign &&Other) = default;
121 MaybeAlign &operator=(MaybeAlign &&Other) = default;
122
123 /// Use llvm::Optional<Align> constructor.
124 using UP::UP;
125
126 explicit MaybeAlign(uint64_t Value) {
127 assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&((void)0)
128 "Alignment is neither 0 nor a power of 2")((void)0);
129 if (Value)
130 emplace(Value);
131 }
132
133 /// For convenience, returns a valid alignment or 1 if undefined.
134 Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
135};
136
137/// Checks that SizeInBytes is a multiple of the alignment.
138inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
139 return SizeInBytes % Lhs.value() == 0;
140}
141
142/// Checks that Addr is a multiple of the alignment.
143inline bool isAddrAligned(Align Lhs, const void *Addr) {
144 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
145}
146
147/// Returns a multiple of A needed to store `Size` bytes.
148inline uint64_t alignTo(uint64_t Size, Align A) {
149 const uint64_t Value = A.value();
Step 13: Calling 'Align::value'
150 // The following line is equivalent to `(Size + Value - 1) / Value * Value`.
151
152 // The division followed by a multiplication can be thought of as a right
153 // shift followed by a left shift which zeros out the extra bits produced in
154 // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
155 // are just zero.
156
157 // Most compilers can generate this code, but the pattern may be missed when
158 // multiple functions get inlined.
159 return (Size + Value - 1) & ~(Value - 1U);
160}
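A worked instance of the rounding identity described in the comments above, for an alignment of 8 (so Value - 1 == 7 and ~(Value - 1) clears the low three bits); the constants are only illustrative.

#include <cstdint>

static_assert(((uint64_t(13) + 8 - 1) & ~uint64_t(8 - 1)) == 16,
              "13 rounds up to the next multiple of 8");
static_assert(((uint64_t(16) + 8 - 1) & ~uint64_t(8 - 1)) == 16,
              "an already-aligned size is unchanged");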
161
162/// If non-zero \p Skew is specified, the return value will be a minimal integer
163/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
164/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
165/// Skew mod \p A'.
166///
167/// Examples:
168/// \code
169/// alignTo(5, Align(8), 7) = 7
170/// alignTo(17, Align(8), 1) = 17
171/// alignTo(~0LL, Align(8), 3) = 3
172/// \endcode
173inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
174 const uint64_t Value = A.value();
175 Skew %= Value;
176 return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
177}
178
179/// Returns a multiple of A needed to store `Size` bytes.
180/// Returns `Size` if current alignment is undefined.
181inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
182 return A ? alignTo(Size, A.getValue()) : Size;
Step 10: '?' condition is true
Step 11: The value 255 is assigned to 'A.ShiftValue'
Step 12: Calling 'alignTo'
183}
184
185/// Aligns `Addr` to `Alignment` bytes, rounding up.
186inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
187 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
188 assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=((void)0)
189 ArithAddr &&((void)0)
190 "Overflow")((void)0);
191 return alignTo(ArithAddr, Alignment);
192}
193
194/// Returns the offset to the next integer (mod 2**64) that is greater than
195/// or equal to \p Value and is a multiple of \p Align.
196inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
197 return alignTo(Value, Alignment) - Value;
198}
199
200/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
201/// bytes, rounding up.
202inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
203 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
204}
205
206/// Returns the log2 of the alignment.
207inline unsigned Log2(Align A) { return A.ShiftValue; }
208
209/// Returns the alignment that satisfies both alignments.
210/// Same semantic as MinAlign.
211inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }
212
213/// Returns the alignment that satisfies both alignments.
214/// Same semantic as MinAlign.
215inline Align commonAlignment(Align A, uint64_t Offset) {
216 return Align(MinAlign(A.value(), Offset));
217}
218
219/// Returns the alignment that satisfies both alignments.
220/// Same semantic as MinAlign.
221inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
222 return A && B ? commonAlignment(*A, *B) : A ? A : B;
223}
224
225/// Returns the alignment that satisfies both alignments.
226/// Same semantic as MinAlign.
227inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
228 return MaybeAlign(MinAlign((*A).value(), Offset));
229}
230
231/// Returns a representation of the alignment that encodes undefined as 0.
232inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }
233
234/// Dual operation of the encode function above.
235inline MaybeAlign decodeMaybeAlign(unsigned Value) {
236 if (Value == 0)
237 return MaybeAlign();
238 Align Out;
239 Out.ShiftValue = Value - 1;
240 return Out;
241}
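The two functions form the round trip sketched below. Note that decodeMaybeAlign trusts its argument: an out-of-range encoded value such as 256 would store ShiftValue = 255, the state the analyzer assumes when the path above reaches Align::value(). That is an inference about how the reported path could arise, not a claim about any particular in-tree caller; roundTrip is an illustrative name.

#include "llvm/Support/Alignment.h"

// encode(): undefined -> 0, Align(1) -> 1, Align(2) -> 2, ... (ShiftValue + 1).
// decodeMaybeAlign() inverts that mapping for values produced by encode().
llvm::MaybeAlign roundTrip(llvm::MaybeAlign A) {
  return llvm::decodeMaybeAlign(llvm::encode(A));
}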
242
243/// Returns a representation of the alignment; the encoded value is positive by
244/// definition.
245inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
246
247/// Comparisons between Align and scalars. Rhs must be positive.
248inline bool operator==(Align Lhs, uint64_t Rhs) {
249 ALIGN_CHECK_ISPOSITIVE(Rhs);
250 return Lhs.value() == Rhs;
251}
252inline bool operator!=(Align Lhs, uint64_t Rhs) {
253 ALIGN_CHECK_ISPOSITIVE(Rhs);
254 return Lhs.value() != Rhs;
255}
256inline bool operator<=(Align Lhs, uint64_t Rhs) {
257 ALIGN_CHECK_ISPOSITIVE(Rhs);
258 return Lhs.value() <= Rhs;
259}
260inline bool operator>=(Align Lhs, uint64_t Rhs) {
261 ALIGN_CHECK_ISPOSITIVE(Rhs);
262 return Lhs.value() >= Rhs;
263}
264inline bool operator<(Align Lhs, uint64_t Rhs) {
265 ALIGN_CHECK_ISPOSITIVE(Rhs);
266 return Lhs.value() < Rhs;
267}
268inline bool operator>(Align Lhs, uint64_t Rhs) {
269 ALIGN_CHECK_ISPOSITIVE(Rhs);
270 return Lhs.value() > Rhs;
271}
272
273/// Comparisons between MaybeAlign and scalars.
274inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
275 return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
276}
277inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
278 return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
279}
280
281/// Comparisons operators between Align.
282inline bool operator==(Align Lhs, Align Rhs) {
283 return Lhs.ShiftValue == Rhs.ShiftValue;
284}
285inline bool operator!=(Align Lhs, Align Rhs) {
286 return Lhs.ShiftValue != Rhs.ShiftValue;
287}
288inline bool operator<=(Align Lhs, Align Rhs) {
289 return Lhs.ShiftValue <= Rhs.ShiftValue;
290}
291inline bool operator>=(Align Lhs, Align Rhs) {
292 return Lhs.ShiftValue >= Rhs.ShiftValue;
293}
294inline bool operator<(Align Lhs, Align Rhs) {
295 return Lhs.ShiftValue < Rhs.ShiftValue;
296}
297inline bool operator>(Align Lhs, Align Rhs) {
298 return Lhs.ShiftValue > Rhs.ShiftValue;
299}
300
301// Don't allow relational comparisons with MaybeAlign.
302bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
303bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
304bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
305bool operator>(Align Lhs, MaybeAlign Rhs) = delete;
306
307bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
308bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
309bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
310bool operator>(MaybeAlign Lhs, Align Rhs) = delete;
311
312bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
313bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
314bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
315bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316
317inline Align operator*(Align Lhs, uint64_t Rhs) {
318 assert(Rhs > 0 && "Rhs must be positive")((void)0);
319 return Align(Lhs.value() * Rhs);
320}
321
322inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
323 assert(Rhs > 0 && "Rhs must be positive")((void)0);
324 return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
325}
326
327inline Align operator/(Align Lhs, uint64_t Divisor) {
328 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
329 "Divisor must be positive and a power of 2")((void)0);
330 assert(Lhs != 1 && "Can't halve byte alignment")((void)0);
331 return Align(Lhs.value() / Divisor);
332}
333
334inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
335 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
336 "Divisor must be positive and a power of 2")((void)0);
337 return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
338}
339
340inline Align max(MaybeAlign Lhs, Align Rhs) {
341 return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
342}
343
344inline Align max(Align Lhs, MaybeAlign Rhs) {
345 return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
346}
347
348#ifndef NDEBUG1
349// For usage in LLVM_DEBUG macros.
350inline std::string DebugStr(const Align &A) {
351 return std::to_string(A.value());
352}
353// For usage in LLVM_DEBUG macros.
354inline std::string DebugStr(const MaybeAlign &MA) {
355 if (MA)
356 return std::to_string(MA->value());
357 return "None";
358}
359#endif // NDEBUG
360
361#undef ALIGN_CHECK_ISPOSITIVE
362
363} // namespace llvm
364
365#endif // LLVM_SUPPORT_ALIGNMENT_H_