Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MachineSSAUpdater.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup 
-fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/MachineSSAUpdater.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/MachineSSAUpdater.cpp

1//===- MachineSSAUpdater.cpp - Unstructured SSA Update Tool ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the MachineSSAUpdater class. It's based on SSAUpdater
10// class in lib/Transforms/Utils.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/CodeGen/MachineSSAUpdater.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/CodeGen/MachineBasicBlock.h"
18#include "llvm/CodeGen/MachineFunction.h"
19#include "llvm/CodeGen/MachineInstr.h"
20#include "llvm/CodeGen/MachineInstrBuilder.h"
21#include "llvm/CodeGen/MachineOperand.h"
22#include "llvm/CodeGen/MachineRegisterInfo.h"
23#include "llvm/CodeGen/TargetInstrInfo.h"
24#include "llvm/CodeGen/TargetOpcodes.h"
25#include "llvm/CodeGen/TargetSubtargetInfo.h"
26#include "llvm/IR/DebugLoc.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Support/raw_ostream.h"
30#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
31#include <utility>
32
33using namespace llvm;
34
35#define DEBUG_TYPE"machine-ssaupdater" "machine-ssaupdater"
36
37using AvailableValsTy = DenseMap<MachineBasicBlock *, Register>;
38
39static AvailableValsTy &getAvailableVals(void *AV) {
40 return *static_cast<AvailableValsTy*>(AV);
41}
42
43MachineSSAUpdater::MachineSSAUpdater(MachineFunction &MF,
44 SmallVectorImpl<MachineInstr*> *NewPHI)
45 : InsertedPHIs(NewPHI), TII(MF.getSubtarget().getInstrInfo()),
46 MRI(&MF.getRegInfo()) {}
47
48MachineSSAUpdater::~MachineSSAUpdater() {
49 delete static_cast<AvailableValsTy*>(AV);
50}
51
52/// Initialize - Reset this object to get ready for a new set of SSA
53/// updates.
54void MachineSSAUpdater::Initialize(const TargetRegisterClass *RC) {
55 if (!AV)
56 AV = new AvailableValsTy();
57 else
58 getAvailableVals(AV).clear();
59
60 VRC = RC;
61}
62
63void MachineSSAUpdater::Initialize(Register V) {
64 Initialize(MRI->getRegClass(V));
65}
66
67/// HasValueForBlock - Return true if the MachineSSAUpdater already has a value for
68/// the specified block.
69bool MachineSSAUpdater::HasValueForBlock(MachineBasicBlock *BB) const {
70 return getAvailableVals(AV).count(BB);
71}
72
73/// AddAvailableValue - Indicate that a rewritten value is available in the
74/// specified block with the specified value.
75void MachineSSAUpdater::AddAvailableValue(MachineBasicBlock *BB, Register V) {
76 getAvailableVals(AV)[BB] = V;
77}
78
79/// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
80/// live at the end of the specified block.
81Register MachineSSAUpdater::GetValueAtEndOfBlock(MachineBasicBlock *BB) {
82 return GetValueAtEndOfBlockInternal(BB);
83}
84
85static
86Register LookForIdenticalPHI(MachineBasicBlock *BB,
87 SmallVectorImpl<std::pair<MachineBasicBlock *, Register>> &PredValues) {
88 if (BB->empty())
89 return Register();
90
91 MachineBasicBlock::iterator I = BB->begin();
92 if (!I->isPHI())
93 return Register();
94
95 AvailableValsTy AVals;
96 for (unsigned i = 0, e = PredValues.size(); i != e; ++i)
97 AVals[PredValues[i].first] = PredValues[i].second;
98 while (I != BB->end() && I->isPHI()) {
99 bool Same = true;
100 for (unsigned i = 1, e = I->getNumOperands(); i != e; i += 2) {
101 Register SrcReg = I->getOperand(i).getReg();
102 MachineBasicBlock *SrcBB = I->getOperand(i+1).getMBB();
103 if (AVals[SrcBB] != SrcReg) {
104 Same = false;
105 break;
106 }
107 }
108 if (Same)
109 return I->getOperand(0).getReg();
110 ++I;
111 }
112 return Register();
113}
114
115/// InsertNewDef - Insert an empty PHI or IMPLICIT_DEF instruction which define
116/// a value of the given register class at the start of the specified basic
117/// block. It returns the virtual register defined by the instruction.
118static
119MachineInstrBuilder InsertNewDef(unsigned Opcode,
120 MachineBasicBlock *BB, MachineBasicBlock::iterator I,
121 const TargetRegisterClass *RC,
122 MachineRegisterInfo *MRI,
123 const TargetInstrInfo *TII) {
124 Register NewVR = MRI->createVirtualRegister(RC);
125 return BuildMI(*BB, I, DebugLoc(), TII->get(Opcode), NewVR);
126}
127
128/// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
129/// is live in the middle of the specified block.
130///
131/// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
132/// important case: if there is a definition of the rewritten value after the
133/// 'use' in BB. Consider code like this:
134///
135/// X1 = ...
136/// SomeBB:
137/// use(X)
138/// X2 = ...
139/// br Cond, SomeBB, OutBB
140///
141/// In this case, there are two values (X1 and X2) added to the AvailableVals
142/// set by the client of the rewriter, and those values are both live out of
143/// their respective blocks. However, the use of X happens in the *middle* of
144/// a block. Because of this, we need to insert a new PHI node in SomeBB to
145/// merge the appropriate values, and this value isn't live out of the block.
146Register MachineSSAUpdater::GetValueInMiddleOfBlock(MachineBasicBlock *BB) {
147 // If there is no definition of the renamed variable in this block, just use
148 // GetValueAtEndOfBlock to do our work.
149 if (!HasValueForBlock(BB))
150 return GetValueAtEndOfBlockInternal(BB);
151
152 // If there are no predecessors, just return undef.
153 if (BB->pred_empty()) {
154 // Insert an implicit_def to represent an undef value.
155 MachineInstr *NewDef = InsertNewDef(TargetOpcode::IMPLICIT_DEF,
156 BB, BB->getFirstTerminator(),
157 VRC, MRI, TII);
158 return NewDef->getOperand(0).getReg();
159 }
160
161 // Otherwise, we have the hard case. Get the live-in values for each
162 // predecessor.
163 SmallVector<std::pair<MachineBasicBlock*, Register>, 8> PredValues;
164 Register SingularValue;
165
166 bool isFirstPred = true;
167 for (MachineBasicBlock *PredBB : BB->predecessors()) {
168 Register PredVal = GetValueAtEndOfBlockInternal(PredBB);
169 PredValues.push_back(std::make_pair(PredBB, PredVal));
170
171 // Compute SingularValue.
172 if (isFirstPred) {
173 SingularValue = PredVal;
174 isFirstPred = false;
175 } else if (PredVal != SingularValue)
176 SingularValue = Register();
177 }
178
179 // Otherwise, if all the merged values are the same, just use it.
180 if (SingularValue)
181 return SingularValue;
182
183 // If an identical PHI is already in BB, just reuse it.
184 Register DupPHI = LookForIdenticalPHI(BB, PredValues);
185 if (DupPHI)
186 return DupPHI;
187
188 // Otherwise, we do need a PHI: insert one now.
189 MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->begin();
190 MachineInstrBuilder InsertedPHI = InsertNewDef(TargetOpcode::PHI, BB,
191 Loc, VRC, MRI, TII);
192
193 // Fill in all the predecessors of the PHI.
194 for (unsigned i = 0, e = PredValues.size(); i != e; ++i)
195 InsertedPHI.addReg(PredValues[i].second).addMBB(PredValues[i].first);
196
197 // See if the PHI node can be merged to a single value. This can happen in
198 // loop cases when we get a PHI of itself and one other value.
199 if (unsigned ConstVal = InsertedPHI->isConstantValuePHI()) {
200 InsertedPHI->eraseFromParent();
201 return ConstVal;
202 }
203
204 // If the client wants to know about all new instructions, tell it.
205 if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);
206
207 LLVM_DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n")do { } while (false);
208 return InsertedPHI.getReg(0);
209}
210
211static
212MachineBasicBlock *findCorrespondingPred(const MachineInstr *MI,
213 MachineOperand *U) {
214 for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
215 if (&MI->getOperand(i) == U)
216 return MI->getOperand(i+1).getMBB();
217 }
218
219 llvm_unreachable("MachineOperand::getParent() failure?")__builtin_unreachable();
220}
221
222/// RewriteUse - Rewrite a use of the symbolic value. This handles PHI nodes,
223/// which use their value in the corresponding predecessor.
224void MachineSSAUpdater::RewriteUse(MachineOperand &U) {
225 MachineInstr *UseMI = U.getParent();
226 Register NewVR;
227 if (UseMI->isPHI()) {
1
Taking true branch
228 MachineBasicBlock *SourceBB = findCorrespondingPred(UseMI, &U);
229 NewVR = GetValueAtEndOfBlockInternal(SourceBB);
2
Calling 'MachineSSAUpdater::GetValueAtEndOfBlockInternal'
230 } else {
231 NewVR = GetValueInMiddleOfBlock(UseMI->getParent());
232 }
233
234 U.setReg(NewVR);
235}
236
237namespace llvm {
238
239/// SSAUpdaterTraits<MachineSSAUpdater> - Traits for the SSAUpdaterImpl
240/// template, specialized for MachineSSAUpdater.
241template<>
242class SSAUpdaterTraits<MachineSSAUpdater> {
243public:
244 using BlkT = MachineBasicBlock;
245 using ValT = Register;
246 using PhiT = MachineInstr;
247 using BlkSucc_iterator = MachineBasicBlock::succ_iterator;
248
249 static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
250 static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }
251
252 /// Iterator for PHI operands.
253 class PHI_iterator {
254 private:
255 MachineInstr *PHI;
256 unsigned idx;
257
258 public:
259 explicit PHI_iterator(MachineInstr *P) // begin iterator
260 : PHI(P), idx(1) {}
261 PHI_iterator(MachineInstr *P, bool) // end iterator
262 : PHI(P), idx(PHI->getNumOperands()) {}
263
264 PHI_iterator &operator++() { idx += 2; return *this; }
265 bool operator==(const PHI_iterator& x) const { return idx == x.idx; }
266 bool operator!=(const PHI_iterator& x) const { return !operator==(x); }
267
268 unsigned getIncomingValue() { return PHI->getOperand(idx).getReg(); }
269
270 MachineBasicBlock *getIncomingBlock() {
271 return PHI->getOperand(idx+1).getMBB();
272 }
273 };
274
275 static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
276
277 static inline PHI_iterator PHI_end(PhiT *PHI) {
278 return PHI_iterator(PHI, true);
279 }
280
281 /// FindPredecessorBlocks - Put the predecessors of BB into the Preds
282 /// vector.
283 static void FindPredecessorBlocks(MachineBasicBlock *BB,
284 SmallVectorImpl<MachineBasicBlock*> *Preds){
285 append_range(*Preds, BB->predecessors());
286 }
287
288 /// GetUndefVal - Create an IMPLICIT_DEF instruction with a new register.
289 /// Add it into the specified block and return the register.
290 static Register GetUndefVal(MachineBasicBlock *BB,
291 MachineSSAUpdater *Updater) {
292 // Insert an implicit_def to represent an undef value.
293 MachineInstr *NewDef = InsertNewDef(TargetOpcode::IMPLICIT_DEF,
294 BB, BB->getFirstNonPHI(),
295 Updater->VRC, Updater->MRI,
296 Updater->TII);
297 return NewDef->getOperand(0).getReg();
298 }
299
300 /// CreateEmptyPHI - Create a PHI instruction that defines a new register.
301 /// Add it into the specified block and return the register.
302 static Register CreateEmptyPHI(MachineBasicBlock *BB, unsigned NumPreds,
303 MachineSSAUpdater *Updater) {
304 MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->begin();
305 MachineInstr *PHI = InsertNewDef(TargetOpcode::PHI, BB, Loc,
306 Updater->VRC, Updater->MRI,
307 Updater->TII);
308 return PHI->getOperand(0).getReg();
309 }
310
311 /// AddPHIOperand - Add the specified value as an operand of the PHI for
312 /// the specified predecessor block.
313 static void AddPHIOperand(MachineInstr *PHI, Register Val,
314 MachineBasicBlock *Pred) {
315 MachineInstrBuilder(*Pred->getParent(), PHI).addReg(Val).addMBB(Pred);
316 }
317
318 /// InstrIsPHI - Check if an instruction is a PHI.
319 static MachineInstr *InstrIsPHI(MachineInstr *I) {
320 if (I && I->isPHI())
321 return I;
322 return nullptr;
323 }
324
325 /// ValueIsPHI - Check if the instruction that defines the specified register
326 /// is a PHI instruction.
327 static MachineInstr *ValueIsPHI(Register Val, MachineSSAUpdater *Updater) {
328 return InstrIsPHI(Updater->MRI->getVRegDef(Val));
329 }
330
331 /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
332 /// operands, i.e., it was just added.
333 static MachineInstr *ValueIsNewPHI(Register Val, MachineSSAUpdater *Updater) {
334 MachineInstr *PHI = ValueIsPHI(Val, Updater);
335 if (PHI && PHI->getNumOperands() <= 1)
336 return PHI;
337 return nullptr;
338 }
339
340 /// GetPHIValue - For the specified PHI instruction, return the register
341 /// that it defines.
342 static Register GetPHIValue(MachineInstr *PHI) {
343 return PHI->getOperand(0).getReg();
344 }
345};
346
347} // end namespace llvm
348
349/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
350/// for the specified BB and if so, return it. If not, construct SSA form by
351/// first calculating the required placement of PHIs and then inserting new
352/// PHIs where needed.
353Register MachineSSAUpdater::GetValueAtEndOfBlockInternal(MachineBasicBlock *BB){
354 AvailableValsTy &AvailableVals = getAvailableVals(AV);
355 if (Register V = AvailableVals[BB])
3
Assuming the condition is false
4
Taking false branch
356 return V;
357
358 SSAUpdaterImpl<MachineSSAUpdater> Impl(this, &AvailableVals, InsertedPHIs);
359 return Impl.GetValue(BB);
5
Calling 'SSAUpdaterImpl::GetValue'
360}

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h

1//===- SSAUpdaterImpl.h - SSA Updater Implementation ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a template that implements the core algorithm for the
10// SSAUpdater and MachineSSAUpdater.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
15#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
16
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/Support/Allocator.h"
20#include "llvm/Support/Debug.h"
21#include "llvm/Support/raw_ostream.h"
22
23#define DEBUG_TYPE"machine-ssaupdater" "ssaupdater"
24
25namespace llvm {
26
27template<typename T> class SSAUpdaterTraits;
28
29template<typename UpdaterT>
30class SSAUpdaterImpl {
31private:
32 UpdaterT *Updater;
33
34 using Traits = SSAUpdaterTraits<UpdaterT>;
35 using BlkT = typename Traits::BlkT;
36 using ValT = typename Traits::ValT;
37 using PhiT = typename Traits::PhiT;
38
39 /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
40 /// The predecessors of each block are cached here since pred_iterator is
41 /// slow and we need to iterate over the blocks at least a few times.
42 class BBInfo {
43 public:
44 // Back-pointer to the corresponding block.
45 BlkT *BB;
46
47 // Value to use in this block.
48 ValT AvailableVal;
49
50 // Block that defines the available value.
51 BBInfo *DefBB;
52
53 // Postorder number.
54 int BlkNum = 0;
55
56 // Immediate dominator.
57 BBInfo *IDom = nullptr;
58
59 // Number of predecessor blocks.
60 unsigned NumPreds = 0;
61
62 // Array[NumPreds] of predecessor blocks.
63 BBInfo **Preds = nullptr;
64
65 // Marker for existing PHIs that match.
66 PhiT *PHITag = nullptr;
67
68 BBInfo(BlkT *ThisBB, ValT V)
69 : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr) {}
70 };
71
72 using AvailableValsTy = DenseMap<BlkT *, ValT>;
73
74 AvailableValsTy *AvailableVals;
75
76 SmallVectorImpl<PhiT *> *InsertedPHIs;
77
78 using BlockListTy = SmallVectorImpl<BBInfo *>;
79 using BBMapTy = DenseMap<BlkT *, BBInfo *>;
80
81 BBMapTy BBMap;
82 BumpPtrAllocator Allocator;
83
84public:
85 explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
86 SmallVectorImpl<PhiT *> *Ins) :
87 Updater(U), AvailableVals(A), InsertedPHIs(Ins) {}
88
89 /// GetValue - Check to see if AvailableVals has an entry for the specified
90 /// BB and if so, return it. If not, construct SSA form by first
91 /// calculating the required placement of PHIs and then inserting new PHIs
92 /// where needed.
93 ValT GetValue(BlkT *BB) {
94 SmallVector<BBInfo *, 100> BlockList;
95 BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);
6
Calling 'SSAUpdaterImpl::BuildBlockList'
96
97 // Special case: bail out if BB is unreachable.
98 if (BlockList.size() == 0) {
99 ValT V = Traits::GetUndefVal(BB, Updater);
100 (*AvailableVals)[BB] = V;
101 return V;
102 }
103
104 FindDominators(&BlockList, PseudoEntry);
105 FindPHIPlacement(&BlockList);
106 FindAvailableVals(&BlockList);
107
108 return BBMap[BB]->DefBB->AvailableVal;
109 }
110
111 /// BuildBlockList - Starting from the specified basic block, traverse back
112 /// through its predecessors until reaching blocks with known values.
113 /// Create BBInfo structures for the blocks and append them to the block
114 /// list.
115 BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
116 SmallVector<BBInfo *, 10> RootList;
117 SmallVector<BBInfo *, 64> WorkList;
118
119 BBInfo *Info = new (Allocator) BBInfo(BB, 0);
7
Calling 'operator new<llvm::MallocAllocator, 4096UL, 4096UL, 128UL>'
120 BBMap[BB] = Info;
121 WorkList.push_back(Info);
122
123 // Search backward from BB, creating BBInfos along the way and stopping
124 // when reaching blocks that define the value. Record those defining
125 // blocks on the RootList.
126 SmallVector<BlkT *, 10> Preds;
127 while (!WorkList.empty()) {
128 Info = WorkList.pop_back_val();
129 Preds.clear();
130 Traits::FindPredecessorBlocks(Info->BB, &Preds);
131 Info->NumPreds = Preds.size();
132 if (Info->NumPreds == 0)
133 Info->Preds = nullptr;
134 else
135 Info->Preds = static_cast<BBInfo **>(Allocator.Allocate(
136 Info->NumPreds * sizeof(BBInfo *), alignof(BBInfo *)));
137
138 for (unsigned p = 0; p != Info->NumPreds; ++p) {
139 BlkT *Pred = Preds[p];
140 // Check if BBMap already has a BBInfo for the predecessor block.
141 typename BBMapTy::value_type &BBMapBucket =
142 BBMap.FindAndConstruct(Pred);
143 if (BBMapBucket.second) {
144 Info->Preds[p] = BBMapBucket.second;
145 continue;
146 }
147
148 // Create a new BBInfo for the predecessor.
149 ValT PredVal = AvailableVals->lookup(Pred);
150 BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
151 BBMapBucket.second = PredInfo;
152 Info->Preds[p] = PredInfo;
153
154 if (PredInfo->AvailableVal) {
155 RootList.push_back(PredInfo);
156 continue;
157 }
158 WorkList.push_back(PredInfo);
159 }
160 }
161
162 // Now that we know what blocks are backwards-reachable from the starting
163 // block, do a forward depth-first traversal to assign postorder numbers
164 // to those blocks.
165 BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
166 unsigned BlkNum = 1;
167
168 // Initialize the worklist with the roots from the backward traversal.
169 while (!RootList.empty()) {
170 Info = RootList.pop_back_val();
171 Info->IDom = PseudoEntry;
172 Info->BlkNum = -1;
173 WorkList.push_back(Info);
174 }
175
176 while (!WorkList.empty()) {
177 Info = WorkList.back();
178
179 if (Info->BlkNum == -2) {
180 // All the successors have been handled; assign the postorder number.
181 Info->BlkNum = BlkNum++;
182 // If not a root, put it on the BlockList.
183 if (!Info->AvailableVal)
184 BlockList->push_back(Info);
185 WorkList.pop_back();
186 continue;
187 }
188
189 // Leave this entry on the worklist, but set its BlkNum to mark that its
190 // successors have been put on the worklist. When it returns to the top
191 // the list, after handling its successors, it will be assigned a
192 // number.
193 Info->BlkNum = -2;
194
195 // Add unvisited successors to the work list.
196 for (typename Traits::BlkSucc_iterator SI =
197 Traits::BlkSucc_begin(Info->BB),
198 E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
199 BBInfo *SuccInfo = BBMap[*SI];
200 if (!SuccInfo || SuccInfo->BlkNum)
201 continue;
202 SuccInfo->BlkNum = -1;
203 WorkList.push_back(SuccInfo);
204 }
205 }
206 PseudoEntry->BlkNum = BlkNum;
207 return PseudoEntry;
208 }
209
210 /// IntersectDominators - This is the dataflow lattice "meet" operation for
211 /// finding dominators. Given two basic blocks, it walks up the dominator
212 /// tree until it finds a common dominator of both. It uses the postorder
213 /// number of the blocks to determine how to do that.
214 BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
215 while (Blk1 != Blk2) {
216 while (Blk1->BlkNum < Blk2->BlkNum) {
217 Blk1 = Blk1->IDom;
218 if (!Blk1)
219 return Blk2;
220 }
221 while (Blk2->BlkNum < Blk1->BlkNum) {
222 Blk2 = Blk2->IDom;
223 if (!Blk2)
224 return Blk1;
225 }
226 }
227 return Blk1;
228 }
229
230 /// FindDominators - Calculate the dominator tree for the subset of the CFG
231 /// corresponding to the basic blocks on the BlockList. This uses the
232 /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
233 /// and Kennedy, published in Software--Practice and Experience, 2001,
234 /// 4:1-10. Because the CFG subset does not include any edges leading into
235 /// blocks that define the value, the results are not the usual dominator
236 /// tree. The CFG subset has a single pseudo-entry node with edges to a set
237 /// of root nodes for blocks that define the value. The dominators for this
238 /// subset CFG are not the standard dominators but they are adequate for
239 /// placing PHIs within the subset CFG.
240 void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
241 bool Changed;
242 do {
243 Changed = false;
244 // Iterate over the list in reverse order, i.e., forward on CFG edges.
245 for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
246 E = BlockList->rend(); I != E; ++I) {
247 BBInfo *Info = *I;
248 BBInfo *NewIDom = nullptr;
249
250 // Iterate through the block's predecessors.
251 for (unsigned p = 0; p != Info->NumPreds; ++p) {
252 BBInfo *Pred = Info->Preds[p];
253
254 // Treat an unreachable predecessor as a definition with 'undef'.
255 if (Pred->BlkNum == 0) {
256 Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
257 (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
258 Pred->DefBB = Pred;
259 Pred->BlkNum = PseudoEntry->BlkNum;
260 PseudoEntry->BlkNum++;
261 }
262
263 if (!NewIDom)
264 NewIDom = Pred;
265 else
266 NewIDom = IntersectDominators(NewIDom, Pred);
267 }
268
269 // Check if the IDom value has changed.
270 if (NewIDom && NewIDom != Info->IDom) {
271 Info->IDom = NewIDom;
272 Changed = true;
273 }
274 }
275 } while (Changed);
276 }
277
278 /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
279 /// any blocks containing definitions of the value. If one is found, then
280 /// the successor of Pred is in the dominance frontier for the definition,
281 /// and this function returns true.
282 bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
283 for (; Pred != IDom; Pred = Pred->IDom) {
284 if (Pred->DefBB == Pred)
285 return true;
286 }
287 return false;
288 }
289
290 /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
291 /// of the known definitions. Iteratively add PHIs in the dom frontiers
292 /// until nothing changes. Along the way, keep track of the nearest
293 /// dominating definitions for non-PHI blocks.
294 void FindPHIPlacement(BlockListTy *BlockList) {
295 bool Changed;
296 do {
297 Changed = false;
298 // Iterate over the list in reverse order, i.e., forward on CFG edges.
299 for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
300 E = BlockList->rend(); I != E; ++I) {
301 BBInfo *Info = *I;
302
303 // If this block already needs a PHI, there is nothing to do here.
304 if (Info->DefBB == Info)
305 continue;
306
307 // Default to use the same def as the immediate dominator.
308 BBInfo *NewDefBB = Info->IDom->DefBB;
309 for (unsigned p = 0; p != Info->NumPreds; ++p) {
310 if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
311 // Need a PHI here.
312 NewDefBB = Info;
313 break;
314 }
315 }
316
317 // Check if anything changed.
318 if (NewDefBB != Info->DefBB) {
319 Info->DefBB = NewDefBB;
320 Changed = true;
321 }
322 }
323 } while (Changed);
324 }
325
326 /// FindAvailableVal - If this block requires a PHI, first check if an
327 /// existing PHI matches the PHI placement and reaching definitions computed
328 /// earlier, and if not, create a new PHI. Visit all the block's
329 /// predecessors to calculate the available value for each one and fill in
330 /// the incoming values for a new PHI.
331 void FindAvailableVals(BlockListTy *BlockList) {
332 // Go through the worklist in forward order (i.e., backward through the CFG)
333 // and check if existing PHIs can be used. If not, create empty PHIs where
334 // they are needed.
335 for (typename BlockListTy::iterator I = BlockList->begin(),
336 E = BlockList->end(); I != E; ++I) {
337 BBInfo *Info = *I;
338 // Check if there needs to be a PHI in BB.
339 if (Info->DefBB != Info)
340 continue;
341
342 // Look for an existing PHI.
343 FindExistingPHI(Info->BB, BlockList);
344 if (Info->AvailableVal)
345 continue;
346
347 ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
348 Info->AvailableVal = PHI;
349 (*AvailableVals)[Info->BB] = PHI;
350 }
351
352 // Now go back through the worklist in reverse order to fill in the
353 // arguments for any new PHIs added in the forward traversal.
354 for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
355 E = BlockList->rend(); I != E; ++I) {
356 BBInfo *Info = *I;
357
358 if (Info->DefBB != Info) {
359 // Record the available value to speed up subsequent uses of this
360 // SSAUpdater for the same value.
361 (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
362 continue;
363 }
364
365 // Check if this block contains a newly added PHI.
366 PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
367 if (!PHI)
368 continue;
369
370 // Iterate through the block's predecessors.
371 for (unsigned p = 0; p != Info->NumPreds; ++p) {
372 BBInfo *PredInfo = Info->Preds[p];
373 BlkT *Pred = PredInfo->BB;
374 // Skip to the nearest preceding definition.
375 if (PredInfo->DefBB != PredInfo)
376 PredInfo = PredInfo->DefBB;
377 Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
378 }
379
380 LLVM_DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n")do { } while (false);
381
382 // If the client wants to know about all new instructions, tell it.
383 if (InsertedPHIs) InsertedPHIs->push_back(PHI);
384 }
385 }
386
387 /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
388 /// them match what is needed.
389 void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
390 for (auto &SomePHI : BB->phis()) {
391 if (CheckIfPHIMatches(&SomePHI)) {
392 RecordMatchingPHIs(BlockList);
393 break;
394 }
395 // Match failed: clear all the PHITag values.
396 for (typename BlockListTy::iterator I = BlockList->begin(),
397 E = BlockList->end(); I != E; ++I)
398 (*I)->PHITag = nullptr;
399 }
400 }
401
402 /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
403 /// in the BBMap.
404 bool CheckIfPHIMatches(PhiT *PHI) {
405 SmallVector<PhiT *, 20> WorkList;
406 WorkList.push_back(PHI);
407
408 // Mark that the block containing this PHI has been visited.
409 BBMap[PHI->getParent()]->PHITag = PHI;
410
411 while (!WorkList.empty()) {
412 PHI = WorkList.pop_back_val();
413
414 // Iterate through the PHI's incoming values.
415 for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
416 E = Traits::PHI_end(PHI); I != E; ++I) {
417 ValT IncomingVal = I.getIncomingValue();
418 BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
419 // Skip to the nearest preceding definition.
420 if (PredInfo->DefBB != PredInfo)
421 PredInfo = PredInfo->DefBB;
422
423 // Check if it matches the expected value.
424 if (PredInfo->AvailableVal) {
425 if (IncomingVal == PredInfo->AvailableVal)
426 continue;
427 return false;
428 }
429
430 // Check if the value is a PHI in the correct block.
431 PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
432 if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
433 return false;
434
435 // If this block has already been visited, check if this PHI matches.
436 if (PredInfo->PHITag) {
437 if (IncomingPHIVal == PredInfo->PHITag)
438 continue;
439 return false;
440 }
441 PredInfo->PHITag = IncomingPHIVal;
442
443 WorkList.push_back(IncomingPHIVal);
444 }
445 }
446 return true;
447 }
448
449 /// RecordMatchingPHIs - For each PHI node that matches, record it in both
450 /// the BBMap and the AvailableVals mapping.
451 void RecordMatchingPHIs(BlockListTy *BlockList) {
452 for (typename BlockListTy::iterator I = BlockList->begin(),
453 E = BlockList->end(); I != E; ++I)
454 if (PhiT *PHI = (*I)->PHITag) {
455 BlkT *BB = PHI->getParent();
456 ValT PHIVal = Traits::GetPHIValue(PHI);
457 (*AvailableVals)[BB] = PHIVal;
458 BBMap[BB]->AvailableVal = PHIVal;
459 }
460 }
461};
462
463} // end namespace llvm
464
465#undef DEBUG_TYPE"machine-ssaupdater" // "ssaupdater"
466
467#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Allocator.h

1//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
11/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
12/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
13/// allocator.
14///
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_SUPPORT_ALLOCATOR_H
18#define LLVM_SUPPORT_ALLOCATOR_H
19
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/Support/Alignment.h"
23#include "llvm/Support/AllocatorBase.h"
24#include "llvm/Support/Compiler.h"
25#include "llvm/Support/ErrorHandling.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/MemAlloc.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstdint>
32#include <cstdlib>
33#include <iterator>
34#include <type_traits>
35#include <utility>
36
37namespace llvm {
38
39namespace detail {
40
41// We call out to an external function to actually print the message as the
42// printing code uses Allocator.h in its implementation.
43void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
44 size_t TotalMemory);
45
46} // end namespace detail
47
48/// Allocate memory in an ever growing pool, as if by bump-pointer.
49///
50/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
51/// memory rather than relying on a boundless contiguous heap. However, it has
52/// bump-pointer semantics in that it is a monotonically growing pool of memory
53/// where every allocation is found by merely allocating the next N bytes in
54/// the slab, or the next N bytes in the next slab.
55///
56/// Note that this also has a threshold for forcing allocations above a certain
57/// size into their own slab.
58///
59/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
60/// object, which wraps malloc, to allocate memory, but it can be changed to
61/// use a custom allocator.
62///
63/// The GrowthDelay specifies after how many allocated slabs the allocator
64/// increases the size of the slabs.
65template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
66 size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
67class BumpPtrAllocatorImpl
68 : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
69 SizeThreshold, GrowthDelay>>,
70 private AllocatorT {
71public:
72 static_assert(SizeThreshold <= SlabSize,
73 "The SizeThreshold must be at most the SlabSize to ensure "
74 "that objects larger than a slab go into their own memory "
75 "allocation.");
76 static_assert(GrowthDelay > 0,
 77 "GrowthDelay must be at least 1 which already increases the "
78 "slab size after each allocated slab.");
79
80 BumpPtrAllocatorImpl() = default;
81
82 template <typename T>
83 BumpPtrAllocatorImpl(T &&Allocator)
84 : AllocatorT(std::forward<T &&>(Allocator)) {}
85
86 // Manually implement a move constructor as we must clear the old allocator's
87 // slabs as a matter of correctness.
88 BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
89 : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
90 End(Old.End), Slabs(std::move(Old.Slabs)),
91 CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
92 BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
93 Old.CurPtr = Old.End = nullptr;
94 Old.BytesAllocated = 0;
95 Old.Slabs.clear();
96 Old.CustomSizedSlabs.clear();
97 }
98
99 ~BumpPtrAllocatorImpl() {
100 DeallocateSlabs(Slabs.begin(), Slabs.end());
101 DeallocateCustomSizedSlabs();
102 }
103
104 BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
105 DeallocateSlabs(Slabs.begin(), Slabs.end());
106 DeallocateCustomSizedSlabs();
107
108 CurPtr = RHS.CurPtr;
109 End = RHS.End;
110 BytesAllocated = RHS.BytesAllocated;
111 RedZoneSize = RHS.RedZoneSize;
112 Slabs = std::move(RHS.Slabs);
113 CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
114 AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));
115
116 RHS.CurPtr = RHS.End = nullptr;
117 RHS.BytesAllocated = 0;
118 RHS.Slabs.clear();
119 RHS.CustomSizedSlabs.clear();
120 return *this;
121 }
122
123 /// Deallocate all but the current slab and reset the current pointer
124 /// to the beginning of it, freeing all memory allocated so far.
125 void Reset() {
126 // Deallocate all but the first slab, and deallocate all custom-sized slabs.
127 DeallocateCustomSizedSlabs();
128 CustomSizedSlabs.clear();
129
130 if (Slabs.empty())
131 return;
132
133 // Reset the state.
134 BytesAllocated = 0;
135 CurPtr = (char *)Slabs.front();
136 End = CurPtr + SlabSize;
137
138 __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
139 DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
140 Slabs.erase(std::next(Slabs.begin()), Slabs.end());
141 }
142
143 /// Allocate space at the specified alignment.
144 LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
145 Allocate(size_t Size, Align Alignment) {
146 // Keep track of how many bytes we've allocated.
147 BytesAllocated += Size;
148
149 size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
10
Calling 'offsetToAlignedAddr'
150 assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow")((void)0);
151
152 size_t SizeToAllocate = Size;
153#if LLVM_ADDRESS_SANITIZER_BUILD0
154 // Add trailing bytes as a "red zone" under ASan.
155 SizeToAllocate += RedZoneSize;
156#endif
157
158 // Check if we have enough space.
159 if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
160 char *AlignedPtr = CurPtr + Adjustment;
161 CurPtr = AlignedPtr + SizeToAllocate;
162 // Update the allocation point of this memory block in MemorySanitizer.
163 // Without this, MemorySanitizer messages for values originated from here
164 // will point to the allocation of the entire slab.
165 __msan_allocated_memory(AlignedPtr, Size);
166 // Similarly, tell ASan about this space.
167 __asan_unpoison_memory_region(AlignedPtr, Size);
168 return AlignedPtr;
169 }
170
171 // If Size is really big, allocate a separate slab for it.
172 size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
173 if (PaddedSize > SizeThreshold) {
174 void *NewSlab =
175 AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
 176 // We own the new slab and don't want anyone reading anything other than
177 // pieces returned from this method. So poison the whole slab.
178 __asan_poison_memory_region(NewSlab, PaddedSize);
179 CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
180
181 uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
182 assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize)((void)0);
183 char *AlignedPtr = (char*)AlignedAddr;
184 __msan_allocated_memory(AlignedPtr, Size);
185 __asan_unpoison_memory_region(AlignedPtr, Size);
186 return AlignedPtr;
187 }
188
189 // Otherwise, start a new slab and try again.
190 StartNewSlab();
191 uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
192 assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&((void)0)
193 "Unable to allocate memory!")((void)0);
194 char *AlignedPtr = (char*)AlignedAddr;
195 CurPtr = AlignedPtr + SizeToAllocate;
196 __msan_allocated_memory(AlignedPtr, Size);
197 __asan_unpoison_memory_region(AlignedPtr, Size);
198 return AlignedPtr;
199 }
200
201 inline LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
202 Allocate(size_t Size, size_t Alignment) {
203 assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.")((void)0);
204 return Allocate(Size, Align(Alignment));
9
Calling 'BumpPtrAllocatorImpl::Allocate'
205 }
206
207 // Pull in base class overloads.
208 using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
209
210 // Bump pointer allocators are expected to never free their storage; and
211 // clients expect pointers to remain valid for non-dereferencing uses even
212 // after deallocation.
213 void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
214 __asan_poison_memory_region(Ptr, Size);
215 }
216
217 // Pull in base class overloads.
218 using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
219
220 size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
221
222 /// \return An index uniquely and reproducibly identifying
223 /// an input pointer \p Ptr in the given allocator.
224 /// The returned value is negative iff the object is inside a custom-size
225 /// slab.
226 /// Returns an empty optional if the pointer is not found in the allocator.
227 llvm::Optional<int64_t> identifyObject(const void *Ptr) {
228 const char *P = static_cast<const char *>(Ptr);
229 int64_t InSlabIdx = 0;
230 for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
231 const char *S = static_cast<const char *>(Slabs[Idx]);
232 if (P >= S && P < S + computeSlabSize(Idx))
233 return InSlabIdx + static_cast<int64_t>(P - S);
234 InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
235 }
236
237 // Use negative index to denote custom sized slabs.
238 int64_t InCustomSizedSlabIdx = -1;
239 for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
240 const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
241 size_t Size = CustomSizedSlabs[Idx].second;
242 if (P >= S && P < S + Size)
243 return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
244 InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
245 }
246 return None;
247 }
248
249 /// A wrapper around identifyObject that additionally asserts that
250 /// the object is indeed within the allocator.
251 /// \return An index uniquely and reproducibly identifying
252 /// an input pointer \p Ptr in the given allocator.
253 int64_t identifyKnownObject(const void *Ptr) {
254 Optional<int64_t> Out = identifyObject(Ptr);
255 assert(Out && "Wrong allocator used")((void)0);
256 return *Out;
257 }
258
259 /// A wrapper around identifyKnownObject. Accepts type information
260 /// about the object and produces a smaller identifier by relying on
261 /// the alignment information. Note that sub-classes may have different
262 /// alignment, so the most base class should be passed as template parameter
263 /// in order to obtain correct results. For that reason automatic template
264 /// parameter deduction is disabled.
265 /// \return An index uniquely and reproducibly identifying
266 /// an input pointer \p Ptr in the given allocator. This identifier is
267 /// different from the ones produced by identifyObject and
268 /// identifyAlignedObject.
269 template <typename T>
270 int64_t identifyKnownAlignedObject(const void *Ptr) {
271 int64_t Out = identifyKnownObject(Ptr);
272 assert(Out % alignof(T) == 0 && "Wrong alignment information")((void)0);
273 return Out / alignof(T);
274 }
275
276 size_t getTotalMemory() const {
277 size_t TotalMemory = 0;
278 for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
279 TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
280 for (auto &PtrAndSize : CustomSizedSlabs)
281 TotalMemory += PtrAndSize.second;
282 return TotalMemory;
283 }
284
285 size_t getBytesAllocated() const { return BytesAllocated; }
286
287 void setRedZoneSize(size_t NewSize) {
288 RedZoneSize = NewSize;
289 }
290
291 void PrintStats() const {
292 detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
293 getTotalMemory());
294 }
295
296private:
297 /// The current pointer into the current slab.
298 ///
299 /// This points to the next free byte in the slab.
300 char *CurPtr = nullptr;
301
302 /// The end of the current slab.
303 char *End = nullptr;
304
305 /// The slabs allocated so far.
306 SmallVector<void *, 4> Slabs;
307
308 /// Custom-sized slabs allocated for too-large allocation requests.
309 SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
310
311 /// How many bytes we've allocated.
312 ///
313 /// Used so that we can compute how much space was wasted.
314 size_t BytesAllocated = 0;
315
316 /// The number of bytes to put between allocations when running under
317 /// a sanitizer.
318 size_t RedZoneSize = 1;
319
320 static size_t computeSlabSize(unsigned SlabIdx) {
321 // Scale the actual allocated slab size based on the number of slabs
322 // allocated. Every GrowthDelay slabs allocated, we double
323 // the allocated size to reduce allocation frequency, but saturate at
324 // multiplying the slab size by 2^30.
325 return SlabSize *
326 ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
327 }
328
329 /// Allocate a new slab and move the bump pointers over into the new
330 /// slab, modifying CurPtr and End.
331 void StartNewSlab() {
332 size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
333
334 void *NewSlab =
335 AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
336 // We own the new slab and don't want anyone reading anything other than
337 // pieces returned from this method. So poison the whole slab.
338 __asan_poison_memory_region(NewSlab, AllocatedSlabSize);
339
340 Slabs.push_back(NewSlab);
341 CurPtr = (char *)(NewSlab);
342 End = ((char *)NewSlab) + AllocatedSlabSize;
343 }
344
345 /// Deallocate a sequence of slabs.
346 void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
347 SmallVectorImpl<void *>::iterator E) {
348 for (; I != E; ++I) {
349 size_t AllocatedSlabSize =
350 computeSlabSize(std::distance(Slabs.begin(), I));
351 AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
352 }
353 }
354
355 /// Deallocate all memory for custom sized slabs.
356 void DeallocateCustomSizedSlabs() {
357 for (auto &PtrAndSize : CustomSizedSlabs) {
358 void *Ptr = PtrAndSize.first;
359 size_t Size = PtrAndSize.second;
360 AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
361 }
362 }
363
364 template <typename T> friend class SpecificBumpPtrAllocator;
365};
366
367/// The standard BumpPtrAllocator which just uses the default template
368/// parameters.
369typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
370
371/// A BumpPtrAllocator that allows only elements of a specific type to be
372/// allocated.
373///
374/// This allows calling the destructor in DestroyAll() and when the allocator is
375/// destroyed.
376template <typename T> class SpecificBumpPtrAllocator {
377 BumpPtrAllocator Allocator;
378
379public:
380 SpecificBumpPtrAllocator() {
381 // Because SpecificBumpPtrAllocator walks the memory to call destructors,
382 // it can't have red zones between allocations.
383 Allocator.setRedZoneSize(0);
384 }
385 SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
386 : Allocator(std::move(Old.Allocator)) {}
387 ~SpecificBumpPtrAllocator() { DestroyAll(); }
388
389 SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
390 Allocator = std::move(RHS.Allocator);
391 return *this;
392 }
393
394 /// Call the destructor of each allocated object and deallocate all but the
395 /// current slab and reset the current pointer to the beginning of it, freeing
396 /// all memory allocated so far.
397 void DestroyAll() {
398 auto DestroyElements = [](char *Begin, char *End) {
399 assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()))((void)0);
400 for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
401 reinterpret_cast<T *>(Ptr)->~T();
402 };
403
404 for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
405 ++I) {
406 size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
407 std::distance(Allocator.Slabs.begin(), I));
408 char *Begin = (char *)alignAddr(*I, Align::Of<T>());
409 char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
410 : (char *)*I + AllocatedSlabSize;
411
412 DestroyElements(Begin, End);
413 }
414
415 for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
416 void *Ptr = PtrAndSize.first;
417 size_t Size = PtrAndSize.second;
418 DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
419 (char *)Ptr + Size);
420 }
421
422 Allocator.Reset();
423 }
424
425 /// Allocate space for an array of objects without constructing them.
426 T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
427};
428
429} // end namespace llvm
430
431template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
432 size_t GrowthDelay>
433void *
434operator new(size_t Size,
435 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
436 GrowthDelay> &Allocator) {
437 return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
8
Calling 'BumpPtrAllocatorImpl::Allocate'
438 alignof(std::max_align_t)));
439}
440
441template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
442 size_t GrowthDelay>
443void operator delete(void *,
444 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
445 SizeThreshold, GrowthDelay> &) {
446}
447
448#endif // LLVM_SUPPORT_ALLOCATOR_H

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h

1//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains types to represent alignments.
10// They are instrumented to guarantee some invariants are preserved and prevent
11// invalid manipulations.
12//
13// - Align represents an alignment in bytes, it is always set and always a valid
14// power of two, its minimum value is 1 which means no alignment requirements.
15//
16// - MaybeAlign is an optional type, it may be undefined or set. When it's set
17// you can get the underlying Align type by using the getValue() method.
18//
19//===----------------------------------------------------------------------===//
20
21#ifndef LLVM_SUPPORT_ALIGNMENT_H_
22#define LLVM_SUPPORT_ALIGNMENT_H_
23
24#include "llvm/ADT/Optional.h"
25#include "llvm/Support/MathExtras.h"
26#include <cassert>
27#ifndef NDEBUG1
28#include <string>
29#endif // NDEBUG
30
31namespace llvm {
32
33#define ALIGN_CHECK_ISPOSITIVE(decl) \
34 assert(decl > 0 && (#decl " should be defined"))((void)0)
35
36/// This struct is a compact representation of a valid (non-zero power of two)
37/// alignment.
38/// It is suitable for use as static global constants.
39struct Align {
40private:
41 uint8_t ShiftValue = 0; /// The log2 of the required alignment.
42 /// ShiftValue is less than 64 by construction.
43
44 friend struct MaybeAlign;
45 friend unsigned Log2(Align);
46 friend bool operator==(Align Lhs, Align Rhs);
47 friend bool operator!=(Align Lhs, Align Rhs);
48 friend bool operator<=(Align Lhs, Align Rhs);
49 friend bool operator>=(Align Lhs, Align Rhs);
50 friend bool operator<(Align Lhs, Align Rhs);
51 friend bool operator>(Align Lhs, Align Rhs);
52 friend unsigned encode(struct MaybeAlign A);
53 friend struct MaybeAlign decodeMaybeAlign(unsigned Value);
54
55 /// A trivial type to allow construction of constexpr Align.
56 /// This is currently needed to workaround a bug in GCC 5.3 which prevents
57 /// definition of constexpr assign operators.
58 /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
59 /// FIXME: Remove this, make all assign operators constexpr and introduce user
60 /// defined literals when we don't have to support GCC 5.3 anymore.
61 /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
62 struct LogValue {
63 uint8_t Log;
64 };
65
66public:
67 /// Default is byte-aligned.
68 constexpr Align() = default;
69 /// Do not perform checks in case of copy/move construct/assign, because the
70 /// checks have been performed when building `Other`.
71 constexpr Align(const Align &Other) = default;
72 constexpr Align(Align &&Other) = default;
73 Align &operator=(const Align &Other) = default;
74 Align &operator=(Align &&Other) = default;
75
76 explicit Align(uint64_t Value) {
77 assert(Value > 0 && "Value must not be 0")((void)0);
78 assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2")((void)0);
79 ShiftValue = Log2_64(Value);
80 assert(ShiftValue < 64 && "Broken invariant")((void)0);
81 }
82
83 /// This is a hole in the type system and should not be abused.
84 /// Needed to interact with C for instance.
85 uint64_t value() const { return uint64_t(1) << ShiftValue; }
15
The result of the left shift is undefined due to shifting by '255', which is greater than or equal to the width of type 'uint64_t'
86
87 /// Allow constructions of constexpr Align.
88 template <size_t kValue> constexpr static LogValue Constant() {
89 return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
90 }
91
92 /// Allow constructions of constexpr Align from types.
93 /// Compile time equivalent to Align(alignof(T)).
94 template <typename T> constexpr static LogValue Of() {
95 return Constant<std::alignment_of<T>::value>();
96 }
97
98 /// Constexpr constructor from LogValue type.
99 constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
100};
101
102/// Treats the value 0 as a 1, so Align is always at least 1.
103inline Align assumeAligned(uint64_t Value) {
104 return Value ? Align(Value) : Align();
105}
106
107/// This struct is a compact representation of a valid (power of two) or
108/// undefined (0) alignment.
109struct MaybeAlign : public llvm::Optional<Align> {
110private:
111 using UP = llvm::Optional<Align>;
112
113public:
114 /// Default is undefined.
115 MaybeAlign() = default;
116 /// Do not perform checks in case of copy/move construct/assign, because the
117 /// checks have been performed when building `Other`.
118 MaybeAlign(const MaybeAlign &Other) = default;
119 MaybeAlign &operator=(const MaybeAlign &Other) = default;
120 MaybeAlign(MaybeAlign &&Other) = default;
121 MaybeAlign &operator=(MaybeAlign &&Other) = default;
122
123 /// Use llvm::Optional<Align> constructor.
124 using UP::UP;
125
126 explicit MaybeAlign(uint64_t Value) {
127 assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&((void)0)
128 "Alignment is neither 0 nor a power of 2")((void)0);
129 if (Value)
130 emplace(Value);
131 }
132
133 /// For convenience, returns a valid alignment or 1 if undefined.
134 Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
135};
136
137/// Checks that SizeInBytes is a multiple of the alignment.
138inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
139 return SizeInBytes % Lhs.value() == 0;
140}
141
142/// Checks that Addr is a multiple of the alignment.
143inline bool isAddrAligned(Align Lhs, const void *Addr) {
144 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
145}
146
147/// Returns a multiple of A needed to store `Size` bytes.
148inline uint64_t alignTo(uint64_t Size, Align A) {
149 const uint64_t Value = A.value();
14
Calling 'Align::value'
150 // The following line is equivalent to `(Size + Value - 1) / Value * Value`.
151
152 // The division followed by a multiplication can be thought of as a right
153 // shift followed by a left shift which zeros out the extra bits produced in
154 // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
155 // are just zero.
156
157 // Most compilers can generate this code but the pattern may be missed when
158 // multiple functions gets inlined.
159 return (Size + Value - 1) & ~(Value - 1U);
160}
161
162/// If non-zero \p Skew is specified, the return value will be a minimal integer
163/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
164/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
165/// Skew mod \p A'.
166///
167/// Examples:
168/// \code
169/// alignTo(5, Align(8), 7) = 7
170/// alignTo(17, Align(8), 1) = 17
171/// alignTo(~0LL, Align(8), 3) = 3
172/// \endcode
173inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
174 const uint64_t Value = A.value();
175 Skew %= Value;
176 return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
177}
178
179/// Returns a multiple of A needed to store `Size` bytes.
180/// Returns `Size` if current alignment is undefined.
181inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
182 return A ? alignTo(Size, A.getValue()) : Size;
183}
184
185/// Aligns `Addr` to `Alignment` bytes, rounding up.
186inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
187 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
188 assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=((void)0)
189 ArithAddr &&((void)0)
190 "Overflow")((void)0);
191 return alignTo(ArithAddr, Alignment);
192}
193
194/// Returns the offset to the next integer (mod 2**64) that is greater than
195/// or equal to \p Value and is a multiple of \p Align.
196inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
197 return alignTo(Value, Alignment) - Value;
12
The value 255 is assigned to 'A.ShiftValue'
13
Calling 'alignTo'
198}
199
200/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
201/// bytes, rounding up.
202inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
203 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
11
Calling 'offsetToAlignment'
204}
205
206/// Returns the log2 of the alignment.
207inline unsigned Log2(Align A) { return A.ShiftValue; }
208
209/// Returns the alignment that satisfies both alignments.
210/// Same semantic as MinAlign.
211inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }
212
213/// Returns the alignment that satisfies both alignments.
214/// Same semantic as MinAlign.
215inline Align commonAlignment(Align A, uint64_t Offset) {
216 return Align(MinAlign(A.value(), Offset));
217}
218
219/// Returns the alignment that satisfies both alignments.
220/// Same semantic as MinAlign.
221inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
222 return A && B ? commonAlignment(*A, *B) : A ? A : B;
223}
224
225/// Returns the alignment that satisfies both alignments.
226/// Same semantic as MinAlign.
227inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
228 return MaybeAlign(MinAlign((*A).value(), Offset));
229}
230
231/// Returns a representation of the alignment that encodes undefined as 0.
232inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }
233
234/// Dual operation of the encode function above.
235inline MaybeAlign decodeMaybeAlign(unsigned Value) {
236 if (Value == 0)
237 return MaybeAlign();
238 Align Out;
239 Out.ShiftValue = Value - 1;
240 return Out;
241}
242
243/// Returns a representation of the alignment, the encoded value is positive by
244/// definition.
245inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
246
247/// Comparisons between Align and scalars. Rhs must be positive.
248inline bool operator==(Align Lhs, uint64_t Rhs) {
249 ALIGN_CHECK_ISPOSITIVE(Rhs);
250 return Lhs.value() == Rhs;
251}
252inline bool operator!=(Align Lhs, uint64_t Rhs) {
253 ALIGN_CHECK_ISPOSITIVE(Rhs);
254 return Lhs.value() != Rhs;
255}
256inline bool operator<=(Align Lhs, uint64_t Rhs) {
257 ALIGN_CHECK_ISPOSITIVE(Rhs);
258 return Lhs.value() <= Rhs;
259}
260inline bool operator>=(Align Lhs, uint64_t Rhs) {
261 ALIGN_CHECK_ISPOSITIVE(Rhs);
262 return Lhs.value() >= Rhs;
263}
264inline bool operator<(Align Lhs, uint64_t Rhs) {
265 ALIGN_CHECK_ISPOSITIVE(Rhs);
266 return Lhs.value() < Rhs;
267}
268inline bool operator>(Align Lhs, uint64_t Rhs) {
269 ALIGN_CHECK_ISPOSITIVE(Rhs);
270 return Lhs.value() > Rhs;
271}
272
273/// Comparisons between MaybeAlign and scalars.
274inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
275 return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
276}
277inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
278 return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
279}
280
281/// Comparisons operators between Align.
282inline bool operator==(Align Lhs, Align Rhs) {
283 return Lhs.ShiftValue == Rhs.ShiftValue;
284}
285inline bool operator!=(Align Lhs, Align Rhs) {
286 return Lhs.ShiftValue != Rhs.ShiftValue;
287}
288inline bool operator<=(Align Lhs, Align Rhs) {
289 return Lhs.ShiftValue <= Rhs.ShiftValue;
290}
291inline bool operator>=(Align Lhs, Align Rhs) {
292 return Lhs.ShiftValue >= Rhs.ShiftValue;
293}
294inline bool operator<(Align Lhs, Align Rhs) {
295 return Lhs.ShiftValue < Rhs.ShiftValue;
296}
297inline bool operator>(Align Lhs, Align Rhs) {
298 return Lhs.ShiftValue > Rhs.ShiftValue;
299}
300
301// Don't allow relational comparisons with MaybeAlign.
302bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
303bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
304bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
305bool operator>(Align Lhs, MaybeAlign Rhs) = delete;
306
307bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
308bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
309bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
310bool operator>(MaybeAlign Lhs, Align Rhs) = delete;
311
312bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
313bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
314bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
315bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316
317inline Align operator*(Align Lhs, uint64_t Rhs) {
318 assert(Rhs > 0 && "Rhs must be positive")((void)0);
319 return Align(Lhs.value() * Rhs);
320}
321
322inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
323 assert(Rhs > 0 && "Rhs must be positive")((void)0);
324 return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
325}
326
327inline Align operator/(Align Lhs, uint64_t Divisor) {
328 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
329 "Divisor must be positive and a power of 2")((void)0);
330 assert(Lhs != 1 && "Can't halve byte alignment")((void)0);
331 return Align(Lhs.value() / Divisor);
332}
333
334inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
335 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
336 "Divisor must be positive and a power of 2")((void)0);
337 return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
338}
339
340inline Align max(MaybeAlign Lhs, Align Rhs) {
341 return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
342}
343
344inline Align max(Align Lhs, MaybeAlign Rhs) {
345 return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
346}
347
348#ifndef NDEBUG1
349// For usage in LLVM_DEBUG macros.
350inline std::string DebugStr(const Align &A) {
351 return std::to_string(A.value());
352}
353// For usage in LLVM_DEBUG macros.
354inline std::string DebugStr(const MaybeAlign &MA) {
355 if (MA)
356 return std::to_string(MA->value());
357 return "None";
358}
359#endif // NDEBUG
360
361#undef ALIGN_CHECK_ISPOSITIVE
362
363} // namespace llvm
364
365#endif // LLVM_SUPPORT_ALIGNMENT_H_