Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
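
For context: in C++, shifting a value by an amount greater than or equal to the bit width of its (promoted) type is undefined behavior, so a shift count applied to a uint64_t must stay below 64. A minimal sketch of the flagged pattern, using illustrative names rather than the actual Alignment.h code:

#include <cstdint>

uint64_t shiftedOne(uint8_t Shift) {
  // Undefined behavior whenever Shift >= 64; the analyzer found a path
  // on which Shift could be 255, the maximum value of a uint8_t.
  return uint64_t(1) << Shift;
}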

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name RegisterScavenging.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/RegisterScavenging.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/RegisterScavenging.cpp

1//===- RegisterScavenging.cpp - Machine register scavenging ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file implements the machine register scavenger. It can provide
11/// information, such as unused registers, at any point in a machine basic
12/// block. It also provides a mechanism to make registers available by evicting
13/// them to spill slots.
14//
15//===----------------------------------------------------------------------===//
16
17#include "llvm/CodeGen/RegisterScavenging.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/CodeGen/LiveRegUnits.h"
23#include "llvm/CodeGen/MachineBasicBlock.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineFunction.h"
26#include "llvm/CodeGen/MachineFunctionPass.h"
27#include "llvm/CodeGen/MachineInstr.h"
28#include "llvm/CodeGen/MachineOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/TargetFrameLowering.h"
31#include "llvm/CodeGen/TargetInstrInfo.h"
32#include "llvm/CodeGen/TargetRegisterInfo.h"
33#include "llvm/CodeGen/TargetSubtargetInfo.h"
34#include "llvm/InitializePasses.h"
35#include "llvm/MC/MCRegisterInfo.h"
36#include "llvm/Pass.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include <algorithm>
41#include <cassert>
42#include <iterator>
43#include <limits>
44#include <string>
45#include <utility>
46
47using namespace llvm;
48
49#define DEBUG_TYPE "reg-scavenging"
50
51STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
52
53void RegScavenger::setRegUsed(Register Reg, LaneBitmask LaneMask) {
54 LiveUnits.addRegMasked(Reg, LaneMask);
55}
56
57void RegScavenger::init(MachineBasicBlock &MBB) {
58 MachineFunction &MF = *MBB.getParent();
59 TII = MF.getSubtarget().getInstrInfo();
60 TRI = MF.getSubtarget().getRegisterInfo();
61 MRI = &MF.getRegInfo();
62 LiveUnits.init(*TRI);
63
64 assert((NumRegUnits == 0 || NumRegUnits == TRI->getNumRegUnits()) &&
65 "Target changed?");
66
67 // Self-initialize.
68 if (!this->MBB) {
69 NumRegUnits = TRI->getNumRegUnits();
70 KillRegUnits.resize(NumRegUnits);
71 DefRegUnits.resize(NumRegUnits);
72 TmpRegUnits.resize(NumRegUnits);
73 }
74 this->MBB = &MBB;
75
76 for (ScavengedInfo &SI : Scavenged) {
77 SI.Reg = 0;
78 SI.Restore = nullptr;
79 }
80
81 Tracking = false;
82}
83
84void RegScavenger::enterBasicBlock(MachineBasicBlock &MBB) {
85 init(MBB);
86 LiveUnits.addLiveIns(MBB);
87}
88
89void RegScavenger::enterBasicBlockEnd(MachineBasicBlock &MBB) {
90 init(MBB);
91 LiveUnits.addLiveOuts(MBB);
92
93 // Move the internal iterator to the last instruction of the block.
94 if (!MBB.empty()) {
95 MBBI = std::prev(MBB.end());
96 Tracking = true;
97 }
98}
99
100void RegScavenger::addRegUnits(BitVector &BV, MCRegister Reg) {
101 for (MCRegUnitIterator RUI(Reg, TRI); RUI.isValid(); ++RUI)
102 BV.set(*RUI);
103}
104
105void RegScavenger::removeRegUnits(BitVector &BV, MCRegister Reg) {
106 for (MCRegUnitIterator RUI(Reg, TRI); RUI.isValid(); ++RUI)
107 BV.reset(*RUI);
108}
109
110void RegScavenger::determineKillsAndDefs() {
111 assert(Tracking && "Must be tracking to determine kills and defs");
112
113 MachineInstr &MI = *MBBI;
114 assert(!MI.isDebugInstr() && "Debug values have no kills or defs");
115
116 // Find out which registers are early clobbered, killed, defined, and marked
117 // def-dead in this instruction.
118 KillRegUnits.reset();
119 DefRegUnits.reset();
120 for (const MachineOperand &MO : MI.operands()) {
121 if (MO.isRegMask()) {
122 TmpRegUnits.reset();
123 for (unsigned RU = 0, RUEnd = TRI->getNumRegUnits(); RU != RUEnd; ++RU) {
124 for (MCRegUnitRootIterator RURI(RU, TRI); RURI.isValid(); ++RURI) {
125 if (MO.clobbersPhysReg(*RURI)) {
126 TmpRegUnits.set(RU);
127 break;
128 }
129 }
130 }
131
132 // Apply the mask.
133 KillRegUnits |= TmpRegUnits;
134 }
135 if (!MO.isReg())
136 continue;
137 if (!MO.getReg().isPhysical() || isReserved(MO.getReg()))
138 continue;
139 MCRegister Reg = MO.getReg().asMCReg();
140
141 if (MO.isUse()) {
142 // Ignore undef uses.
143 if (MO.isUndef())
144 continue;
145 if (MO.isKill())
146 addRegUnits(KillRegUnits, Reg);
147 } else {
148 assert(MO.isDef());
149 if (MO.isDead())
150 addRegUnits(KillRegUnits, Reg);
151 else
152 addRegUnits(DefRegUnits, Reg);
153 }
154 }
155}
156
157void RegScavenger::forward() {
158 // Move ptr forward.
159 if (!Tracking) {
160 MBBI = MBB->begin();
161 Tracking = true;
162 } else {
163 assert(MBBI != MBB->end() && "Already past the end of the basic block!");
164 MBBI = std::next(MBBI);
165 }
166 assert(MBBI != MBB->end() && "Already at the end of the basic block!");
167
168 MachineInstr &MI = *MBBI;
169
170 for (ScavengedInfo &I : Scavenged) {
171 if (I.Restore != &MI)
172 continue;
173
174 I.Reg = 0;
175 I.Restore = nullptr;
176 }
177
178 if (MI.isDebugOrPseudoInstr())
179 return;
180
181 determineKillsAndDefs();
182
183 // Verify uses and defs.
184#ifndef NDEBUG
185 for (const MachineOperand &MO : MI.operands()) {
186 if (!MO.isReg())
187 continue;
188 Register Reg = MO.getReg();
189 if (!Register::isPhysicalRegister(Reg) || isReserved(Reg))
190 continue;
191 if (MO.isUse()) {
192 if (MO.isUndef())
193 continue;
194 if (!isRegUsed(Reg)) {
195 // Check if it's partial live: e.g.
196 // D0 = insert_subreg undef D0, S0
197 // ... D0
198 // The problem is the insert_subreg could be eliminated. The use of
199 // D0 is using a partially undef value. This is not *incorrect* since
200 // S1 can be freely clobbered.
201 // Ideally we would like a way to model this, but leaving the
202 // insert_subreg around causes both correctness and performance issues.
203 bool SubUsed = false;
204 for (const MCPhysReg &SubReg : TRI->subregs(Reg))
205 if (isRegUsed(SubReg)) {
206 SubUsed = true;
207 break;
208 }
209 bool SuperUsed = false;
210 for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR) {
211 if (isRegUsed(*SR)) {
212 SuperUsed = true;
213 break;
214 }
215 }
216 if (!SubUsed && !SuperUsed) {
217 MBB->getParent()->verify(nullptr, "In Register Scavenger");
218 llvm_unreachable("Using an undefined register!");
219 }
220 (void)SubUsed;
221 (void)SuperUsed;
222 }
223 } else {
224 assert(MO.isDef());
225#if 0
226 // FIXME: Enable this once we've figured out how to correctly transfer
227 // implicit kills during codegen passes like the coalescer.
228 assert((KillRegs.test(Reg) || isUnused(Reg) ||
229 isLiveInButUnusedBefore(Reg, MI, MBB, TRI, MRI)) &&
230 "Re-defining a live register!");
231#endif
232 }
233 }
234#endif // NDEBUG
235
236 // Commit the changes.
237 setUnused(KillRegUnits);
238 setUsed(DefRegUnits);
239}
240
241void RegScavenger::backward() {
242 assert(Tracking && "Must be tracking to determine kills and defs");
243
244 const MachineInstr &MI = *MBBI;
245 LiveUnits.stepBackward(MI);
246
247 // Expire scavenge spill frameindex uses.
248 for (ScavengedInfo &I : Scavenged) {
249 if (I.Restore == &MI) {
250 I.Reg = 0;
251 I.Restore = nullptr;
252 }
253 }
254
255 if (MBBI == MBB->begin()) {
256 MBBI = MachineBasicBlock::iterator(nullptr);
257 Tracking = false;
258 } else
259 --MBBI;
260}
261
262bool RegScavenger::isRegUsed(Register Reg, bool includeReserved) const {
263 if (isReserved(Reg))
264 return includeReserved;
265 return !LiveUnits.available(Reg);
266}
267
268Register RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
269 for (Register Reg : *RC) {
270 if (!isRegUsed(Reg)) {
271 LLVM_DEBUG(dbgs() << "Scavenger found unused reg: " << printReg(Reg, TRI)
272 << "\n");
273 return Reg;
274 }
275 }
276 return 0;
277}
278
279BitVector RegScavenger::getRegsAvailable(const TargetRegisterClass *RC) {
280 BitVector Mask(TRI->getNumRegs());
281 for (Register Reg : *RC)
282 if (!isRegUsed(Reg))
283 Mask.set(Reg);
284 return Mask;
285}
286
287Register RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
288 BitVector &Candidates,
289 unsigned InstrLimit,
290 MachineBasicBlock::iterator &UseMI) {
291 int Survivor = Candidates.find_first();
292 assert(Survivor > 0 && "No candidates for scavenging");
293
294 MachineBasicBlock::iterator ME = MBB->getFirstTerminator();
295 assert(StartMI != ME && "MI already at terminator");
296 MachineBasicBlock::iterator RestorePointMI = StartMI;
297 MachineBasicBlock::iterator MI = StartMI;
298
299 bool inVirtLiveRange = false;
300 for (++MI; InstrLimit > 0 && MI != ME; ++MI, --InstrLimit) {
301 if (MI->isDebugOrPseudoInstr()) {
302 ++InstrLimit; // Don't count debug instructions
303 continue;
304 }
305 bool isVirtKillInsn = false;
306 bool isVirtDefInsn = false;
307 // Remove any candidates touched by instruction.
308 for (const MachineOperand &MO : MI->operands()) {
309 if (MO.isRegMask())
310 Candidates.clearBitsNotInMask(MO.getRegMask());
311 if (!MO.isReg() || MO.isUndef() || !MO.getReg())
312 continue;
313 if (Register::isVirtualRegister(MO.getReg())) {
314 if (MO.isDef())
315 isVirtDefInsn = true;
316 else if (MO.isKill())
317 isVirtKillInsn = true;
318 continue;
319 }
320 for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
321 Candidates.reset(*AI);
322 }
323 // If we're not in a virtual reg's live range, this is a valid
324 // restore point.
325 if (!inVirtLiveRange) RestorePointMI = MI;
326
327 // Update whether we're in the live range of a virtual register
328 if (isVirtKillInsn) inVirtLiveRange = false;
329 if (isVirtDefInsn) inVirtLiveRange = true;
330
331 // Was our survivor untouched by this instruction?
332 if (Candidates.test(Survivor))
333 continue;
334
335 // All candidates gone?
336 if (Candidates.none())
337 break;
338
339 Survivor = Candidates.find_first();
340 }
341 // If we ran off the end, that's where we want to restore.
342 if (MI == ME) RestorePointMI = ME;
343 assert(RestorePointMI != StartMI &&
344 "No available scavenger restore location!");
345
346 // We ran out of candidates, so stop the search.
347 UseMI = RestorePointMI;
348 return Survivor;
349}
350
351/// Given the bitvector \p Available of free register units at position
352/// \p From, search backwards to find a register that is part of \p
353/// Candidates and not used/clobbered until the point \p To. If there are
354/// multiple candidates, continue searching and pick the one that is not used/
355/// clobbered for the longest time.
356/// Returns the register and the earliest position we know it to be free or
357/// the position MBB.end() if no register is available.
358static std::pair<MCPhysReg, MachineBasicBlock::iterator>
359findSurvivorBackwards(const MachineRegisterInfo &MRI,
360 MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
361 const LiveRegUnits &LiveOut, ArrayRef<MCPhysReg> AllocationOrder,
362 bool RestoreAfter) {
363 bool FoundTo = false;
364 MCPhysReg Survivor = 0;
365 MachineBasicBlock::iterator Pos;
366 MachineBasicBlock &MBB = *From->getParent();
367 unsigned InstrLimit = 25;
368 unsigned InstrCountDown = InstrLimit;
369 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
370 LiveRegUnits Used(TRI);
371
372 assert(From->getParent() == To->getParent() &&
373 "Target instruction is in other than current basic block, use "
374 "enterBasicBlockEnd first");
375
376 for (MachineBasicBlock::iterator I = From;; --I) {
377 const MachineInstr &MI = *I;
378
379 Used.accumulate(MI);
380
381 if (I == To) {
382 // See if one of the registers in RC wasn't used so far.
383 for (MCPhysReg Reg : AllocationOrder) {
384 if (!MRI.isReserved(Reg) && Used.available(Reg) &&
385 LiveOut.available(Reg))
386 return std::make_pair(Reg, MBB.end());
387 }
388 // Otherwise we will continue up to InstrLimit instructions to find
389 // the register which is not defined/used for the longest time.
390 FoundTo = true;
391 Pos = To;
392 // Note: It was fine so far to start our search at From; however, now that
393 // we have to spill and can only place the restore after From, we
394 // add the regs used/defed by std::next(From) to the set.
395 if (RestoreAfter)
396 Used.accumulate(*std::next(From));
397 }
398 if (FoundTo) {
399 if (Survivor == 0 || !Used.available(Survivor)) {
400 MCPhysReg AvilableReg = 0;
401 for (MCPhysReg Reg : AllocationOrder) {
402 if (!MRI.isReserved(Reg) && Used.available(Reg)) {
403 AvilableReg = Reg;
404 break;
405 }
406 }
407 if (AvilableReg == 0)
408 break;
409 Survivor = AvilableReg;
410 }
411 if (--InstrCountDown == 0)
412 break;
413
414 // Keep searching when we find a vreg since the spilled register will
415 // be useful for this other vreg as well later.
416 bool FoundVReg = false;
417 for (const MachineOperand &MO : MI.operands()) {
418 if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
419 FoundVReg = true;
420 break;
421 }
422 }
423 if (FoundVReg) {
424 InstrCountDown = InstrLimit;
425 Pos = I;
426 }
427 if (I == MBB.begin())
428 break;
429 }
430 assert(I != MBB.begin() && "Did not find target instruction while "
431 "iterating backwards");
432 }
433
434 return std::make_pair(Survivor, Pos);
435}
436
437static unsigned getFrameIndexOperandNum(MachineInstr &MI) {
438 unsigned i = 0;
439 while (!MI.getOperand(i).isFI()) {
440 ++i;
441 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
442 }
443 return i;
444}
445
446RegScavenger::ScavengedInfo &
447RegScavenger::spill(Register Reg, const TargetRegisterClass &RC, int SPAdj,
448 MachineBasicBlock::iterator Before,
449 MachineBasicBlock::iterator &UseMI) {
450 // Find an available scavenging slot with size and alignment matching
451 // the requirements of the class RC.
452 const MachineFunction &MF = *Before->getMF();
453 const MachineFrameInfo &MFI = MF.getFrameInfo();
454 unsigned NeedSize = TRI->getSpillSize(RC);
455 Align NeedAlign = TRI->getSpillAlign(RC);
456
457 unsigned SI = Scavenged.size(), Diff = std::numeric_limits<unsigned>::max();
458 int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
459 for (unsigned I = 0; I < Scavenged.size(); ++I) {
    [23] Assuming the condition is true
    [24] Loop condition is true. Entering loop body
460 if (Scavenged[I].Reg != 0)
    [25] Taking false branch
461 continue;
462 // Verify that this slot is valid for this register.
463 int FI = Scavenged[I].FrameIndex;
464 if (FI < FIB || FI >= FIE)
    [26] Assuming 'FI' is >= 'FIB'
    [27] Assuming 'FI' is < 'FIE'
    [28] Taking false branch
465 continue;
466 unsigned S = MFI.getObjectSize(FI);
467 Align A = MFI.getObjectAlign(FI);
    [29] Calling 'MachineFrameInfo::getObjectAlign'
    [31] Returning from 'MachineFrameInfo::getObjectAlign'
468 if (NeedSize > S || NeedAlign > A)
    [32] Assuming 'NeedSize' is <= 'S'
    [33] Taking false branch
469 continue;
470 // Avoid wasting slots with large size and/or large alignment. Pick one
471 // that is the best fit for this register class (in street metric).
472 // Picking a larger slot than necessary could happen if a slot for a
473 // larger register is reserved before a slot for a smaller one. When
474 // trying to spill a smaller register, the large slot would be found
475 // first, thus making it impossible to spill the larger register later.
476 unsigned D = (S - NeedSize) + (A.value() - NeedAlign.value());
    [34] Calling 'Align::value'
477 if (D < Diff) {
478 SI = I;
479 Diff = D;
480 }
481 }
482
483 if (SI == Scavenged.size()) {
484 // We need to scavenge a register but have no spill slot, the target
485 // must know how to do it (if not, we'll assert below).
486 Scavenged.push_back(ScavengedInfo(FIE));
487 }
488
489 // Avoid infinite regress
490 Scavenged[SI].Reg = Reg;
491
492 // If the target knows how to save/restore the register, let it do so;
493 // otherwise, use the emergency stack spill slot.
494 if (!TRI->saveScavengerRegister(*MBB, Before, UseMI, &RC, Reg)) {
495 // Spill the scavenged register before \p Before.
496 int FI = Scavenged[SI].FrameIndex;
497 if (FI < FIB || FI >= FIE) {
498 std::string Msg = std::string("Error while trying to spill ") +
499 TRI->getName(Reg) + " from class " + TRI->getRegClassName(&RC) +
500 ": Cannot scavenge register without an emergency spill slot!";
501 report_fatal_error(Msg.c_str());
502 }
503 TII->storeRegToStackSlot(*MBB, Before, Reg, true, Scavenged[SI].FrameIndex,
504 &RC, TRI);
505 MachineBasicBlock::iterator II = std::prev(Before);
506
507 unsigned FIOperandNum = getFrameIndexOperandNum(*II);
508 TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
509
510 // Restore the scavenged register before its use (or first terminator).
511 TII->loadRegFromStackSlot(*MBB, UseMI, Reg, Scavenged[SI].FrameIndex,
512 &RC, TRI);
513 II = std::prev(UseMI);
514
515 FIOperandNum = getFrameIndexOperandNum(*II);
516 TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
517 }
518 return Scavenged[SI];
519}
520
521Register RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
522 MachineBasicBlock::iterator I,
523 int SPAdj, bool AllowSpill) {
524 MachineInstr &MI = *I;
525 const MachineFunction &MF = *MI.getMF();
526 // Consider all allocatable registers in the register class initially
527 BitVector Candidates = TRI->getAllocatableSet(MF, RC);
528
529 // Exclude all the registers being used by the instruction.
530 for (const MachineOperand &MO : MI.operands()) {
531 if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
532 !Register::isVirtualRegister(MO.getReg()))
533 for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
534 Candidates.reset(*AI);
535 }
536
537 // Try to find a register that's unused if there is one, as then we won't
538 // have to spill.
539 BitVector Available = getRegsAvailable(RC);
540 Available &= Candidates;
541 if (Available.any())
542 Candidates = Available;
543
544 // Find the register whose use is furthest away.
545 MachineBasicBlock::iterator UseMI;
546 Register SReg = findSurvivorReg(I, Candidates, 25, UseMI);
547
548 // If we found an unused register there is no reason to spill it.
549 if (!isRegUsed(SReg)) {
550 LLVM_DEBUG(dbgs() << "Scavenged register: " << printReg(SReg, TRI) << "\n");
551 return SReg;
552 }
553
554 if (!AllowSpill)
555 return 0;
556
557 ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI);
558 Scavenged.Restore = &*std::prev(UseMI);
559
560 LLVM_DEBUG(dbgs() << "Scavenged register (with spill): "
561 << printReg(SReg, TRI) << "\n");
562
563 return SReg;
564}
565
566Register RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
567 MachineBasicBlock::iterator To,
568 bool RestoreAfter, int SPAdj,
569 bool AllowSpill) {
570 const MachineBasicBlock &MBB = *To->getParent();
571 const MachineFunction &MF = *MBB.getParent();
572
573 // Find the register whose use is furthest away.
574 MachineBasicBlock::iterator UseMI;
575 ArrayRef<MCPhysReg> AllocationOrder = RC.getRawAllocationOrder(MF);
576 std::pair<MCPhysReg, MachineBasicBlock::iterator> P =
577 findSurvivorBackwards(*MRI, MBBI, To, LiveUnits, AllocationOrder,
578 RestoreAfter);
579 MCPhysReg Reg = P.first;
580 MachineBasicBlock::iterator SpillBefore = P.second;
581 // Found an available register?
582 if (Reg != 0 && SpillBefore == MBB.end()) {
    [17.1] 'Reg' is equal to 0
    [18] Taking false branch
583 LLVM_DEBUG(dbgs() << "Scavenged free register: " << printReg(Reg, TRI)
584 << '\n');
585 return Reg;
586 }
587
588 if (!AllowSpill)
    [18.1] 'AllowSpill' is true
    [19] Taking false branch
589 return 0;
590
591 assert(Reg != 0 && "No register left to scavenge!");
592
593 MachineBasicBlock::iterator ReloadAfter =
594 RestoreAfter ? std::next(MBBI) : MBBI;
    [19.1] 'RestoreAfter' is false
    [20] '?' condition is false
595 MachineBasicBlock::iterator ReloadBefore = std::next(ReloadAfter);
596 if (ReloadBefore != MBB.end())
    [21] Taking false branch
597 LLVM_DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n');
598 ScavengedInfo &Scavenged = spill(Reg, RC, SPAdj, SpillBefore, ReloadBefore);
    [22] Calling 'RegScavenger::spill'
599 Scavenged.Restore = &*std::prev(SpillBefore);
600 LiveUnits.removeReg(Reg);
601 LLVM_DEBUG(dbgs() << "Scavenged register with spill: " << printReg(Reg, TRI)
602 << " until " << *SpillBefore);
603 return Reg;
604}
605
606/// Allocate a register for the virtual register \p VReg. The last use of
607/// \p VReg is around the current position of the register scavenger \p RS.
608/// \p ReserveAfter controls whether the scavenged register needs to be reserved
609/// after the current instruction, otherwise it will only be reserved before the
610/// current instruction.
611static Register scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
612 Register VReg, bool ReserveAfter) {
613 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
614#ifndef NDEBUG
615 // Verify that all definitions and uses are in the same basic block.
616 const MachineBasicBlock *CommonMBB = nullptr;
617 // Real definition for the reg, re-definitions are not considered.
618 const MachineInstr *RealDef = nullptr;
619 for (MachineOperand &MO : MRI.reg_nodbg_operands(VReg)) {
620 MachineBasicBlock *MBB = MO.getParent()->getParent();
621 if (CommonMBB == nullptr)
622 CommonMBB = MBB;
623 assert(MBB == CommonMBB && "All defs+uses must be in the same basic block");
624 if (MO.isDef()) {
625 const MachineInstr &MI = *MO.getParent();
626 if (!MI.readsRegister(VReg, &TRI)) {
627 assert((!RealDef || RealDef == &MI) &&
628 "Can have at most one definition which is not a redefinition");
629 RealDef = &MI;
630 }
631 }
632 }
633 assert(RealDef != nullptr && "Must have at least 1 Def");
634#endif
635
636 // We should only have one definition of the register. However to accommodate
637 // the requirements of two address code we also allow definitions in
638 // subsequent instructions provided they also read the register. That way
639 // we get a single contiguous lifetime.
640 //
641 // Definitions in MRI.def_begin() are unordered, search for the first.
642 MachineRegisterInfo::def_iterator FirstDef = llvm::find_if(
643 MRI.def_operands(VReg), [VReg, &TRI](const MachineOperand &MO) {
644 return !MO.getParent()->readsRegister(VReg, &TRI);
645 });
646 assert(FirstDef != MRI.def_end() &&
647 "Must have one definition that does not redefine vreg");
648 MachineInstr &DefMI = *FirstDef->getParent();
649
650 // The register scavenger will report a free register inserting an emergency
651 // spill/reload if necessary.
652 int SPAdj = 0;
653 const TargetRegisterClass &RC = *MRI.getRegClass(VReg);
654 Register SReg = RS.scavengeRegisterBackwards(RC, DefMI.getIterator(),
    [17] Calling 'RegScavenger::scavengeRegisterBackwards'
655 ReserveAfter, SPAdj);
656 MRI.replaceRegWith(VReg, SReg);
657 ++NumScavengedRegs;
658 return SReg;
659}
660
661/// Allocate (scavenge) vregs inside a single basic block.
662/// Returns true if the target spill callback created new vregs and a 2nd pass
663/// is necessary.
664static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
665 RegScavenger &RS,
666 MachineBasicBlock &MBB) {
667 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
668 RS.enterBasicBlockEnd(MBB);
669
670 unsigned InitialNumVirtRegs = MRI.getNumVirtRegs();
671 bool NextInstructionReadsVReg = false;
672 for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin(); ) {
    [7] Loop condition is true. Entering loop body
673 --I;
674 // Move RegScavenger to the position between *I and *std::next(I).
675 RS.backward(I);
676
677 // Look for unassigned vregs in the uses of *std::next(I).
678 if (NextInstructionReadsVReg) {
    [7.1] 'NextInstructionReadsVReg' is false
    [8] Taking false branch
679 MachineBasicBlock::iterator N = std::next(I);
680 const MachineInstr &NMI = *N;
681 for (const MachineOperand &MO : NMI.operands()) {
682 if (!MO.isReg())
683 continue;
684 Register Reg = MO.getReg();
685 // We only care about virtual registers and ignore virtual registers
686 // created by the target callbacks in the process (those will be handled
687 // in a scavenging round).
688 if (!Register::isVirtualRegister(Reg) ||
689 Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
690 continue;
691 if (!MO.readsReg())
692 continue;
693
694 Register SReg = scavengeVReg(MRI, RS, Reg, true);
695 N->addRegisterKilled(SReg, &TRI, false);
696 RS.setRegUsed(SReg);
697 }
698 }
699
700 // Look for unassigned vregs in the defs of *I.
701 NextInstructionReadsVReg = false;
702 const MachineInstr &MI = *I;
703 for (const MachineOperand &MO : MI.operands()) {
    [9] Assuming '__begin2' is not equal to '__end2'
704 if (!MO.isReg())
    [10] Taking false branch
705 continue;
706 Register Reg = MO.getReg();
707 // Only vregs, no newly created vregs (see above).
708 if (!Register::isVirtualRegister(Reg) ||
709 Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
    [11] Assuming the condition is false
    [12] Taking false branch
710 continue;
711 // We have to look at all operands anyway so we can precalculate here
712 // whether there is a reading operand. This allows us to skip the use
713 // step in the next iteration if there was none.
714 assert(!MO.isInternalRead() && "Cannot assign inside bundles");
715 assert((!MO.isUndef() || MO.isDef()) && "Cannot handle undef uses");
716 if (MO.readsReg()) {
    [13] Taking false branch
717 NextInstructionReadsVReg = true;
718 }
719 if (MO.isDef()) {
    [14] Assuming the condition is true
    [15] Taking true branch
720 Register SReg = scavengeVReg(MRI, RS, Reg, false);
    [16] Calling 'scavengeVReg'
721 I->addRegisterDead(SReg, &TRI, false);
722 }
723 }
724 }
725#ifndef NDEBUG
726 for (const MachineOperand &MO : MBB.front().operands()) {
727 if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
728 continue;
729 assert(!MO.isInternalRead() && "Cannot assign inside bundles");
730 assert((!MO.isUndef() || MO.isDef()) && "Cannot handle undef uses");
731 assert(!MO.readsReg() && "Vreg use in first instruction not allowed");
732 }
733#endif
734
735 return MRI.getNumVirtRegs() != InitialNumVirtRegs;
736}
737
738void llvm::scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS) {
739 // FIXME: Iterating over the instruction stream is unnecessary. We can simply
740 // iterate over the vreg use list, which at this point only contains machine
741 // operands for which eliminateFrameIndex need a new scratch reg.
742 MachineRegisterInfo &MRI = MF.getRegInfo();
743 // Shortcut.
744 if (MRI.getNumVirtRegs() == 0) {
    [2] Assuming the condition is false
    [3] Taking false branch
745 MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
746 return;
747 }
748
749 // Run through the instructions and find any virtual registers.
750 for (MachineBasicBlock &MBB : MF) {
751 if (MBB.empty())
    [4] Assuming the condition is false
    [5] Taking false branch
752 continue;
753
754 bool Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
    [6] Calling 'scavengeFrameVirtualRegsInBlock'
755 if (Again) {
756 LLVM_DEBUG(dbgs() << "Warning: Required two scavenging passes for block "
757 << MBB.getName() << '\n');
758 Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
759 // The target required a 2nd run (because it created new vregs while
760 // spilling). Refuse to do another pass to keep compiletime in check.
761 if (Again)
762 report_fatal_error("Incomplete scavenging after 2nd pass");
763 }
764 }
765
766 MRI.clearVirtRegs();
767 MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
768}
769
770namespace {
771
772/// This class runs register scavenging independently of the PrologEpilogInserter.
773/// This is used for testing.
774class ScavengerTest : public MachineFunctionPass {
775public:
776 static char ID;
777
778 ScavengerTest() : MachineFunctionPass(ID) {}
779
780 bool runOnMachineFunction(MachineFunction &MF) override {
781 const TargetSubtargetInfo &STI = MF.getSubtarget();
782 const TargetFrameLowering &TFL = *STI.getFrameLowering();
783
784 RegScavenger RS;
785 // Let's hope that calling those outside of PrologEpilogueInserter works
786 // well enough to initialize the scavenger with some emergency spillslots
787 // for the target.
788 BitVector SavedRegs;
789 TFL.determineCalleeSaves(MF, SavedRegs, &RS);
790 TFL.processFunctionBeforeFrameFinalized(MF, &RS);
791
792 // Let's scavenge the current function
793 scavengeFrameVirtualRegs(MF, RS);
    [1] Calling 'scavengeFrameVirtualRegs'
794 return true;
795 }
796};
797
798} // end anonymous namespace
799
800char ScavengerTest::ID;
801
802INITIALIZE_PASS(ScavengerTest, "scavenger-test",
803 "Scavenge virtual registers inside basic blocks", false, false)
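
Reading the numbered path in the listing above: from step [1] (Calling 'scavengeFrameVirtualRegs') the analyzer works its way down to RegScavenger::spill at step [22]; spill() reads the alignment of a candidate slot via MFI.getObjectAlign(FI) (steps [29]-[31]) and then evaluates A.value() at step [34], which is where Alignment.h performs the reported shift. The sketch below reproduces that last computation under the assumption, consistent with the warning, that Align stores the log2 of the alignment in a uint8_t; ToyAlign and slotWaste are simplified stand-ins, not the real LLVM definitions:

#include <cstdint>

// Stand-in for llvm::Align: the alignment is stored as its log2.
struct ToyAlign {
  uint8_t ShiftValue = 0;             // 255 fits in a uint8_t...
  uint64_t value() const {
    return uint64_t(1) << ShiftValue; // ...but shifting by >= 64 is UB
  }
};

// Shape of the "street metric" from RegScavenger::spill(): smaller
// values mean the slot wastes less size and alignment for the request.
uint64_t slotWaste(uint64_t S, uint64_t NeedSize,
                   ToyAlign A, ToyAlign NeedAlign) {
  return (S - NeedSize) + (A.value() - NeedAlign.value());
}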

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/MachineFrameInfo.h

1//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The file defines the MachineFrameInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
14#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
15
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/CodeGen/Register.h"
18#include "llvm/Support/Alignment.h"
19#include "llvm/Support/DataTypes.h"
20#include <cassert>
21#include <vector>
22
23namespace llvm {
24class raw_ostream;
25class MachineFunction;
26class MachineBasicBlock;
27class BitVector;
28class AllocaInst;
29
30/// The CalleeSavedInfo class tracks the information needed to locate where a
31/// callee saved register is in the current frame.
32/// Callee saved reg can also be saved to a different register rather than
33/// on the stack by setting DstReg instead of FrameIdx.
34class CalleeSavedInfo {
35 Register Reg;
36 union {
37 int FrameIdx;
38 unsigned DstReg;
39 };
40 /// Flag indicating whether the register is actually restored in the epilog.
41 /// In most cases, if a register is saved, it is also restored. There are
42 /// some situations, though, when this is not the case. For example, the
43 /// LR register on ARM is usually saved, but on exit from the function its
44 /// saved value may be loaded directly into PC. Since liveness tracking of
45 /// physical registers treats callee-saved registers as live outside of
46 /// the function, LR would be treated as live-on-exit, even though in these
47 /// scenarios it is not. This flag is added to indicate that the saved
48 /// register described by this object is not restored in the epilog.
49 /// The long-term solution is to model the liveness of callee-saved registers
50 /// by implicit uses on the return instructions, however, the required
51 /// changes in the ARM backend would be quite extensive.
52 bool Restored;
53 /// Flag indicating whether the register is spilled to stack or another
54 /// register.
55 bool SpilledToReg;
56
57public:
58 explicit CalleeSavedInfo(unsigned R, int FI = 0)
59 : Reg(R), FrameIdx(FI), Restored(true), SpilledToReg(false) {}
60
61 // Accessors.
62 Register getReg() const { return Reg; }
63 int getFrameIdx() const { return FrameIdx; }
64 unsigned getDstReg() const { return DstReg; }
65 void setFrameIdx(int FI) {
66 FrameIdx = FI;
67 SpilledToReg = false;
68 }
69 void setDstReg(Register SpillReg) {
70 DstReg = SpillReg;
71 SpilledToReg = true;
72 }
73 bool isRestored() const { return Restored; }
74 void setRestored(bool R) { Restored = R; }
75 bool isSpilledToReg() const { return SpilledToReg; }
76};
77
78/// The MachineFrameInfo class represents an abstract stack frame until
79/// prolog/epilog code is inserted. This class is key to allowing stack frame
80/// representation optimizations, such as frame pointer elimination. It also
81/// allows more mundane (but still important) optimizations, such as reordering
82/// of abstract objects on the stack frame.
83///
84/// To support this, the class assigns unique integer identifiers to stack
85 /// objects requested by clients. These identifiers are negative integers for
86/// fixed stack objects (such as arguments passed on the stack) or nonnegative
87/// for objects that may be reordered. Instructions which refer to stack
88/// objects use a special MO_FrameIndex operand to represent these frame
89/// indexes.
90///
91/// Because this class keeps track of all references to the stack frame, it
92/// knows when a variable sized object is allocated on the stack. This is the
93/// sole condition which prevents frame pointer elimination, which is an
94/// important optimization on register-poor architectures. Because original
95/// variable sized alloca's in the source program are the only source of
96/// variable sized stack objects, it is safe to decide whether there will be
97/// any variable sized objects before all stack objects are known (for
98/// example, register allocator spill code never needs variable sized
99/// objects).
100///
101/// When prolog/epilog code emission is performed, the final stack frame is
102/// built and the machine instructions are modified to refer to the actual
103/// stack offsets of the object, eliminating all MO_FrameIndex operands from
104/// the program.
105///
106/// Abstract Stack Frame Information
107class MachineFrameInfo {
108public:
109 /// Stack Smashing Protection (SSP) rules require that vulnerable stack
110 /// allocations are located close to the stack protector.
111 enum SSPLayoutKind {
112 SSPLK_None, ///< Did not trigger a stack protector. No effect on data
113 ///< layout.
114 SSPLK_LargeArray, ///< Array or nested array >= SSP-buffer-size. Closest
115 ///< to the stack protector.
116 SSPLK_SmallArray, ///< Array or nested array < SSP-buffer-size. 2nd closest
117 ///< to the stack protector.
118 SSPLK_AddrOf ///< The address of this allocation is exposed and
119 ///< triggered protection. 3rd closest to the protector.
120 };
121
122private:
123 // Represent a single object allocated on the stack.
124 struct StackObject {
125 // The offset of this object from the stack pointer on entry to
126 // the function. This field has no meaning for a variable sized element.
127 int64_t SPOffset;
128
129 // The size of this object on the stack. 0 means a variable sized object,
130 // ~0ULL means a dead object.
131 uint64_t Size;
132
133 // The required alignment of this stack slot.
134 Align Alignment;
135
136 // If true, the value of the stack object is set before
137 // entering the function and is not modified inside the function. By
138 // default, fixed objects are immutable unless marked otherwise.
139 bool isImmutable;
140
141 // If true the stack object is used as spill slot. It
142 // cannot alias any other memory objects.
143 bool isSpillSlot;
144
145 /// If true, this stack slot is used to spill a value (could be deopt
146 /// and/or GC related) over a statepoint. We know that the address of the
147 /// slot can't alias any LLVM IR value. This is very similar to a Spill
148 /// Slot, but is created by statepoint lowering in SelectionDAG, not the
149 /// register allocator.
150 bool isStatepointSpillSlot = false;
151
152 /// Identifier for stack memory type analogous to address space. If this is
153 /// non-0, the meaning is target defined. Offsets cannot be directly
154 /// compared between objects with different stack IDs. The object may not
155 /// necessarily reside in the same contiguous memory block as other stack
156 /// objects. Objects with differing stack IDs should not be merged or
157 /// substituted for each other.
158 //
159 /// It is assumed a target uses consecutive, increasing stack IDs starting
160 /// from 1.
161 uint8_t StackID;
162
163 /// If this stack object is originated from an Alloca instruction
164 /// this value saves the original IR allocation. Can be NULL.
165 const AllocaInst *Alloca;
166
167 // If true, the object was mapped into the local frame
168 // block and doesn't need additional handling for allocation beyond that.
169 bool PreAllocated = false;
170
171 // If true, an LLVM IR value might point to this object.
172 // Normally, spill slots and fixed-offset objects don't alias IR-accessible
173 // objects, but there are exceptions (on PowerPC, for example, some byval
174 // arguments have ABI-prescribed offsets).
175 bool isAliased;
176
177 /// If true, the object has been zero-extended.
178 bool isZExt = false;
179
180 /// If true, the object has been sign-extended.
181 bool isSExt = false;
182
183 uint8_t SSPLayout;
184
185 StackObject(uint64_t Size, Align Alignment, int64_t SPOffset,
186 bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
187 bool IsAliased, uint8_t StackID = 0)
188 : SPOffset(SPOffset), Size(Size), Alignment(Alignment),
189 isImmutable(IsImmutable), isSpillSlot(IsSpillSlot), StackID(StackID),
190 Alloca(Alloca), isAliased(IsAliased), SSPLayout(SSPLK_None) {}
191 };
192
193 /// The alignment of the stack.
194 Align StackAlignment;
195
196 /// Can the stack be realigned. This can be false if the target does not
197 /// support stack realignment, or if the user asks us not to realign the
198 /// stack. In this situation, overaligned allocas are all treated as dynamic
199 /// allocations and the target must handle them as part of DYNAMIC_STACKALLOC
200 /// lowering. All non-alloca stack objects have their alignment clamped to the
201 /// base ABI stack alignment.
202 /// FIXME: There is room for improvement in this case, in terms of
203 /// grouping overaligned allocas into a "secondary stack frame" and
204 /// then only use a single alloca to allocate this frame and only a
205 /// single virtual register to access it. Currently, without such an
206 /// optimization, each such alloca gets its own dynamic realignment.
207 bool StackRealignable;
208
209 /// Whether the function has the \c alignstack attribute.
210 bool ForcedRealign;
211
212 /// The list of stack objects allocated.
213 std::vector<StackObject> Objects;
214
215 /// This contains the number of fixed objects contained on
216 /// the stack. Because fixed objects are stored at a negative index in the
217 /// Objects list, this is also the index to the 0th object in the list.
218 unsigned NumFixedObjects = 0;
219
220 /// This boolean keeps track of whether any variable
221 /// sized objects have been allocated yet.
222 bool HasVarSizedObjects = false;
223
224 /// This boolean keeps track of whether there is a call
225 /// to builtin \@llvm.frameaddress.
226 bool FrameAddressTaken = false;
227
228 /// This boolean keeps track of whether there is a call
229 /// to builtin \@llvm.returnaddress.
230 bool ReturnAddressTaken = false;
231
232 /// This boolean keeps track of whether there is a call
233 /// to builtin \@llvm.experimental.stackmap.
234 bool HasStackMap = false;
235
236 /// This boolean keeps track of whether there is a call
237 /// to builtin \@llvm.experimental.patchpoint.
238 bool HasPatchPoint = false;
239
240 /// The prolog/epilog code inserter calculates the final stack
241 /// offsets for all of the fixed size objects, updating the Objects list
242 /// above. It then updates StackSize to contain the number of bytes that need
243 /// to be allocated on entry to the function.
244 uint64_t StackSize = 0;
245
246 /// The amount that a frame offset needs to be adjusted to
247 /// have the actual offset from the stack/frame pointer. The exact usage of
248 /// this is target-dependent, but it is typically used to adjust between
249 /// SP-relative and FP-relative offsets. E.G., if objects are accessed via
250 /// SP then OffsetAdjustment is zero; if FP is used, OffsetAdjustment is set
251 /// to the distance between the initial SP and the value in FP. For many
252 /// targets, this value is only used when generating debug info (via
253 /// TargetRegisterInfo::getFrameIndexReference); when generating code, the
254 /// corresponding adjustments are performed directly.
255 int OffsetAdjustment = 0;
256
257 /// The prolog/epilog code inserter may process objects that require greater
258 /// alignment than the default alignment the target provides.
259 /// To handle this, MaxAlignment is set to the maximum alignment
260 /// needed by the objects on the current frame. If this is greater than the
261 /// native alignment maintained by the compiler, dynamic alignment code will
262 /// be needed.
263 ///
264 Align MaxAlignment;
265
266 /// Set to true if this function adjusts the stack -- e.g.,
267 /// when calling another function. This is only valid during and after
268 /// prolog/epilog code insertion.
269 bool AdjustsStack = false;
270
271 /// Set to true if this function has any function calls.
272 bool HasCalls = false;
273
274 /// The frame index for the stack protector.
275 int StackProtectorIdx = -1;
276
277 struct ReturnProtector {
278 /// The register to use for return protector calculations
279 unsigned Register = 0;
280 /// Set to true if this function needs return protectors
281 bool Needed = false;
282 /// Does the return protector cookie need to be stored in frame
283 bool NeedsStore = true;
284 } RPI;
285
286 /// The frame index for the function context. Used for SjLj exceptions.
287 int FunctionContextIdx = -1;
288
289 /// This contains the size of the largest call frame if the target uses frame
290 /// setup/destroy pseudo instructions (as defined in the TargetFrameInfo
291 /// class). This information is important for frame pointer elimination.
292 /// It is only valid during and after prolog/epilog code insertion.
293 unsigned MaxCallFrameSize = ~0u;
294
295 /// The number of bytes of callee saved registers that the target wants to
296 /// report for the current function in the CodeView S_FRAMEPROC record.
297 unsigned CVBytesOfCalleeSavedRegisters = 0;
298
299 /// The prolog/epilog code inserter fills in this vector with each
300 /// callee saved register saved in either the frame or a different
301 /// register. Beyond its use by the prolog/epilog code inserter,
302 /// this data is used for debug info and exception handling.
303 std::vector<CalleeSavedInfo> CSInfo;
304
305 /// Has CSInfo been set yet?
306 bool CSIValid = false;
307
308 /// References to frame indices which are mapped
309 /// into the local frame allocation block. <FrameIdx, LocalOffset>
310 SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;
311
312 /// Size of the pre-allocated local frame block.
313 int64_t LocalFrameSize = 0;
314
315 /// Required alignment of the local object blob, which is the strictest
316 /// alignment of any object in it.
317 Align LocalFrameMaxAlign;
318
319 /// Whether the local object blob needs to be allocated together. If not,
320 /// PEI should ignore the isPreAllocated flags on the stack objects and
321 /// just allocate them normally.
322 bool UseLocalStackAllocationBlock = false;
323
324 /// True if the function dynamically adjusts the stack pointer through some
325 /// opaque mechanism like inline assembly or Win32 EH.
326 bool HasOpaqueSPAdjustment = false;
327
328 /// True if the function contains operations which will lower down to
329 /// instructions which manipulate the stack pointer.
330 bool HasCopyImplyingStackAdjustment = false;
331
332 /// True if the function contains a call to the llvm.vastart intrinsic.
333 bool HasVAStart = false;
334
335 /// True if this is a varargs function that contains a musttail call.
336 bool HasMustTailInVarArgFunc = false;
337
338 /// True if this function contains a tail call. If so, immutable objects like
339 /// function arguments are no longer so. A tail call *can* override fixed
340 /// stack objects like arguments so we can't treat them as immutable.
341 bool HasTailCall = false;
342
343 /// Not null, if shrink-wrapping found a better place for the prologue.
344 MachineBasicBlock *Save = nullptr;
345 /// Not null, if shrink-wrapping found a better place for the epilogue.
346 MachineBasicBlock *Restore = nullptr;
347
348public:
349 explicit MachineFrameInfo(unsigned StackAlignment, bool StackRealignable,
350 bool ForcedRealign)
351 : StackAlignment(assumeAligned(StackAlignment)),
352 StackRealignable(StackRealignable), ForcedRealign(ForcedRealign) {}
353
354 /// Return true if there are any stack objects in this function.
355 bool hasStackObjects() const { return !Objects.empty(); }
356
357 /// This method may be called any time after instruction
358 /// selection is complete to determine if the stack frame for this function
359 /// contains any variable sized objects.
360 bool hasVarSizedObjects() const { return HasVarSizedObjects; }
361
362 /// Return the index for the stack protector object.
363 int getStackProtectorIndex() const { return StackProtectorIdx; }
364 void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
365 bool hasStackProtectorIndex() const { return StackProtectorIdx != -1; }
366
367 /// Get / Set return protector calculation register
368 unsigned getReturnProtectorRegister() const { return RPI.Register; }
369 void setReturnProtectorRegister(unsigned I) { RPI.Register = I; }
370 bool hasReturnProtectorRegister() const { return RPI.Register != 0; }
371 /// Get / Set if this frame needs a return protector
372 void setReturnProtectorNeeded(bool I) { RPI.Needed = I; }
373 bool getReturnProtectorNeeded() const { return RPI.Needed; }
374 /// Get / Set if the return protector cookie needs to be stored in frame
375 void setReturnProtectorNeedsStore(bool I) { RPI.NeedsStore = I; }
376 bool getReturnProtectorNeedsStore() const { return RPI.NeedsStore; }
377
378 /// Return the index for the function context object.
379 /// This object is used for SjLj exceptions.
380 int getFunctionContextIndex() const { return FunctionContextIdx; }
381 void setFunctionContextIndex(int I) { FunctionContextIdx = I; }
382
383 /// This method may be called any time after instruction
384 /// selection is complete to determine if there is a call to
385 /// \@llvm.frameaddress in this function.
386 bool isFrameAddressTaken() const { return FrameAddressTaken; }
387 void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
388
389 /// This method may be called any time after
390 /// instruction selection is complete to determine if there is a call to
391 /// \@llvm.returnaddress in this function.
392 bool isReturnAddressTaken() const { return ReturnAddressTaken; }
393 void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }
394
395 /// This method may be called any time after instruction
396 /// selection is complete to determine if there is a call to builtin
397 /// \@llvm.experimental.stackmap.
398 bool hasStackMap() const { return HasStackMap; }
399 void setHasStackMap(bool s = true) { HasStackMap = s; }
400
401 /// This method may be called any time after instruction
402 /// selection is complete to determine if there is a call to builtin
403 /// \@llvm.experimental.patchpoint.
404 bool hasPatchPoint() const { return HasPatchPoint; }
405 void setHasPatchPoint(bool s = true) { HasPatchPoint = s; }
406
407 /// Return the minimum frame object index.
408 int getObjectIndexBegin() const { return -NumFixedObjects; }
409
410 /// Return one past the maximum frame object index.
411 int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
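A small worked example (hypothetical counts, not taken from the analyzed build) makes the index scheme concrete: fixed objects occupy the negative indices, and every accessor below translates an index into the Objects vector by adding NumFixedObjects.

    // With NumFixedObjects == 2 and three ordinary objects,
    // Objects = [fixed(-2), fixed(-1), obj(0), obj(1), obj(2)]:
    //   getObjectIndexBegin() == -2   // -NumFixedObjects
    //   getObjectIndexEnd()   ==  3   // (int)Objects.size() - NumFixedObjects
    // Accessors index the vector as Objects[ObjectIdx + NumFixedObjects],
    // so ObjectIdx -2 maps to slot 0 and ObjectIdx 2 maps to slot 4.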
412
413 /// Return the number of fixed objects.
414 unsigned getNumFixedObjects() const { return NumFixedObjects; }
415
416 /// Return the number of objects.
417 unsigned getNumObjects() const { return Objects.size(); }
418
419 /// Map a frame index into the local object block
420 void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
421 LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
422 Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
423 }
424
425 /// Get the local offset mapping for an object.
426 std::pair<int, int64_t> getLocalFrameObjectMap(int i) const {
427 assert(i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
428 "Invalid local object reference!");
429 return LocalFrameObjects[i];
430 }
431
432 /// Return the number of objects allocated into the local object block.
433 int64_t getLocalFrameObjectCount() const { return LocalFrameObjects.size(); }
434
435 /// Set the size of the local object blob.
436 void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }
437
438 /// Get the size of the local object blob.
439 int64_t getLocalFrameSize() const { return LocalFrameSize; }
440
441 /// Required alignment of the local object blob,
442 /// which is the strictest alignment of any object in it.
443 void setLocalFrameMaxAlign(Align Alignment) {
444 LocalFrameMaxAlign = Alignment;
445 }
446
447 /// Return the required alignment of the local object blob.
448 Align getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
449
450 /// Get whether the local allocation blob should be allocated together or
451 /// let PEI allocate the locals in it directly.
452 bool getUseLocalStackAllocationBlock() const {
453 return UseLocalStackAllocationBlock;
454 }
455
456 /// setUseLocalStackAllocationBlock - Set whether the local allocation blob
457 /// should be allocated together or let PEI allocate the locals in it
458 /// directly.
459 void setUseLocalStackAllocationBlock(bool v) {
460 UseLocalStackAllocationBlock = v;
461 }
462
463 /// Return true if the object was pre-allocated into the local block.
464 bool isObjectPreAllocated(int ObjectIdx) const {
465 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
466 "Invalid Object Idx!");
467 return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
468 }
469
470 /// Return the size of the specified object.
471 int64_t getObjectSize(int ObjectIdx) const {
472 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
473 "Invalid Object Idx!");
474 return Objects[ObjectIdx+NumFixedObjects].Size;
475 }
476
477 /// Change the size of the specified stack object.
478 void setObjectSize(int ObjectIdx, int64_t Size) {
479 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
480 "Invalid Object Idx!");
481 Objects[ObjectIdx+NumFixedObjects].Size = Size;
482 }
483
484 /// Return the alignment of the specified stack object.
485 Align getObjectAlign(int ObjectIdx) const {
486 assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
487 "Invalid Object Idx!");
488 return Objects[ObjectIdx + NumFixedObjects].Alignment;
Analyzer event 30: Value assigned to 'A.ShiftValue'
489 }
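Analyzer event 30 above is where the reported value originates: returning the stored Alignment copies an Align object, and the analyzer does not assume the class invariant (ShiftValue < 64) for that copy, so it tracks the full uint8_t range.

    // What the analyzer binds at event 30 (conceptual, not code from the report):
    //   A.ShiftValue := Objects[ObjectIdx + NumFixedObjects].Alignment.ShiftValue
    //   constraint: 0 <= A.ShiftValue <= 255 (nothing narrows it on this path)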
490
491 /// setObjectAlignment - Change the alignment of the specified stack object.
492 void setObjectAlignment(int ObjectIdx, Align Alignment) {
493 assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
494 "Invalid Object Idx!");
495 Objects[ObjectIdx + NumFixedObjects].Alignment = Alignment;
496
497 // Only ensure max alignment for the default stack.
498 if (getStackID(ObjectIdx) == 0)
499 ensureMaxAlignment(Alignment);
500 }
501
502 /// Return the underlying Alloca of the specified
503 /// stack object if it exists. Returns nullptr if none exists.
504 const AllocaInst* getObjectAllocation(int ObjectIdx) const {
505 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
506 "Invalid Object Idx!");
507 return Objects[ObjectIdx+NumFixedObjects].Alloca;
508 }
509
510 /// Return the assigned stack offset of the specified object
511 /// from the incoming stack pointer.
512 int64_t getObjectOffset(int ObjectIdx) const {
513 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
514 "Invalid Object Idx!");
515 assert(!isDeadObjectIndex(ObjectIdx) &&
516 "Getting frame offset for a dead object?");
517 return Objects[ObjectIdx+NumFixedObjects].SPOffset;
518 }
519
520 bool isObjectZExt(int ObjectIdx) const {
521 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
522 "Invalid Object Idx!");
523 return Objects[ObjectIdx+NumFixedObjects].isZExt;
524 }
525
526 void setObjectZExt(int ObjectIdx, bool IsZExt) {
527 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
528 "Invalid Object Idx!");
529 Objects[ObjectIdx+NumFixedObjects].isZExt = IsZExt;
530 }
531
532 bool isObjectSExt(int ObjectIdx) const {
533 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
534 "Invalid Object Idx!");
535 return Objects[ObjectIdx+NumFixedObjects].isSExt;
536 }
537
538 void setObjectSExt(int ObjectIdx, bool IsSExt) {
539 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
540 "Invalid Object Idx!");
541 Objects[ObjectIdx+NumFixedObjects].isSExt = IsSExt;
542 }
543
544 /// Set the stack frame offset of the specified object. The
545 /// offset is relative to the stack pointer on entry to the function.
546 void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
547 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
548 "Invalid Object Idx!");
549 assert(!isDeadObjectIndex(ObjectIdx) &&
550 "Setting frame offset for a dead object?");
551 Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
552 }
553
554 SSPLayoutKind getObjectSSPLayout(int ObjectIdx) const {
555 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
556 "Invalid Object Idx!");
557 return (SSPLayoutKind)Objects[ObjectIdx+NumFixedObjects].SSPLayout;
558 }
559
560 void setObjectSSPLayout(int ObjectIdx, SSPLayoutKind Kind) {
561 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
562 "Invalid Object Idx!");
563 assert(!isDeadObjectIndex(ObjectIdx) &&
564 "Setting SSP layout for a dead object?");
565 Objects[ObjectIdx+NumFixedObjects].SSPLayout = Kind;
566 }
567
568 /// Return the number of bytes that must be allocated to hold
569 /// all of the fixed size frame objects. This is only valid after
570 /// Prolog/Epilog code insertion has finalized the stack frame layout.
571 uint64_t getStackSize() const { return StackSize; }
572
573 /// Set the size of the stack.
574 void setStackSize(uint64_t Size) { StackSize = Size; }
575
576 /// Estimate and return the size of the stack frame.
577 uint64_t estimateStackSize(const MachineFunction &MF) const;
578
579 /// Return the correction for frame offsets.
580 int getOffsetAdjustment() const { return OffsetAdjustment; }
581
582 /// Set the correction for frame offsets.
583 void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
584
585 /// Return the alignment in bytes that this function must be aligned to,
586 /// which is greater than the default stack alignment provided by the target.
587 Align getMaxAlign() const { return MaxAlignment; }
588
589 /// Make sure the function is at least Align bytes aligned.
590 void ensureMaxAlignment(Align Alignment);
591
592 /// Return true if this function adjusts the stack -- e.g.,
593 /// when calling another function. This is only valid during and after
594 /// prolog/epilog code insertion.
595 bool adjustsStack() const { return AdjustsStack; }
596 void setAdjustsStack(bool V) { AdjustsStack = V; }
597
598 /// Return true if the current function has any function calls.
599 bool hasCalls() const { return HasCalls; }
600 void setHasCalls(bool V) { HasCalls = V; }
601
602 /// Returns true if the function contains opaque dynamic stack adjustments.
603 bool hasOpaqueSPAdjustment() const { return HasOpaqueSPAdjustment; }
604 void setHasOpaqueSPAdjustment(bool B) { HasOpaqueSPAdjustment = B; }
605
606 /// Returns true if the function contains operations which will lower down to
607 /// instructions which manipulate the stack pointer.
608 bool hasCopyImplyingStackAdjustment() const {
609 return HasCopyImplyingStackAdjustment;
610 }
611 void setHasCopyImplyingStackAdjustment(bool B) {
612 HasCopyImplyingStackAdjustment = B;
613 }
614
615 /// Returns true if the function calls the llvm.va_start intrinsic.
616 bool hasVAStart() const { return HasVAStart; }
617 void setHasVAStart(bool B) { HasVAStart = B; }
618
619 /// Returns true if the function is variadic and contains a musttail call.
620 bool hasMustTailInVarArgFunc() const { return HasMustTailInVarArgFunc; }
621 void setHasMustTailInVarArgFunc(bool B) { HasMustTailInVarArgFunc = B; }
622
623 /// Returns true if the function contains a tail call.
624 bool hasTailCall() const { return HasTailCall; }
625 void setHasTailCall(bool V = true) { HasTailCall = V; }
626
627 /// Computes the maximum size of a call frame and the AdjustsStack property.
628 /// This only works for targets defining
629 /// TargetInstrInfo::getCallFrameSetupOpcode(), getCallFrameDestroyOpcode(),
630 /// and getFrameSize().
631 /// This is usually computed by the prologue/epilogue inserter but some
632 /// targets may call this to compute it earlier.
633 void computeMaxCallFrameSize(const MachineFunction &MF);
634
635 /// Return the maximum size of a call frame that must be
636 /// allocated for an outgoing function call. This is only available if
637 /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
638 /// then only during or after prolog/epilog code insertion.
639 ///
640 unsigned getMaxCallFrameSize() const {
641 // TODO: Enable this assert when targets are fixed.
642 //assert(isMaxCallFrameSizeComputed() && "MaxCallFrameSize not computed yet");
643 if (!isMaxCallFrameSizeComputed())
644 return 0;
645 return MaxCallFrameSize;
646 }
647 bool isMaxCallFrameSizeComputed() const {
648 return MaxCallFrameSize != ~0u;
649 }
650 void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
651
652 /// Returns how many bytes of callee-saved registers the target pushed in the
653 /// prologue. Only used for debug info.
654 unsigned getCVBytesOfCalleeSavedRegisters() const {
655 return CVBytesOfCalleeSavedRegisters;
656 }
657 void setCVBytesOfCalleeSavedRegisters(unsigned S) {
658 CVBytesOfCalleeSavedRegisters = S;
659 }
660
661 /// Create a new object at a fixed location on the stack.
662 /// All fixed objects should be created before other objects are created for
663 /// efficiency. By default, fixed objects are not pointed to by LLVM IR
664 /// values. This returns an index with a negative value.
665 int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable,
666 bool isAliased = false);
667
668 /// Create a spill slot at a fixed location on the stack.
669 /// Returns an index with a negative value.
670 int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset,
671 bool IsImmutable = false);
672
673 /// Returns true if the specified index corresponds to a fixed stack object.
674 bool isFixedObjectIndex(int ObjectIdx) const {
675 return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
676 }
677
678 /// Returns true if the specified index corresponds
679 /// to an object that might be pointed to by an LLVM IR value.
680 bool isAliasedObjectIndex(int ObjectIdx) const {
681 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
682 "Invalid Object Idx!");
683 return Objects[ObjectIdx+NumFixedObjects].isAliased;
684 }
685
686 /// Returns true if the specified index corresponds to an immutable object.
687 bool isImmutableObjectIndex(int ObjectIdx) const {
688 // Tail calling functions can clobber their function arguments.
689 if (HasTailCall)
690 return false;
691 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
692 "Invalid Object Idx!");
693 return Objects[ObjectIdx+NumFixedObjects].isImmutable;
694 }
695
696 /// Marks the immutability of an object.
697 void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable) {
698 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
699 "Invalid Object Idx!");
700 Objects[ObjectIdx+NumFixedObjects].isImmutable = IsImmutable;
701 }
702
703 /// Returns true if the specified index corresponds to a spill slot.
704 bool isSpillSlotObjectIndex(int ObjectIdx) const {
705 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
706 "Invalid Object Idx!");
707 return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
708 }
709
710 bool isStatepointSpillSlotObjectIndex(int ObjectIdx) const {
711 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
712 "Invalid Object Idx!");
713 return Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot;
714 }
715
716 /// \see StackID
717 uint8_t getStackID(int ObjectIdx) const {
718 return Objects[ObjectIdx+NumFixedObjects].StackID;
719 }
720
721 /// \see StackID
722 void setStackID(int ObjectIdx, uint8_t ID) {
723 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
724 "Invalid Object Idx!");
725 Objects[ObjectIdx+NumFixedObjects].StackID = ID;
726 // If ID > 0, MaxAlignment may now be overly conservative.
727 // If ID == 0, MaxAlignment will need to be updated separately.
728 }
729
730 /// Returns true if the specified index corresponds to a dead object.
731 bool isDeadObjectIndex(int ObjectIdx) const {
732 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
733 "Invalid Object Idx!");
734 return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
735 }
736
737 /// Returns true if the specified index corresponds to a variable sized
738 /// object.
739 bool isVariableSizedObjectIndex(int ObjectIdx) const {
740 assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
741 "Invalid Object Idx!");
742 return Objects[ObjectIdx + NumFixedObjects].Size == 0;
743 }
744
745 void markAsStatepointSpillSlotObjectIndex(int ObjectIdx) {
746 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
747 "Invalid Object Idx!");
748 Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot = true;
749 assert(isStatepointSpillSlotObjectIndex(ObjectIdx) && "inconsistent");
750 }
751
752 /// Create a new statically sized stack object, returning
753 /// a nonnegative identifier to represent it.
754 int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot,
755 const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
756
757 /// Create a new statically sized stack object that represents a spill slot,
758 /// returning a nonnegative identifier to represent it.
759 int CreateSpillStackObject(uint64_t Size, Align Alignment);
760
761 /// Remove or mark dead a statically sized stack object.
762 void RemoveStackObject(int ObjectIdx) {
763 // Mark it dead.
764 Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
765 }
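RemoveStackObject only marks the slot rather than erasing it: it stores the same ~0ULL sentinel that isDeadObjectIndex tests for, so the indices of later objects stay stable (variable sized objects use Size == 0 as their marker in the same way). A brief sketch, where MFI and FI are hypothetical names:

    MFI.RemoveStackObject(FI);             // Objects[FI + NumFixedObjects].Size = ~0ULL
    bool Gone = MFI.isDeadObjectIndex(FI); // true: the sentinel is recognized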
766
767 /// Notify the MachineFrameInfo object that a variable sized object has been
768 /// created. This must be called whenever a variable sized object is
769 /// created, whether or not the index returned is actually used.
770 int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca);
771
772 /// Returns a reference to call saved info vector for the current function.
773 const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
774 return CSInfo;
775 }
776 /// \copydoc getCalleeSavedInfo()
777 std::vector<CalleeSavedInfo> &getCalleeSavedInfo() { return CSInfo; }
778
779 /// Used by prolog/epilog inserter to set the function's callee saved
780 /// information.
781 void setCalleeSavedInfo(std::vector<CalleeSavedInfo> CSI) {
782 CSInfo = std::move(CSI);
783 }
784
785 /// Has the callee saved info been calculated yet?
786 bool isCalleeSavedInfoValid() const { return CSIValid; }
787
788 void setCalleeSavedInfoValid(bool v) { CSIValid = v; }
789
790 MachineBasicBlock *getSavePoint() const { return Save; }
791 void setSavePoint(MachineBasicBlock *NewSave) { Save = NewSave; }
792 MachineBasicBlock *getRestorePoint() const { return Restore; }
793 void setRestorePoint(MachineBasicBlock *NewRestore) { Restore = NewRestore; }
794
795 /// Return a set of physical registers that are pristine.
796 ///
797 /// Pristine registers hold a value that is useless to the current function,
798 /// but that must be preserved - they are callee saved registers that are not
799 /// saved.
800 ///
801 /// Before the PrologueEpilogueInserter has placed the CSR spill code, this
802 /// method always returns an empty set.
803 BitVector getPristineRegs(const MachineFunction &MF) const;
804
805 /// Used by the MachineFunction printer to print information about
806 /// stack objects. Implemented in MachineFunction.cpp.
807 void print(const MachineFunction &MF, raw_ostream &OS) const;
808
809 /// dump - Print the function to stderr.
810 void dump(const MachineFunction &MF) const;
811};
812
813} // End llvm namespace
814
815#endif

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h

1//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains types to represent alignments.
10// They are instrumented to guarantee some invariants are preserved and prevent
11// invalid manipulations.
12//
13// - Align represents an alignment in bytes; it is always set and always a valid
14// power of two. Its minimum value is 1, which means no alignment requirement.
15//
16// - MaybeAlign is an optional type; it may be undefined or set. When it is set,
17// you can get the underlying Align type by using the getValue() method.
18//
19//===----------------------------------------------------------------------===//
20
21#ifndef LLVM_SUPPORT_ALIGNMENT_H_
22#define LLVM_SUPPORT_ALIGNMENT_H_
23
24#include "llvm/ADT/Optional.h"
25#include "llvm/Support/MathExtras.h"
26#include <cassert>
27#ifndef NDEBUG
28#include <string>
29#endif // NDEBUG
30
31namespace llvm {
32
33#define ALIGN_CHECK_ISPOSITIVE(decl) \
34 assert(decl > 0 && (#decl " should be defined"))
35
36/// This struct is a compact representation of a valid (non-zero power of two)
37/// alignment.
38/// It is suitable for use as static global constants.
39struct Align {
40private:
41 uint8_t ShiftValue = 0; /// The log2 of the required alignment.
42 /// ShiftValue is less than 64 by construction.
43
44 friend struct MaybeAlign;
45 friend unsigned Log2(Align);
46 friend bool operator==(Align Lhs, Align Rhs);
47 friend bool operator!=(Align Lhs, Align Rhs);
48 friend bool operator<=(Align Lhs, Align Rhs);
49 friend bool operator>=(Align Lhs, Align Rhs);
50 friend bool operator<(Align Lhs, Align Rhs);
51 friend bool operator>(Align Lhs, Align Rhs);
52 friend unsigned encode(struct MaybeAlign A);
53 friend struct MaybeAlign decodeMaybeAlign(unsigned Value);
54
55 /// A trivial type to allow construction of constexpr Align.
56 /// This is currently needed to workaround a bug in GCC 5.3 which prevents
57 /// definition of constexpr assign operators.
58 /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
59 /// FIXME: Remove this, make all assign operators constexpr and introduce user
60 /// defined literals when we don't have to support GCC 5.3 anymore.
61 /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
62 struct LogValue {
63 uint8_t Log;
64 };
65
66public:
67 /// Default is byte-aligned.
68 constexpr Align() = default;
69 /// Do not perform checks in case of copy/move construct/assign, because the
70 /// checks have been performed when building `Other`.
71 constexpr Align(const Align &Other) = default;
72 constexpr Align(Align &&Other) = default;
73 Align &operator=(const Align &Other) = default;
74 Align &operator=(Align &&Other) = default;
75
76 explicit Align(uint64_t Value) {
77 assert(Value > 0 && "Value must not be 0");
78 assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2");
79 ShiftValue = Log2_64(Value);
80 assert(ShiftValue < 64 && "Broken invariant");
81 }
82
83 /// This is a hole in the type system and should not be abused.
84 /// Needed to interact with C for instance.
85 uint64_t value() const { return uint64_t(1) << ShiftValue; }
Analyzer event 35: The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
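This is the defect the report is about: C++ leaves a shift undefined whenever the shift amount is greater than or equal to the width of the promoted left operand, so uint64_t(1) << ShiftValue is only defined while ShiftValue < 64; 255 is simply the top of the uint8_t range the analyzer still considers possible after event 30. A minimal standalone illustration (hypothetical values, not code from this header):

    #include <cstdint>

    int main() {
      std::uint8_t Shift = 255;                     // analyzer's assumed worst case
      std::uint64_t V = std::uint64_t(1) << Shift;  // undefined: shift amount >= 64
      return static_cast<int>(V);
    }

In builds where assert compiles to a no-op, nothing on this path enforces the ShiftValue < 64 invariant at run time, which is why the checker cannot rule the value out.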
86
87 /// Allow constructions of constexpr Align.
88 template <size_t kValue> constexpr static LogValue Constant() {
89 return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
90 }
91
92 /// Allow constructions of constexpr Align from types.
93 /// Compile time equivalent to Align(alignof(T)).
94 template <typename T> constexpr static LogValue Of() {
95 return Constant<std::alignment_of<T>::value>();
96 }
97
98 /// Constexpr constructor from LogValue type.
99 constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
100};
101
102/// Treats the value 0 as a 1, so Align is always at least 1.
103inline Align assumeAligned(uint64_t Value) {
104 return Value ? Align(Value) : Align();
105}
106
107/// This struct is a compact representation of a valid (power of two) or
108/// undefined (0) alignment.
109struct MaybeAlign : public llvm::Optional<Align> {
110private:
111 using UP = llvm::Optional<Align>;
112
113public:
114 /// Default is undefined.
115 MaybeAlign() = default;
116 /// Do not perform checks in case of copy/move construct/assign, because the
117 /// checks have been performed when building `Other`.
118 MaybeAlign(const MaybeAlign &Other) = default;
119 MaybeAlign &operator=(const MaybeAlign &Other) = default;
120 MaybeAlign(MaybeAlign &&Other) = default;
121 MaybeAlign &operator=(MaybeAlign &&Other) = default;
122
123 /// Use llvm::Optional<Align> constructor.
124 using UP::UP;
125
126 explicit MaybeAlign(uint64_t Value) {
127 assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&
128 "Alignment is neither 0 nor a power of 2");
129 if (Value)
130 emplace(Value);
131 }
132
133 /// For convenience, returns a valid alignment or 1 if undefined.
134 Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
135};
136
137/// Checks that SizeInBytes is a multiple of the alignment.
138inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
139 return SizeInBytes % Lhs.value() == 0;
140}
141
142/// Checks that Addr is a multiple of the alignment.
143inline bool isAddrAligned(Align Lhs, const void *Addr) {
144 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
145}
146
147/// Returns a multiple of A needed to store `Size` bytes.
148inline uint64_t alignTo(uint64_t Size, Align A) {
149 const uint64_t Value = A.value();
150 // The following line is equivalent to `(Size + Value - 1) / Value * Value`.
151
152 // The division followed by a multiplication can be thought of as a right
153 // shift followed by a left shift which zeros out the extra bits produced in
154 // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
155 // are just zero.
156
157 // Most compilers can generate this code but the pattern may be missed when
158 // multiple functions gets inlined.
159 return (Size + Value - 1) & ~(Value - 1U);
160}
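As a quick check of the bit trick (values chosen for illustration): alignTo(10, Align(8)) computes (10 + 7) & ~7 = 17 & ~7 = 16, matching the plain form (10 + 7) / 8 * 8 = 16.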
161
162/// If non-zero \p Skew is specified, the return value will be a minimal integer
163/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
164/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
165/// Skew mod \p A'.
166///
167/// Examples:
168/// \code
169/// alignTo(5, Align(8), 7) = 7
170/// alignTo(17, Align(8), 1) = 17
171/// alignTo(~0LL, Align(8), 3) = 3
172/// \endcode
173inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
174 const uint64_t Value = A.value();
175 Skew %= Value;
176 return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
177}
178
179/// Returns a multiple of A needed to store `Size` bytes.
180/// Returns `Size` if current alignment is undefined.
181inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
182 return A ? alignTo(Size, A.getValue()) : Size;
183}
184
185/// Aligns `Addr` to `Alignment` bytes, rounding up.
186inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
187 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
188 assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=
189 ArithAddr &&
190 "Overflow");
191 return alignTo(ArithAddr, Alignment);
192}
193
194/// Returns the offset to the next integer (mod 2**64) that is greater than
195/// or equal to \p Value and is a multiple of \p Align.
196inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
197 return alignTo(Value, Alignment) - Value;
198}
199
200/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
201/// bytes, rounding up.
202inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
203 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
204}
205
206/// Returns the log2 of the alignment.
207inline unsigned Log2(Align A) { return A.ShiftValue; }
208
209/// Returns the alignment that satisfies both alignments.
210/// Same semantic as MinAlign.
211inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }
212
213/// Returns the alignment that satisfies both alignments.
214/// Same semantic as MinAlign.
215inline Align commonAlignment(Align A, uint64_t Offset) {
216 return Align(MinAlign(A.value(), Offset));
217}
218
219/// Returns the alignment that satisfies both alignments.
220/// Same semantic as MinAlign.
221inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
222 return A && B ? commonAlignment(*A, *B) : A ? A : B;
223}
224
225/// Returns the alignment that satisfies both alignments.
226/// Same semantic as MinAlign.
227inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
228 return MaybeAlign(MinAlign((*A).value(), Offset));
229}
230
231/// Returns a representation of the alignment that encodes undefined as 0.
232inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }
233
234/// Dual operation of the encode function above.
235inline MaybeAlign decodeMaybeAlign(unsigned Value) {
236 if (Value == 0)
237 return MaybeAlign();
238 Align Out;
239 Out.ShiftValue = Value - 1;
240 return Out;
241}
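Note that decodeMaybeAlign writes ShiftValue directly, without re-checking the ShiftValue < 64 invariant that the explicit Align(uint64_t) constructor asserts. A well-formed round trip is harmless, but an oversized encoded value would manufacture exactly the shift the report flags (hypothetical calls, not from the analyzed path):

    llvm::MaybeAlign Good = llvm::decodeMaybeAlign(4);   // ShiftValue 3, i.e. Align(8)
    llvm::MaybeAlign Bad  = llvm::decodeMaybeAlign(256); // ShiftValue 255, unchecked
    // Bad->value() would evaluate uint64_t(1) << 255: undefined behavior.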
242
243/// Returns a representation of the alignment, the encoded value is positive by
244/// definition.
245inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
246
247/// Comparisons between Align and scalars. Rhs must be positive.
248inline bool operator==(Align Lhs, uint64_t Rhs) {
249 ALIGN_CHECK_ISPOSITIVE(Rhs);
250 return Lhs.value() == Rhs;
251}
252inline bool operator!=(Align Lhs, uint64_t Rhs) {
253 ALIGN_CHECK_ISPOSITIVE(Rhs);
254 return Lhs.value() != Rhs;
255}
256inline bool operator<=(Align Lhs, uint64_t Rhs) {
257 ALIGN_CHECK_ISPOSITIVE(Rhs);
258 return Lhs.value() <= Rhs;
259}
260inline bool operator>=(Align Lhs, uint64_t Rhs) {
261 ALIGN_CHECK_ISPOSITIVE(Rhs);
262 return Lhs.value() >= Rhs;
263}
264inline bool operator<(Align Lhs, uint64_t Rhs) {
265 ALIGN_CHECK_ISPOSITIVE(Rhs);
266 return Lhs.value() < Rhs;
267}
268inline bool operator>(Align Lhs, uint64_t Rhs) {
269 ALIGN_CHECK_ISPOSITIVE(Rhs);
270 return Lhs.value() > Rhs;
271}
272
273/// Comparisons between MaybeAlign and scalars.
274inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
275 return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
276}
277inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
278 return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
279}
280
281/// Comparisons operators between Align.
282inline bool operator==(Align Lhs, Align Rhs) {
283 return Lhs.ShiftValue == Rhs.ShiftValue;
284}
285inline bool operator!=(Align Lhs, Align Rhs) {
286 return Lhs.ShiftValue != Rhs.ShiftValue;
287}
288inline bool operator<=(Align Lhs, Align Rhs) {
289 return Lhs.ShiftValue <= Rhs.ShiftValue;
290}
291inline bool operator>=(Align Lhs, Align Rhs) {
292 return Lhs.ShiftValue >= Rhs.ShiftValue;
293}
294inline bool operator<(Align Lhs, Align Rhs) {
295 return Lhs.ShiftValue < Rhs.ShiftValue;
296}
297inline bool operator>(Align Lhs, Align Rhs) {
298 return Lhs.ShiftValue > Rhs.ShiftValue;
299}
300
301// Don't allow relational comparisons with MaybeAlign.
302bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
303bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
304bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
305bool operator>(Align Lhs, MaybeAlign Rhs) = delete;
306
307bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
308bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
309bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
310bool operator>(MaybeAlign Lhs, Align Rhs) = delete;
311
312bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
313bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
314bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
315bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316
317inline Align operator*(Align Lhs, uint64_t Rhs) {
318 assert(Rhs > 0 && "Rhs must be positive");
319 return Align(Lhs.value() * Rhs);
320}
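Because the product is funneled back through the checking Align(uint64_t) constructor, Rhs must keep the result a power of two: for example Align(4) * 4 yields Align(16), while Align(4) * 3 computes 12 and would trip the constructor's power-of-two assertion in builds that enable assertions.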
321
322inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
323 assert(Rhs > 0 && "Rhs must be positive");
324 return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
325}
326
327inline Align operator/(Align Lhs, uint64_t Divisor) {
328 assert(llvm::isPowerOf2_64(Divisor) &&
329 "Divisor must be positive and a power of 2");
330 assert(Lhs != 1 && "Can't halve byte alignment");
331 return Align(Lhs.value() / Divisor);
332}
333
334inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
335 assert(llvm::isPowerOf2_64(Divisor) &&
336 "Divisor must be positive and a power of 2");
337 return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
338}
339
340inline Align max(MaybeAlign Lhs, Align Rhs) {
341 return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
342}
343
344inline Align max(Align Lhs, MaybeAlign Rhs) {
345 return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
346}
347
348#ifndef NDEBUG
349// For usage in LLVM_DEBUG macros.
350inline std::string DebugStr(const Align &A) {
351 return std::to_string(A.value());
352}
353// For usage in LLVM_DEBUG macros.
354inline std::string DebugStr(const MaybeAlign &MA) {
355 if (MA)
356 return std::to_string(MA->value());
357 return "None";
358}
359#endif // NDEBUG
360
361#undef ALIGN_CHECK_ISPOSITIVE
362
363} // namespace llvm
364
365#endif // LLVM_SUPPORT_ALIGNMENT_H_