Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47
The result of the left shift is undefined due to shifting by '255', which is greater than or equal to the width of type 'uint64_t'
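
The warning means the analyzer believes the log2 shift amount held by llvm::Align can be 255 on this path, and shifting a uint64_t by 64 or more is undefined behavior in C++. Below is a minimal, self-contained sketch of the flagged pattern; the AlignSketch type and the main() driver are hypothetical illustrations, and only the uint64_t(1) << ShiftValue expression mirrors the shift the report points at.

    #include <cstdint>

    // Simplified stand-in for an alignment class: the alignment is kept as a
    // log2 value in an 8-bit field and reconstructed with a left shift.
    struct AlignSketch {
      uint8_t ShiftValue = 0;              // expected range: 0..63

      uint64_t value() const {
        return uint64_t(1) << ShiftValue;  // undefined when ShiftValue >= 64
      }
    };

    int main() {
      AlignSketch A;
      A.ShiftValue = 255;                  // out-of-range log2 value
      return static_cast<int>(A.value());  // the shift by 255 the report describes
    }

In the annotated source below, the analyzer's path begins at the operator new call marked as step 1 and ends at that shift inside Alignment.h.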

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MachineFunction.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/MachineFunction.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/MachineFunction.cpp

1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Collect native machine code information for a function. This allows
10// target-specific information about the generated code to be stored with each
11// function.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/CodeGen/MachineFunction.h"
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallString.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/Analysis/ConstantFolding.h"
25#include "llvm/Analysis/EHPersonalities.h"
26#include "llvm/CodeGen/MachineBasicBlock.h"
27#include "llvm/CodeGen/MachineConstantPool.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineInstr.h"
30#include "llvm/CodeGen/MachineJumpTableInfo.h"
31#include "llvm/CodeGen/MachineMemOperand.h"
32#include "llvm/CodeGen/MachineModuleInfo.h"
33#include "llvm/CodeGen/MachineRegisterInfo.h"
34#include "llvm/CodeGen/PseudoSourceValue.h"
35#include "llvm/CodeGen/TargetFrameLowering.h"
36#include "llvm/CodeGen/TargetInstrInfo.h"
37#include "llvm/CodeGen/TargetLowering.h"
38#include "llvm/CodeGen/TargetRegisterInfo.h"
39#include "llvm/CodeGen/TargetSubtargetInfo.h"
40#include "llvm/CodeGen/WasmEHFuncInfo.h"
41#include "llvm/CodeGen/WinEHFuncInfo.h"
42#include "llvm/Config/llvm-config.h"
43#include "llvm/IR/Attributes.h"
44#include "llvm/IR/BasicBlock.h"
45#include "llvm/IR/Constant.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/DebugInfoMetadata.h"
48#include "llvm/IR/DerivedTypes.h"
49#include "llvm/IR/Function.h"
50#include "llvm/IR/GlobalValue.h"
51#include "llvm/IR/Instruction.h"
52#include "llvm/IR/Instructions.h"
53#include "llvm/IR/Metadata.h"
54#include "llvm/IR/Module.h"
55#include "llvm/IR/ModuleSlotTracker.h"
56#include "llvm/IR/Value.h"
57#include "llvm/MC/MCContext.h"
58#include "llvm/MC/MCSymbol.h"
59#include "llvm/MC/SectionKind.h"
60#include "llvm/Support/Casting.h"
61#include "llvm/Support/CommandLine.h"
62#include "llvm/Support/Compiler.h"
63#include "llvm/Support/DOTGraphTraits.h"
64#include "llvm/Support/Debug.h"
65#include "llvm/Support/ErrorHandling.h"
66#include "llvm/Support/GraphWriter.h"
67#include "llvm/Support/raw_ostream.h"
68#include "llvm/Target/TargetMachine.h"
69#include <algorithm>
70#include <cassert>
71#include <cstddef>
72#include <cstdint>
73#include <iterator>
74#include <string>
75#include <type_traits>
76#include <utility>
77#include <vector>
78
79using namespace llvm;
80
81#define DEBUG_TYPE "codegen"
82
83static cl::opt<unsigned> AlignAllFunctions(
84 "align-all-functions",
85 cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
86 "means align on 16B boundaries)."),
87 cl::init(0), cl::Hidden);
88
89static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
90 using P = MachineFunctionProperties::Property;
91
92 switch(Prop) {
93 case P::FailedISel: return "FailedISel";
94 case P::IsSSA: return "IsSSA";
95 case P::Legalized: return "Legalized";
96 case P::NoPHIs: return "NoPHIs";
97 case P::NoVRegs: return "NoVRegs";
98 case P::RegBankSelected: return "RegBankSelected";
99 case P::Selected: return "Selected";
100 case P::TracksLiveness: return "TracksLiveness";
101 case P::TiedOpsRewritten: return "TiedOpsRewritten";
102 }
103 llvm_unreachable("Invalid machine function property");
104}
105
106// Pin the vtable to this file.
107void MachineFunction::Delegate::anchor() {}
108
109void MachineFunctionProperties::print(raw_ostream &OS) const {
110 const char *Separator = "";
111 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
112 if (!Properties[I])
113 continue;
114 OS << Separator << getPropertyName(static_cast<Property>(I));
115 Separator = ", ";
116 }
117}
118
119//===----------------------------------------------------------------------===//
120// MachineFunction implementation
121//===----------------------------------------------------------------------===//
122
123// Out-of-line virtual method.
124MachineFunctionInfo::~MachineFunctionInfo() = default;
125
126void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
127 MBB->getParent()->DeleteMachineBasicBlock(MBB);
128}
129
130static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
131 const Function &F) {
132 if (F.hasFnAttribute(Attribute::StackAlignment))
133 return F.getFnStackAlignment();
134 return STI->getFrameLowering()->getStackAlign().value();
135}
136
137MachineFunction::MachineFunction(Function &F, const LLVMTargetMachine &Target,
138 const TargetSubtargetInfo &STI,
139 unsigned FunctionNum, MachineModuleInfo &mmi)
140 : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
141 FunctionNumber = FunctionNum;
142 init();
143}
144
145void MachineFunction::handleInsertion(MachineInstr &MI) {
146 if (TheDelegate)
147 TheDelegate->MF_HandleInsertion(MI);
148}
149
150void MachineFunction::handleRemoval(MachineInstr &MI) {
151 if (TheDelegate)
152 TheDelegate->MF_HandleRemoval(MI);
153}
154
155void MachineFunction::init() {
156 // Assume the function starts in SSA form with correct liveness.
157 Properties.set(MachineFunctionProperties::Property::IsSSA);
158 Properties.set(MachineFunctionProperties::Property::TracksLiveness);
159 if (STI->getRegisterInfo())
160 RegInfo = new (Allocator) MachineRegisterInfo(this);
161 else
162 RegInfo = nullptr;
163
164 MFInfo = nullptr;
165 // We can realign the stack if the target supports it and the user hasn't
166 // explicitly asked us not to.
167 bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
168 !F.hasFnAttribute("no-realign-stack");
169 FrameInfo = new (Allocator) MachineFrameInfo(
170 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
171 /*ForcedRealign=*/CanRealignSP &&
172 F.hasFnAttribute(Attribute::StackAlignment));
173
174 if (F.hasFnAttribute(Attribute::StackAlignment))
175 FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());
176
177 ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
178 Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
179
180 // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
181 // FIXME: Use Function::hasOptSize().
182 if (!F.hasFnAttribute(Attribute::OptimizeForSize))
183 Alignment = std::max(Alignment,
184 STI->getTargetLowering()->getPrefFunctionAlignment());
185
186 if (AlignAllFunctions)
187 Alignment = Align(1ULL << AlignAllFunctions);
188
189 JumpTableInfo = nullptr;
190
191 if (isFuncletEHPersonality(classifyEHPersonality(
192 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
193 WinEHInfo = new (Allocator) WinEHFuncInfo();
194 }
195
196 if (isScopedEHPersonality(classifyEHPersonality(
197 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
198 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
199 }
200
201 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
202 "Can't create a MachineFunction using a Module with a "
203 "Target-incompatible DataLayout attached\n");
204
205 PSVManager =
206 std::make_unique<PseudoSourceValueManager>(*(getSubtarget().
207 getInstrInfo()));
208}
209
210MachineFunction::~MachineFunction() {
211 clear();
212}
213
214void MachineFunction::clear() {
215 Properties.reset();
216 // Don't call destructors on MachineInstr and MachineOperand. All of their
217 // memory comes from the BumpPtrAllocator which is about to be purged.
218 //
219 // Do call MachineBasicBlock destructors, it contains std::vectors.
220 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
221 I->Insts.clearAndLeakNodesUnsafely();
222 MBBNumbering.clear();
223
224 InstructionRecycler.clear(Allocator);
225 OperandRecycler.clear(Allocator);
226 BasicBlockRecycler.clear(Allocator);
227 CodeViewAnnotations.clear();
228 VariableDbgInfos.clear();
229 if (RegInfo) {
230 RegInfo->~MachineRegisterInfo();
231 Allocator.Deallocate(RegInfo);
232 }
233 if (MFInfo) {
234 MFInfo->~MachineFunctionInfo();
235 Allocator.Deallocate(MFInfo);
236 }
237
238 FrameInfo->~MachineFrameInfo();
239 Allocator.Deallocate(FrameInfo);
240
241 ConstantPool->~MachineConstantPool();
242 Allocator.Deallocate(ConstantPool);
243
244 if (JumpTableInfo) {
245 JumpTableInfo->~MachineJumpTableInfo();
246 Allocator.Deallocate(JumpTableInfo);
247 }
248
249 if (WinEHInfo) {
250 WinEHInfo->~WinEHFuncInfo();
251 Allocator.Deallocate(WinEHInfo);
252 }
253
254 if (WasmEHInfo) {
255 WasmEHInfo->~WasmEHFuncInfo();
256 Allocator.Deallocate(WasmEHInfo);
257 }
258}
259
260const DataLayout &MachineFunction::getDataLayout() const {
261 return F.getParent()->getDataLayout();
262}
263
264/// Get the JumpTableInfo for this function.
265/// If it does not already exist, allocate one.
266MachineJumpTableInfo *MachineFunction::
267getOrCreateJumpTableInfo(unsigned EntryKind) {
268 if (JumpTableInfo) return JumpTableInfo;
269
270 JumpTableInfo = new (Allocator)
271 MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
272 return JumpTableInfo;
273}
274
275DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
276 return F.getDenormalMode(FPType);
277}
278
279/// Should we be emitting segmented stack stuff for the function
280bool MachineFunction::shouldSplitStack() const {
281 return getFunction().hasFnAttribute("split-stack");
282}
283
284LLVM_NODISCARD unsigned
285MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
286 FrameInstructions.push_back(Inst);
287 return FrameInstructions.size() - 1;
288}
289
290/// This discards all of the MachineBasicBlock numbers and recomputes them.
291/// This guarantees that the MBB numbers are sequential, dense, and match the
292/// ordering of the blocks within the function. If a specific MachineBasicBlock
293/// is specified, only that block and those after it are renumbered.
294void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
295 if (empty()) { MBBNumbering.clear(); return; }
296 MachineFunction::iterator MBBI, E = end();
297 if (MBB == nullptr)
298 MBBI = begin();
299 else
300 MBBI = MBB->getIterator();
301
302 // Figure out the block number this should have.
303 unsigned BlockNo = 0;
304 if (MBBI != begin())
305 BlockNo = std::prev(MBBI)->getNumber() + 1;
306
307 for (; MBBI != E; ++MBBI, ++BlockNo) {
308 if (MBBI->getNumber() != (int)BlockNo) {
309 // Remove use of the old number.
310 if (MBBI->getNumber() != -1) {
311 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
312 "MBB number mismatch!");
313 MBBNumbering[MBBI->getNumber()] = nullptr;
314 }
315
316 // If BlockNo is already taken, set that block's number to -1.
317 if (MBBNumbering[BlockNo])
318 MBBNumbering[BlockNo]->setNumber(-1);
319
320 MBBNumbering[BlockNo] = &*MBBI;
321 MBBI->setNumber(BlockNo);
322 }
323 }
324
325 // Okay, all the blocks are renumbered. If we have compactified the block
326 // numbering, shrink MBBNumbering now.
327 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
328 MBBNumbering.resize(BlockNo);
329}
330
331/// This method iterates over the basic blocks and assigns their IsBeginSection
332/// and IsEndSection fields. This must be called after MBB layout is finalized
333/// and the SectionID's are assigned to MBBs.
334void MachineFunction::assignBeginEndSections() {
335 front().setIsBeginSection();
336 auto CurrentSectionID = front().getSectionID();
337 for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
338 if (MBBI->getSectionID() == CurrentSectionID)
339 continue;
340 MBBI->setIsBeginSection();
341 std::prev(MBBI)->setIsEndSection();
342 CurrentSectionID = MBBI->getSectionID();
343 }
344 back().setIsEndSection();
345}
346
347/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
348MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
349 const DebugLoc &DL,
350 bool NoImplicit) {
351 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
352 MachineInstr(*this, MCID, DL, NoImplicit);
353}
354
355/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
356/// identical in all ways except the instruction has no parent, prev, or next.
357MachineInstr *
358MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
359 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
360 MachineInstr(*this, *Orig);
361}
362
363MachineInstr &MachineFunction::CloneMachineInstrBundle(MachineBasicBlock &MBB,
364 MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) {
365 MachineInstr *FirstClone = nullptr;
366 MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
367 while (true) {
368 MachineInstr *Cloned = CloneMachineInstr(&*I);
369 MBB.insert(InsertBefore, Cloned);
370 if (FirstClone == nullptr) {
371 FirstClone = Cloned;
372 } else {
373 Cloned->bundleWithPred();
374 }
375
376 if (!I->isBundledWithSucc())
377 break;
378 ++I;
379 }
380 // Copy over call site info to the cloned instruction if needed. If Orig is in
381 // a bundle, copyCallSiteInfo takes care of finding the call instruction in
382 // the bundle.
383 if (Orig.shouldUpdateCallSiteInfo())
384 copyCallSiteInfo(&Orig, FirstClone);
385 return *FirstClone;
386}
387
388/// Delete the given MachineInstr.
389///
390/// This function also serves as the MachineInstr destructor - the real
391/// ~MachineInstr() destructor must be empty.
392void
393MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
394 // Verify that a call site info is at valid state. This assertion should
395 // be triggered during the implementation of support for the
396 // call site info of a new architecture. If the assertion is triggered,
397 // back trace will tell where to insert a call to updateCallSiteInfo().
398 assert((!MI->isCandidateForCallSiteEntry() ||
399 CallSitesInfo.find(MI) == CallSitesInfo.end()) &&
400 "Call site info was not updated!");
401 // Strip it for parts. The operand array and the MI object itself are
402 // independently recyclable.
403 if (MI->Operands)
404 deallocateOperandArray(MI->CapOperands, MI->Operands);
405 // Don't call ~MachineInstr() which must be trivial anyway because
406 // ~MachineFunction drops whole lists of MachineInstrs without calling their
407 // destructors.
408 InstructionRecycler.Deallocate(Allocator, MI);
409}
410
411/// Allocate a new MachineBasicBlock. Use this instead of
412/// `new MachineBasicBlock'.
413MachineBasicBlock *
414MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
415 return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
416 MachineBasicBlock(*this, bb);
417}
418
419/// Delete the given MachineBasicBlock.
420void
421MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
422 assert(MBB->getParent() == this && "MBB parent mismatch!");
423 // Clean up any references to MBB in jump tables before deleting it.
424 if (JumpTableInfo)
425 JumpTableInfo->RemoveMBBFromJumpTables(MBB);
426 MBB->~MachineBasicBlock();
427 BasicBlockRecycler.Deallocate(Allocator, MBB);
428}
429
430MachineMemOperand *MachineFunction::getMachineMemOperand(
431 MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
432 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
433 SyncScope::ID SSID, AtomicOrdering Ordering,
434 AtomicOrdering FailureOrdering) {
435 return new (Allocator)
436 MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
437 SSID, Ordering, FailureOrdering);
438}
439
440MachineMemOperand *MachineFunction::getMachineMemOperand(
441 MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
442 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
443 SyncScope::ID SSID, AtomicOrdering Ordering,
444 AtomicOrdering FailureOrdering) {
445 return new (Allocator)
446 MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
447 Ordering, FailureOrdering);
448}
449
450MachineMemOperand *MachineFunction::getMachineMemOperand(
451 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, uint64_t Size) {
452 return new (Allocator)
453 MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
454 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
455 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
456}
457
458MachineMemOperand *MachineFunction::getMachineMemOperand(
459 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
460 return new (Allocator)
461 MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
462 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
463 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
464}
465
466MachineMemOperand *
467MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
468 int64_t Offset, LLT Ty) {
469 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
470
471 // If there is no pointer value, the offset isn't tracked so we need to adjust
472 // the base alignment.
473 Align Alignment = PtrInfo.V.isNull()
474 ? commonAlignment(MMO->getBaseAlign(), Offset)
475 : MMO->getBaseAlign();
476
477 // Do not preserve ranges, since we don't necessarily know what the high bits
478 // are anymore.
479 return new (Allocator) MachineMemOperand(
480 PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
481 MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
482 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
483}
484
485MachineMemOperand *
486MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
487 const AAMDNodes &AAInfo) {
488 MachinePointerInfo MPI = MMO->getValue() ?
489 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
490 MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
491
492 return new (Allocator) MachineMemOperand(
493 MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
494 MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
495 MMO->getFailureOrdering());
496}
497
498MachineMemOperand *
499MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
500 MachineMemOperand::Flags Flags) {
501 return new (Allocator) MachineMemOperand(
1. Calling 'operator new<llvm::MallocAllocator, 4096UL, 4096UL, 128UL>'
502 MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
503 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
504 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
505}
506
507MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
508 ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
509 MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker) {
510 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
511 PostInstrSymbol, HeapAllocMarker);
512}
513
514const char *MachineFunction::createExternalSymbolName(StringRef Name) {
515 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
516 llvm::copy(Name, Dest);
517 Dest[Name.size()] = 0;
518 return Dest;
519}
520
521uint32_t *MachineFunction::allocateRegMask() {
522 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
523 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
524 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
525 memset(Mask, 0, Size * sizeof(Mask[0]));
526 return Mask;
527}
528
529ArrayRef<int> MachineFunction::allocateShuffleMask(ArrayRef<int> Mask) {
530 int* AllocMask = Allocator.Allocate<int>(Mask.size());
531 copy(Mask, AllocMask);
532 return {AllocMask, Mask.size()};
533}
534
535#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
536LLVM_DUMP_METHOD void MachineFunction::dump() const {
537 print(dbgs());
538}
539#endif
540
541StringRef MachineFunction::getName() const {
542 return getFunction().getName();
543}
544
545void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
546 OS << "# Machine code for function " << getName() << ": ";
547 getProperties().print(OS);
548 OS << '\n';
549
550 // Print Frame Information
551 FrameInfo->print(*this, OS);
552
553 // Print JumpTable Information
554 if (JumpTableInfo)
555 JumpTableInfo->print(OS);
556
557 // Print Constant Pool
558 ConstantPool->print(OS);
559
560 const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
561
562 if (RegInfo && !RegInfo->livein_empty()) {
563 OS << "Function Live Ins: ";
564 for (MachineRegisterInfo::livein_iterator
565 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
566 OS << printReg(I->first, TRI);
567 if (I->second)
568 OS << " in " << printReg(I->second, TRI);
569 if (std::next(I) != E)
570 OS << ", ";
571 }
572 OS << '\n';
573 }
574
575 ModuleSlotTracker MST(getFunction().getParent());
576 MST.incorporateFunction(getFunction());
577 for (const auto &BB : *this) {
578 OS << '\n';
579 // If we print the whole function, print it at its most verbose level.
580 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
581 }
582
583 OS << "\n# End machine code for function " << getName() << ".\n\n";
584}
585
586/// True if this function needs frame moves for debug or exceptions.
587bool MachineFunction::needsFrameMoves() const {
588 return getMMI().hasDebugInfo() ||
589 getTarget().Options.ForceDwarfFrameSection ||
590 F.needsUnwindTableEntry();
591}
592
593namespace llvm {
594
595 template<>
596 struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
597 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
598
599 static std::string getGraphName(const MachineFunction *F) {
600 return ("CFG for '" + F->getName() + "' function").str();
601 }
602
603 std::string getNodeLabel(const MachineBasicBlock *Node,
604 const MachineFunction *Graph) {
605 std::string OutStr;
606 {
607 raw_string_ostream OSS(OutStr);
608
609 if (isSimple()) {
610 OSS << printMBBReference(*Node);
611 if (const BasicBlock *BB = Node->getBasicBlock())
612 OSS << ": " << BB->getName();
613 } else
614 Node->print(OSS);
615 }
616
617 if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
618
619 // Process string output to make it nicer...
620 for (unsigned i = 0; i != OutStr.length(); ++i)
621 if (OutStr[i] == '\n') { // Left justify
622 OutStr[i] = '\\';
623 OutStr.insert(OutStr.begin()+i+1, 'l');
624 }
625 return OutStr;
626 }
627 };
628
629} // end namespace llvm
630
631void MachineFunction::viewCFG() const
632{
633#ifndef NDEBUG
634 ViewGraph(this, "mf" + getName());
635#else
636 errs() << "MachineFunction::viewCFG is only available in debug builds on "
637 << "systems with Graphviz or gv!\n";
638#endif // NDEBUG
639}
640
641void MachineFunction::viewCFGOnly() const
642{
643#ifndef NDEBUG
644 ViewGraph(this, "mf" + getName(), true);
645#else
646 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
647 << "systems with Graphviz or gv!\n";
648#endif // NDEBUG
649}
650
651/// Add the specified physical register as a live-in value and
652/// create a corresponding virtual register for it.
653Register MachineFunction::addLiveIn(MCRegister PReg,
654 const TargetRegisterClass *RC) {
655 MachineRegisterInfo &MRI = getRegInfo();
656 Register VReg = MRI.getLiveInVirtReg(PReg);
657 if (VReg) {
658 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
659 (void)VRegRC;
660 // A physical register can be added several times.
661 // Between two calls, the register class of the related virtual register
662 // may have been constrained to match some operation constraints.
663 // In that case, check that the current register class includes the
664 // physical register and is a sub class of the specified RC.
665 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
666 RC->hasSubClassEq(VRegRC))) &&
667 "Register class mismatch!");
668 return VReg;
669 }
670 VReg = MRI.createVirtualRegister(RC);
671 MRI.addLiveIn(PReg, VReg);
672 return VReg;
673}
674
675/// Return the MCSymbol for the specified non-empty jump table.
676/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
677/// normal 'L' label is returned.
678MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
679 bool isLinkerPrivate) const {
680 const DataLayout &DL = getDataLayout();
681 assert(JumpTableInfo && "No jump tables");
682 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
683
684 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
685 : DL.getPrivateGlobalPrefix();
686 SmallString<60> Name;
687 raw_svector_ostream(Name)
688 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
689 return Ctx.getOrCreateSymbol(Name);
690}
691
692/// Return a function-local symbol to represent the PIC base.
693MCSymbol *MachineFunction::getPICBaseSymbol() const {
694 const DataLayout &DL = getDataLayout();
695 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
696 Twine(getFunctionNumber()) + "$pb");
697}
698
699/// \name Exception Handling
700/// \{
701
702LandingPadInfo &
703MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
704 unsigned N = LandingPads.size();
705 for (unsigned i = 0; i < N; ++i) {
706 LandingPadInfo &LP = LandingPads[i];
707 if (LP.LandingPadBlock == LandingPad)
708 return LP;
709 }
710
711 LandingPads.push_back(LandingPadInfo(LandingPad));
712 return LandingPads[N];
713}
714
715void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
716 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
717 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
718 LP.BeginLabels.push_back(BeginLabel);
719 LP.EndLabels.push_back(EndLabel);
720}
721
722MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
723 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
724 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
725 LP.LandingPadLabel = LandingPadLabel;
726
727 const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
728 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
729 if (const auto *PF =
730 dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()))
731 getMMI().addPersonality(PF);
732
733 if (LPI->isCleanup())
734 addCleanup(LandingPad);
735
736 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
737 // correct, but we need to do it this way because of how the DWARF EH
738 // emitter processes the clauses.
739 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
740 Value *Val = LPI->getClause(I - 1);
741 if (LPI->isCatch(I - 1)) {
742 addCatchTypeInfo(LandingPad,
743 dyn_cast<GlobalValue>(Val->stripPointerCasts()));
744 } else {
745 // Add filters in a list.
746 auto *CVal = cast<Constant>(Val);
747 SmallVector<const GlobalValue *, 4> FilterList;
748 for (User::op_iterator II = CVal->op_begin(), IE = CVal->op_end();
749 II != IE; ++II)
750 FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
751
752 addFilterTypeInfo(LandingPad, FilterList);
753 }
754 }
755
756 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
757 for (unsigned I = CPI->getNumArgOperands(); I != 0; --I) {
758 Value *TypeInfo = CPI->getArgOperand(I - 1)->stripPointerCasts();
759 addCatchTypeInfo(LandingPad, dyn_cast<GlobalValue>(TypeInfo));
760 }
761
762 } else {
763 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
764 }
765
766 return LandingPadLabel;
767}
768
769void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
770 ArrayRef<const GlobalValue *> TyInfo) {
771 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
772 for (unsigned N = TyInfo.size(); N; --N)
773 LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
774}
775
776void MachineFunction::addFilterTypeInfo(MachineBasicBlock *LandingPad,
777 ArrayRef<const GlobalValue *> TyInfo) {
778 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
779 std::vector<unsigned> IdsInFilter(TyInfo.size());
780 for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
781 IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
782 LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
783}
784
785void MachineFunction::tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap,
786 bool TidyIfNoBeginLabels) {
787 for (unsigned i = 0; i != LandingPads.size(); ) {
788 LandingPadInfo &LandingPad = LandingPads[i];
789 if (LandingPad.LandingPadLabel &&
790 !LandingPad.LandingPadLabel->isDefined() &&
791 (!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
792 LandingPad.LandingPadLabel = nullptr;
793
794 // Special case: we *should* emit LPs with null LP MBB. This indicates
795 // "nounwind" case.
796 if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
797 LandingPads.erase(LandingPads.begin() + i);
798 continue;
799 }
800
801 if (TidyIfNoBeginLabels) {
802 for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
803 MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
804 MCSymbol *EndLabel = LandingPad.EndLabels[j];
805 if ((BeginLabel->isDefined() || (LPMap && (*LPMap)[BeginLabel] != 0)) &&
806 (EndLabel->isDefined() || (LPMap && (*LPMap)[EndLabel] != 0)))
807 continue;
808
809 LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
810 LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
811 --j;
812 --e;
813 }
814
815 // Remove landing pads with no try-ranges.
816 if (LandingPads[i].BeginLabels.empty()) {
817 LandingPads.erase(LandingPads.begin() + i);
818 continue;
819 }
820 }
821
822 // If there is no landing pad, ensure that the list of typeids is empty.
823 // If the only typeid is a cleanup, this is the same as having no typeids.
824 if (!LandingPad.LandingPadBlock ||
825 (LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
826 LandingPad.TypeIds.clear();
827 ++i;
828 }
829}
830
831void MachineFunction::addCleanup(MachineBasicBlock *LandingPad) {
832 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
833 LP.TypeIds.push_back(0);
834}
835
836void MachineFunction::addSEHCatchHandler(MachineBasicBlock *LandingPad,
837 const Function *Filter,
838 const BlockAddress *RecoverBA) {
839 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
840 SEHHandler Handler;
841 Handler.FilterOrFinally = Filter;
842 Handler.RecoverBA = RecoverBA;
843 LP.SEHHandlers.push_back(Handler);
844}
845
846void MachineFunction::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
847 const Function *Cleanup) {
848 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
849 SEHHandler Handler;
850 Handler.FilterOrFinally = Cleanup;
851 Handler.RecoverBA = nullptr;
852 LP.SEHHandlers.push_back(Handler);
853}
854
855void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
856 ArrayRef<unsigned> Sites) {
857 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
858}
859
860unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
861 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
862 if (TypeInfos[i] == TI) return i + 1;
863
864 TypeInfos.push_back(TI);
865 return TypeInfos.size();
866}
867
868int MachineFunction::getFilterIDFor(std::vector<unsigned> &TyIds) {
869 // If the new filter coincides with the tail of an existing filter, then
870 // re-use the existing filter. Folding filters more than this requires
871 // re-ordering filters and/or their elements - probably not worth it.
872 for (unsigned i : FilterEnds) {
873 unsigned j = TyIds.size();
874
875 while (i && j)
876 if (FilterIds[--i] != TyIds[--j])
877 goto try_next;
878
879 if (!j)
880 // The new filter coincides with range [i, end) of the existing filter.
881 return -(1 + i);
882
883try_next:;
884 }
885
886 // Add the new filter.
887 int FilterID = -(1 + FilterIds.size());
888 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
889 llvm::append_range(FilterIds, TyIds);
890 FilterEnds.push_back(FilterIds.size());
891 FilterIds.push_back(0); // terminator
892 return FilterID;
893}
894
895MachineFunction::CallSiteInfoMap::iterator
896MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
897 assert(MI->isCandidateForCallSiteEntry() &&
898 "Call site info refers only to call (MI) candidates");
899
900 if (!Target.Options.EmitCallSiteInfo)
901 return CallSitesInfo.end();
902 return CallSitesInfo.find(MI);
903}
904
905/// Return the call machine instruction or find a call within bundle.
906static const MachineInstr *getCallInstr(const MachineInstr *MI) {
907 if (!MI->isBundle())
908 return MI;
909
910 for (auto &BMI : make_range(getBundleStart(MI->getIterator()),
911 getBundleEnd(MI->getIterator())))
912 if (BMI.isCandidateForCallSiteEntry())
913 return &BMI;
914
915 llvm_unreachable("Unexpected bundle without a call site candidate");
916}
917
918void MachineFunction::eraseCallSiteInfo(const MachineInstr *MI) {
919 assert(MI->shouldUpdateCallSiteInfo() &&
920 "Call site info refers only to call (MI) candidates or "
921 "candidates inside bundles");
922
923 const MachineInstr *CallMI = getCallInstr(MI);
924 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
925 if (CSIt == CallSitesInfo.end())
926 return;
927 CallSitesInfo.erase(CSIt);
928}
929
930void MachineFunction::copyCallSiteInfo(const MachineInstr *Old,
931 const MachineInstr *New) {
932 assert(Old->shouldUpdateCallSiteInfo() &&
933 "Call site info refers only to call (MI) candidates or "
934 "candidates inside bundles");
935
936 if (!New->isCandidateForCallSiteEntry())
937 return eraseCallSiteInfo(Old);
938
939 const MachineInstr *OldCallMI = getCallInstr(Old);
940 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
941 if (CSIt == CallSitesInfo.end())
942 return;
943
944 CallSiteInfo CSInfo = CSIt->second;
945 CallSitesInfo[New] = CSInfo;
946}
947
948void MachineFunction::moveCallSiteInfo(const MachineInstr *Old,
949 const MachineInstr *New) {
950 assert(Old->shouldUpdateCallSiteInfo() &&
951 "Call site info refers only to call (MI) candidates or "
952 "candidates inside bundles");
953
954 if (!New->isCandidateForCallSiteEntry())
955 return eraseCallSiteInfo(Old);
956
957 const MachineInstr *OldCallMI = getCallInstr(Old);
958 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
959 if (CSIt == CallSitesInfo.end())
960 return;
961
962 CallSiteInfo CSInfo = std::move(CSIt->second);
963 CallSitesInfo.erase(CSIt);
964 CallSitesInfo[New] = CSInfo;
965}
966
967void MachineFunction::setDebugInstrNumberingCount(unsigned Num) {
968 DebugInstrNumberingCount = Num;
969}
970
971void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
972 DebugInstrOperandPair B,
973 unsigned Subreg) {
974 // Catch any accidental self-loops.
975 assert(A.first != B.first);
976 DebugValueSubstitutions.push_back({A, B, Subreg});
977}
978
979void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
980 MachineInstr &New,
981 unsigned MaxOperand) {
982 // If the Old instruction wasn't tracked at all, there is no work to do.
983 unsigned OldInstrNum = Old.peekDebugInstrNum();
984 if (!OldInstrNum)
985 return;
986
987 // Iterate over all operands looking for defs to create substitutions for.
988 // Avoid creating new instr numbers unless we create a new substitution.
989 // While this has no functional effect, it risks confusing someone reading
990 // MIR output.
991 // Examine all the operands, or the first N specified by the caller.
992 MaxOperand = std::min(MaxOperand, Old.getNumOperands());
993 for (unsigned int I = 0; I < MaxOperand; ++I) {
994 const auto &OldMO = Old.getOperand(I);
995 auto &NewMO = New.getOperand(I);
996 (void)NewMO;
997
998 if (!OldMO.isReg() || !OldMO.isDef())
999 continue;
1000 assert(NewMO.isDef());
1001
1002 unsigned NewInstrNum = New.getDebugInstrNum();
1003 makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
1004 std::make_pair(NewInstrNum, I));
1005 }
1006}
1007
1008auto MachineFunction::salvageCopySSA(MachineInstr &MI)
1009 -> DebugInstrOperandPair {
1010 MachineRegisterInfo &MRI = getRegInfo();
1011 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
1012 const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();
1013
1014 // Chase the value read by a copy-like instruction back to the instruction
1015 // that ultimately _defines_ that value. This may pass:
1016 // * Through multiple intermediate copies, including subregister moves /
1017 // copies,
1018 // * Copies from physical registers that must then be traced back to the
1019 // defining instruction,
1020 // * Or, physical registers may be live-in to (only) the entry block, which
1021 // requires a DBG_PHI to be created.
1022 // We can pursue this problem in that order: trace back through copies,
1023 // optionally through a physical register, to a defining instruction. We
1024 // should never move from physreg to vreg. As we're still in SSA form, no need
1025 // to worry about partial definitions of registers.
1026
1027 // Helper lambda to interpret a copy-like instruction. Takes instruction,
1028 // returns the register read and any subregister identifying which part is
1029 // read.
1030 auto GetRegAndSubreg =
1031 [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
1032 Register NewReg, OldReg;
1033 unsigned SubReg;
1034 if (Cpy.isCopy()) {
1035 OldReg = Cpy.getOperand(0).getReg();
1036 NewReg = Cpy.getOperand(1).getReg();
1037 SubReg = Cpy.getOperand(1).getSubReg();
1038 } else if (Cpy.isSubregToReg()) {
1039 OldReg = Cpy.getOperand(0).getReg();
1040 NewReg = Cpy.getOperand(2).getReg();
1041 SubReg = Cpy.getOperand(3).getImm();
1042 } else {
1043 auto CopyDetails = *TII.isCopyInstr(Cpy);
1044 const MachineOperand &Src = *CopyDetails.Source;
1045 const MachineOperand &Dest = *CopyDetails.Destination;
1046 OldReg = Dest.getReg();
1047 NewReg = Src.getReg();
1048 SubReg = Src.getSubReg();
1049 }
1050
1051 return {NewReg, SubReg};
1052 };
1053
1054 // First seek either the defining instruction, or a copy from a physreg.
1055 // During search, the current state is the current copy instruction, and which
1056 // register we've read. Accumulate qualifying subregisters into SubregsSeen;
1057 // deal with those later.
1058 auto State = GetRegAndSubreg(MI);
1059 auto CurInst = MI.getIterator();
1060 SmallVector<unsigned, 4> SubregsSeen;
1061 while (true) {
1062 // If we've found a copy from a physreg, first portion of search is over.
1063 if (!State.first.isVirtual())
1064 break;
1065
1066 // Record any subregister qualifier.
1067 if (State.second)
1068 SubregsSeen.push_back(State.second);
1069
1070 assert(MRI.hasOneDef(State.first));
1071 MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
1072 CurInst = Inst.getIterator();
1073
1074 // Any non-copy instruction is the defining instruction we're seeking.
1075 if (!Inst.isCopyLike() && !TII.isCopyInstr(Inst))
1076 break;
1077 State = GetRegAndSubreg(Inst);
1078 };
1079
1080 // Helper lambda to apply additional subregister substitutions to a known
1081 // instruction/operand pair. Adds new (fake) substitutions so that we can
1082 // record the subregister. FIXME: this isn't very space efficient if multiple
1083 // values are tracked back through the same copies; cache something later.
1084 auto ApplySubregisters =
1085 [&](DebugInstrOperandPair P) -> DebugInstrOperandPair {
1086 for (unsigned Subreg : reverse(SubregsSeen)) {
1087 // Fetch a new instruction number, not attached to an actual instruction.
1088 unsigned NewInstrNumber = getNewDebugInstrNum();
1089 // Add a substitution from the "new" number to the known one, with a
1090 // qualifying subreg.
1091 makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
1092 // Return the new number; to find the underlying value, consumers need to
1093 // deal with the qualifying subreg.
1094 P = {NewInstrNumber, 0};
1095 }
1096 return P;
1097 };
1098
1099 // If we managed to find the defining instruction after COPYs, return an
1100 // instruction / operand pair after adding subregister qualifiers.
1101 if (State.first.isVirtual()) {
1102 // Virtual register def -- we can just look up where this happens.
1103 MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
1104 for (auto &MO : Inst->operands()) {
1105 if (!MO.isReg() || !MO.isDef() || MO.getReg() != State.first)
1106 continue;
1107 return ApplySubregisters(
1108 {Inst->getDebugInstrNum(), Inst->getOperandNo(&MO)});
1109 }
1110
1111 llvm_unreachable("Vreg def with no corresponding operand?");
1112 }
1113
1114 // Our search ended in a copy from a physreg: walk back up the function
1115 // looking for whatever defines the physreg.
1116 assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
1117 State = GetRegAndSubreg(*CurInst);
1118 Register RegToSeek = State.first;
1119
1120 auto RMII = CurInst->getReverseIterator();
1121 auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
1122 for (auto &ToExamine : PrevInstrs) {
1123 for (auto &MO : ToExamine.operands()) {
1124 // Test for operand that defines something aliasing RegToSeek.
1125 if (!MO.isReg() || !MO.isDef() ||
1126 !TRI.regsOverlap(RegToSeek, MO.getReg()))
1127 continue;
1128
1129 return ApplySubregisters(
1130 {ToExamine.getDebugInstrNum(), ToExamine.getOperandNo(&MO)});
1131 }
1132 }
1133
1134 MachineBasicBlock &InsertBB = *CurInst->getParent();
1135
1136 // We reached the start of the block before finding a defining instruction.
1137 // It could be from a constant register, otherwise it must be an argument.
1138 if (TRI.isConstantPhysReg(State.first)) {
1139 // We can produce a DBG_PHI that identifies the constant physreg. Doesn't
1140 // matter where we put it, as it's constant valued.
1141 assert(CurInst->isCopy());
1142 } else if (State.first == TRI.getFrameRegister(*this)) {
1143 // LLVM IR is allowed to read the framepointer by calling a
1144 // llvm.frameaddress.* intrinsic. We can support this by emitting a
1145 // DBG_PHI $fp. This isn't ideal, because it extends the behaviours /
1146 // position that DBG_PHIs appear at, limiting what can be done later.
1147 // TODO: see if there's a better way of expressing these variable
1148 // locations.
1149 ;
1150 } else {
1151 // Assert that this is the entry block. If it isn't, then there is some
1152 // code construct we don't recognise that deals with physregs across
1153 // blocks.
1154 assert(!State.first.isVirtual());
1155 assert(&*InsertBB.getParent()->begin() == &InsertBB);
1156 }
1157
1158 // Create DBG_PHI for specified physreg.
1159 auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
1160 TII.get(TargetOpcode::DBG_PHI));
1161 Builder.addReg(State.first, RegState::Debug);
1162 unsigned NewNum = getNewDebugInstrNum();
1163 Builder.addImm(NewNum);
1164 return ApplySubregisters({NewNum, 0u});
1165}
1166
1167void MachineFunction::finalizeDebugInstrRefs() {
1168 auto *TII = getSubtarget().getInstrInfo();
1169
1170 auto MakeDbgValue = [&](MachineInstr &MI) {
1171 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE);
1172 MI.setDesc(RefII);
1173 MI.getOperand(1).ChangeToRegister(0, false);
1174 MI.getOperand(0).setIsDebug();
1175 };
1176
1177 if (!getTarget().Options.ValueTrackingVariableLocations)
1178 return;
1179
1180 for (auto &MBB : *this) {
1181 for (auto &MI : MBB) {
1182 if (!MI.isDebugRef() || !MI.getOperand(0).isReg())
1183 continue;
1184
1185 Register Reg = MI.getOperand(0).getReg();
1186
1187 // Some vregs can be deleted as redundant in the meantime. Mark those
1188 // as DBG_VALUE $noreg.
1189 if (Reg == 0) {
1190 MakeDbgValue(MI);
1191 continue;
1192 }
1193
1194 assert(Reg.isVirtual());
1195 MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);
1196 assert(RegInfo->hasOneDef(Reg));
1197
1198 // If we've found a copy-like instruction, follow it back to the
1199 // instruction that defines the source value, see salvageCopySSA docs
1200 // for why this is important.
1201 if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
1202 auto Result = salvageCopySSA(DefMI);
1203 MI.getOperand(0).ChangeToImmediate(Result.first);
1204 MI.getOperand(1).setImm(Result.second);
1205 } else {
1206 // Otherwise, identify the operand number that the VReg refers to.
1207 unsigned OperandIdx = 0;
1208 for (const auto &MO : DefMI.operands()) {
1209 if (MO.isReg() && MO.isDef() && MO.getReg() == Reg)
1210 break;
1211 ++OperandIdx;
1212 }
1213 assert(OperandIdx < DefMI.getNumOperands());
1214
1215 // Morph this instr ref to point at the given instruction and operand.
1216 unsigned ID = DefMI.getDebugInstrNum();
1217 MI.getOperand(0).ChangeToImmediate(ID);
1218 MI.getOperand(1).setImm(OperandIdx);
1219 }
1220 }
1221 }
1222}
1223
1224/// \}
1225
1226//===----------------------------------------------------------------------===//
1227// MachineJumpTableInfo implementation
1228//===----------------------------------------------------------------------===//
1229
1230/// Return the size of each entry in the jump table.
1231unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
1232 // The size of a jump table entry is 4 bytes unless the entry is just the
1233 // address of a block, in which case it is the pointer size.
1234 switch (getEntryKind()) {
1235 case MachineJumpTableInfo::EK_BlockAddress:
1236 return TD.getPointerSize();
1237 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
1238 return 8;
1239 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
1240 case MachineJumpTableInfo::EK_LabelDifference32:
1241 case MachineJumpTableInfo::EK_Custom32:
1242 return 4;
1243 case MachineJumpTableInfo::EK_Inline:
1244 return 0;
1245 }
1246 llvm_unreachable("Unknown jump table encoding!");
1247}
1248
1249/// Return the alignment of each entry in the jump table.
1250unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
1251 // The alignment of a jump table entry is the alignment of int32 unless the
1252 // entry is just the address of a block, in which case it is the pointer
1253 // alignment.
1254 switch (getEntryKind()) {
1255 case MachineJumpTableInfo::EK_BlockAddress:
1256 return TD.getPointerABIAlignment(0).value();
1257 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
1258 return TD.getABIIntegerTypeAlignment(64).value();
1259 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
1260 case MachineJumpTableInfo::EK_LabelDifference32:
1261 case MachineJumpTableInfo::EK_Custom32:
1262 return TD.getABIIntegerTypeAlignment(32).value();
1263 case MachineJumpTableInfo::EK_Inline:
1264 return 1;
1265 }
1266 llvm_unreachable("Unknown jump table encoding!");
1267}
1268
1269/// Create a new jump table entry in the jump table info.
1270unsigned MachineJumpTableInfo::createJumpTableIndex(
1271 const std::vector<MachineBasicBlock*> &DestBBs) {
1272 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1273 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1274 return JumpTables.size()-1;
1275}
1276
1277/// If Old is the target of any jump tables, update the jump tables to branch
1278/// to New instead.
1279bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
1280 MachineBasicBlock *New) {
1281 assert(Old != New && "Not making a change?");
1282 bool MadeChange = false;
1283 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1284 ReplaceMBBInJumpTable(i, Old, New);
1285 return MadeChange;
1286}
1287
1288/// If MBB is present in any jump tables, remove it.
1289bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
1290 bool MadeChange = false;
1291 for (MachineJumpTableEntry &JTE : JumpTables) {
1292 auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
1293 MadeChange |= (removeBeginItr != JTE.MBBs.end());
1294 JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
1295 }
1296 return MadeChange;
1297}
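Editorial aside: the body of RemoveMBBFromJumpTables above is the standard C++ erase-remove idiom. A minimal, self-contained sketch of the same pattern (illustrative names only, not LLVM code):

  #include <algorithm>
  #include <vector>

  // Remove every occurrence of Val from V; return true if anything changed.
  bool removeAll(std::vector<int> &V, int Val) {
    auto NewEnd = std::remove(V.begin(), V.end(), Val); // shift kept elements forward
    bool Changed = NewEnd != V.end();                    // did anything match?
    V.erase(NewEnd, V.end());                            // drop the leftover tail
    return Changed;
  }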
1298
1299/// If Old is a target of the jump tables, update the jump table to branch to
1300/// New instead.
1301bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
1302 MachineBasicBlock *Old,
1303 MachineBasicBlock *New) {
1304 assert(Old != New && "Not making a change?");
1305 bool MadeChange = false;
1306 MachineJumpTableEntry &JTE = JumpTables[Idx];
1307 for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
1308 if (JTE.MBBs[j] == Old) {
1309 JTE.MBBs[j] = New;
1310 MadeChange = true;
1311 }
1312 return MadeChange;
1313}
1314
1315void MachineJumpTableInfo::print(raw_ostream &OS) const {
1316 if (JumpTables.empty()) return;
1317
1318 OS << "Jump Tables:\n";
1319
1320 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
1321 OS << printJumpTableEntryReference(i) << ':';
1322 for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
1323 OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]);
1324 if (i != e)
1325 OS << '\n';
1326 }
1327
1328 OS << '\n';
1329}
1330
1331#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1332LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
1333#endif
1334
1335Printable llvm::printJumpTableEntryReference(unsigned Idx) {
1336 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
1337}
1338
1339//===----------------------------------------------------------------------===//
1340// MachineConstantPool implementation
1341//===----------------------------------------------------------------------===//
1342
1343void MachineConstantPoolValue::anchor() {}
1344
1345unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const {
1346 return DL.getTypeAllocSize(Ty);
1347}
1348
1349unsigned MachineConstantPoolEntry::getSizeInBytes(const DataLayout &DL) const {
1350 if (isMachineConstantPoolEntry())
1351 return Val.MachineCPVal->getSizeInBytes(DL);
1352 return DL.getTypeAllocSize(Val.ConstVal->getType());
1353}
1354
1355bool MachineConstantPoolEntry::needsRelocation() const {
1356 if (isMachineConstantPoolEntry())
1357 return true;
1358 return Val.ConstVal->needsDynamicRelocation();
1359}
1360
1361SectionKind
1362MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
1363 if (needsRelocation())
1364 return SectionKind::getReadOnlyWithRel();
1365 switch (getSizeInBytes(*DL)) {
1366 case 4:
1367 return SectionKind::getMergeableConst4();
1368 case 8:
1369 return SectionKind::getMergeableConst8();
1370 case 16:
1371 return SectionKind::getMergeableConst16();
1372 case 32:
1373 return SectionKind::getMergeableConst32();
1374 default:
1375 return SectionKind::getReadOnly();
1376 }
1377}
1378
1379MachineConstantPool::~MachineConstantPool() {
1380 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
1381 // so keep track of which we've deleted to avoid double deletions.
1382 DenseSet<MachineConstantPoolValue*> Deleted;
1383 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1384 if (Constants[i].isMachineConstantPoolEntry()) {
1385 Deleted.insert(Constants[i].Val.MachineCPVal);
1386 delete Constants[i].Val.MachineCPVal;
1387 }
1388 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
1389 if (Deleted.count(CPV) == 0)
1390 delete CPV;
1391 }
1392}
1393
1394/// Test whether the given two constants can be allocated the same constant pool
1395/// entry.
1396static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
1397 const DataLayout &DL) {
1398 // Handle the trivial case quickly.
1399 if (A == B) return true;
1400
1401 // If they have the same type but weren't the same constant, quickly
1402 // reject them.
1403 if (A->getType() == B->getType()) return false;
1404
1405 // We can't handle structs or arrays.
1406 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
1407 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
1408 return false;
1409
1410 // For now, only support constants with the same size.
1411 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
1412 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
1413 return false;
1414
1415 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
1416
1417 // Try constant folding a bitcast of both instructions to an integer. If we
1418 // get two identical ConstantInt's, then we are good to share them. We use
1419 // the constant folding APIs to do this so that we get the benefit of
1420 // DataLayout.
1421 if (isa<PointerType>(A->getType()))
1422 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1423 const_cast<Constant *>(A), IntTy, DL);
1424 else if (A->getType() != IntTy)
1425 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1426 IntTy, DL);
1427 if (isa<PointerType>(B->getType()))
1428 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1429 const_cast<Constant *>(B), IntTy, DL);
1430 else if (B->getType() != IntTy)
1431 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1432 IntTy, DL);
1433
1434 return A == B;
1435}
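Editorial illustration of the idea behind the folding above (plain C++, not the LLVM API): two constants of different types may share a constant-pool slot when their in-memory bit patterns match, e.g. the float 1.0f and the 32-bit integer 0x3f800000 on an IEEE-754 target:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    float F = 1.0f;
    uint32_t Bits = 0;
    static_assert(sizeof(F) == sizeof(Bits), "same store size required");
    std::memcpy(&Bits, &F, sizeof(Bits)); // reinterpret the float's bytes
    assert(Bits == 0x3f800000u);          // IEEE-754 single-precision 1.0
    return 0;
  }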
1436
1437/// Create a new entry in the constant pool or return an existing one.
1438/// The caller must specify the minimum required alignment for the object.
1439unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
1440 Align Alignment) {
1441 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1442
1443 // Check to see if we already have this constant.
1444 //
1445 // FIXME, this could be made much more efficient for large constant pools.
1446 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1447 if (!Constants[i].isMachineConstantPoolEntry() &&
1448 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1449 if (Constants[i].getAlign() < Alignment)
1450 Constants[i].Alignment = Alignment;
1451 return i;
1452 }
1453
1454 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1455 return Constants.size()-1;
1456}
1457
1458unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
1459 Align Alignment) {
1460 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1461
1462 // Check to see if we already have this constant.
1463 //
1464 // FIXME, this could be made much more efficient for large constant pools.
1465 int Idx = V->getExistingMachineCPValue(this, Alignment);
1466 if (Idx != -1) {
1467 MachineCPVsSharingEntries.insert(V);
1468 return (unsigned)Idx;
1469 }
1470
1471 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1472 return Constants.size()-1;
1473}
1474
1475void MachineConstantPool::print(raw_ostream &OS) const {
1476 if (Constants.empty()) return;
1477
1478 OS << "Constant Pool:\n";
1479 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1480 OS << " cp#" << i << ": ";
1481 if (Constants[i].isMachineConstantPoolEntry())
1482 Constants[i].Val.MachineCPVal->print(OS);
1483 else
1484 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1485 OS << ", align=" << Constants[i].getAlign().value();
1486 OS << "\n";
1487 }
1488}
1489
1490#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1491LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
1492#endif

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Allocator.h

1//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
11/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
12/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
13/// allocator.
14///
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_SUPPORT_ALLOCATOR_H
18#define LLVM_SUPPORT_ALLOCATOR_H
19
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/Support/Alignment.h"
23#include "llvm/Support/AllocatorBase.h"
24#include "llvm/Support/Compiler.h"
25#include "llvm/Support/ErrorHandling.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/MemAlloc.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstdint>
32#include <cstdlib>
33#include <iterator>
34#include <type_traits>
35#include <utility>
36
37namespace llvm {
38
39namespace detail {
40
41// We call out to an external function to actually print the message as the
42// printing code uses Allocator.h in its implementation.
43void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
44 size_t TotalMemory);
45
46} // end namespace detail
47
48/// Allocate memory in an ever growing pool, as if by bump-pointer.
49///
50/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
51/// memory rather than relying on a boundless contiguous heap. However, it has
52/// bump-pointer semantics in that it is a monotonically growing pool of memory
53/// where every allocation is found by merely allocating the next N bytes in
54/// the slab, or the next N bytes in the next slab.
55///
56/// Note that this also has a threshold for forcing allocations above a certain
57/// size into their own slab.
58///
59/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
60/// object, which wraps malloc, to allocate memory, but it can be changed to
61/// use a custom allocator.
62///
63/// The GrowthDelay specifies after how many allocated slabs the allocator
64/// increases the size of the slabs.
65template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
66 size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
67class BumpPtrAllocatorImpl
68 : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
69 SizeThreshold, GrowthDelay>>,
70 private AllocatorT {
71public:
72 static_assert(SizeThreshold <= SlabSize,
73 "The SizeThreshold must be at most the SlabSize to ensure "
74 "that objects larger than a slab go into their own memory "
75 "allocation.");
76 static_assert(GrowthDelay > 0,
77 "GrowthDelay must be at least 1 which already increases the"
78 "slab size after each allocated slab.");
79
80 BumpPtrAllocatorImpl() = default;
81
82 template <typename T>
83 BumpPtrAllocatorImpl(T &&Allocator)
84 : AllocatorT(std::forward<T &&>(Allocator)) {}
85
86 // Manually implement a move constructor as we must clear the old allocator's
87 // slabs as a matter of correctness.
88 BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
89 : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
90 End(Old.End), Slabs(std::move(Old.Slabs)),
91 CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
92 BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
93 Old.CurPtr = Old.End = nullptr;
94 Old.BytesAllocated = 0;
95 Old.Slabs.clear();
96 Old.CustomSizedSlabs.clear();
97 }
98
99 ~BumpPtrAllocatorImpl() {
100 DeallocateSlabs(Slabs.begin(), Slabs.end());
101 DeallocateCustomSizedSlabs();
102 }
103
104 BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
105 DeallocateSlabs(Slabs.begin(), Slabs.end());
106 DeallocateCustomSizedSlabs();
107
108 CurPtr = RHS.CurPtr;
109 End = RHS.End;
110 BytesAllocated = RHS.BytesAllocated;
111 RedZoneSize = RHS.RedZoneSize;
112 Slabs = std::move(RHS.Slabs);
113 CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
114 AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));
115
116 RHS.CurPtr = RHS.End = nullptr;
117 RHS.BytesAllocated = 0;
118 RHS.Slabs.clear();
119 RHS.CustomSizedSlabs.clear();
120 return *this;
121 }
122
123 /// Deallocate all but the current slab and reset the current pointer
124 /// to the beginning of it, freeing all memory allocated so far.
125 void Reset() {
126 // Deallocate all but the first slab, and deallocate all custom-sized slabs.
127 DeallocateCustomSizedSlabs();
128 CustomSizedSlabs.clear();
129
130 if (Slabs.empty())
131 return;
132
133 // Reset the state.
134 BytesAllocated = 0;
135 CurPtr = (char *)Slabs.front();
136 End = CurPtr + SlabSize;
137
138 __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
139 DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
140 Slabs.erase(std::next(Slabs.begin()), Slabs.end());
141 }
142
143 /// Allocate space at the specified alignment.
144 LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
145 Allocate(size_t Size, Align Alignment) {
146 // Keep track of how many bytes we've allocated.
147 BytesAllocated += Size;
148
149 size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
Step 4: Calling 'offsetToAlignedAddr'
150 assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
151
152 size_t SizeToAllocate = Size;
153#if LLVM_ADDRESS_SANITIZER_BUILD
154 // Add trailing bytes as a "red zone" under ASan.
155 SizeToAllocate += RedZoneSize;
156#endif
157
158 // Check if we have enough space.
159 if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
160 char *AlignedPtr = CurPtr + Adjustment;
161 CurPtr = AlignedPtr + SizeToAllocate;
162 // Update the allocation point of this memory block in MemorySanitizer.
163 // Without this, MemorySanitizer messages for values originated from here
164 // will point to the allocation of the entire slab.
165 __msan_allocated_memory(AlignedPtr, Size);
166 // Similarly, tell ASan about this space.
167 __asan_unpoison_memory_region(AlignedPtr, Size);
168 return AlignedPtr;
169 }
170
171 // If Size is really big, allocate a separate slab for it.
172 size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
173 if (PaddedSize > SizeThreshold) {
174 void *NewSlab =
175 AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
176 // We own the new slab and don't want anyone reading anything other than
177 // pieces returned from this method. So poison the whole slab.
178 __asan_poison_memory_region(NewSlab, PaddedSize);
179 CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
180
181 uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
182 assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
183 char *AlignedPtr = (char*)AlignedAddr;
184 __msan_allocated_memory(AlignedPtr, Size);
185 __asan_unpoison_memory_region(AlignedPtr, Size);
186 return AlignedPtr;
187 }
188
189 // Otherwise, start a new slab and try again.
190 StartNewSlab();
191 uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
192 assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
193 "Unable to allocate memory!");
194 char *AlignedPtr = (char*)AlignedAddr;
195 CurPtr = AlignedPtr + SizeToAllocate;
196 __msan_allocated_memory(AlignedPtr, Size);
197 __asan_unpoison_memory_region(AlignedPtr, Size);
198 return AlignedPtr;
199 }
200
201 inline LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
202 Allocate(size_t Size, size_t Alignment) {
203 assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
204 return Allocate(Size, Align(Alignment));
Step 3: Calling 'BumpPtrAllocatorImpl::Allocate'
205 }
206
207 // Pull in base class overloads.
208 using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
209
210 // Bump pointer allocators are expected to never free their storage; and
211 // clients expect pointers to remain valid for non-dereferencing uses even
212 // after deallocation.
213 void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
214 __asan_poison_memory_region(Ptr, Size);
215 }
216
217 // Pull in base class overloads.
218 using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
219
220 size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
221
222 /// \return An index uniquely and reproducibly identifying
223 /// an input pointer \p Ptr in the given allocator.
224 /// The returned value is negative iff the object is inside a custom-size
225 /// slab.
226 /// Returns an empty optional if the pointer is not found in the allocator.
227 llvm::Optional<int64_t> identifyObject(const void *Ptr) {
228 const char *P = static_cast<const char *>(Ptr);
229 int64_t InSlabIdx = 0;
230 for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
231 const char *S = static_cast<const char *>(Slabs[Idx]);
232 if (P >= S && P < S + computeSlabSize(Idx))
233 return InSlabIdx + static_cast<int64_t>(P - S);
234 InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
235 }
236
237 // Use negative index to denote custom sized slabs.
238 int64_t InCustomSizedSlabIdx = -1;
239 for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
240 const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
241 size_t Size = CustomSizedSlabs[Idx].second;
242 if (P >= S && P < S + Size)
243 return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
244 InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
245 }
246 return None;
247 }
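// Editorial worked example (assuming the default 4096-byte slabs): a pointer
// 100 bytes into the second regular slab maps to 4096 + 100 = 4196, while a
// pointer 100 bytes into the first custom-sized slab maps to -1 - 100 = -101,
// so regular slabs yield non-negative indices and custom-sized slabs negative ones.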
248
249 /// A wrapper around identifyObject that additionally asserts that
250 /// the object is indeed within the allocator.
251 /// \return An index uniquely and reproducibly identifying
252 /// an input pointer \p Ptr in the given allocator.
253 int64_t identifyKnownObject(const void *Ptr) {
254 Optional<int64_t> Out = identifyObject(Ptr);
255 assert(Out && "Wrong allocator used");
256 return *Out;
257 }
258
259 /// A wrapper around identifyKnownObject. Accepts type information
260 /// about the object and produces a smaller identifier by relying on
261 /// the alignment information. Note that sub-classes may have different
262 /// alignment, so the most base class should be passed as template parameter
263 /// in order to obtain correct results. For that reason automatic template
264 /// parameter deduction is disabled.
265 /// \return An index uniquely and reproducibly identifying
266 /// an input pointer \p Ptr in the given allocator. This identifier is
267 /// different from the ones produced by identifyObject and
268 /// identifyAlignedObject.
269 template <typename T>
270 int64_t identifyKnownAlignedObject(const void *Ptr) {
271 int64_t Out = identifyKnownObject(Ptr);
272 assert(Out % alignof(T) == 0 && "Wrong alignment information");
273 return Out / alignof(T);
274 }
275
276 size_t getTotalMemory() const {
277 size_t TotalMemory = 0;
278 for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
279 TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
280 for (auto &PtrAndSize : CustomSizedSlabs)
281 TotalMemory += PtrAndSize.second;
282 return TotalMemory;
283 }
284
285 size_t getBytesAllocated() const { return BytesAllocated; }
286
287 void setRedZoneSize(size_t NewSize) {
288 RedZoneSize = NewSize;
289 }
290
291 void PrintStats() const {
292 detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
293 getTotalMemory());
294 }
295
296private:
297 /// The current pointer into the current slab.
298 ///
299 /// This points to the next free byte in the slab.
300 char *CurPtr = nullptr;
301
302 /// The end of the current slab.
303 char *End = nullptr;
304
305 /// The slabs allocated so far.
306 SmallVector<void *, 4> Slabs;
307
308 /// Custom-sized slabs allocated for too-large allocation requests.
309 SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
310
311 /// How many bytes we've allocated.
312 ///
313 /// Used so that we can compute how much space was wasted.
314 size_t BytesAllocated = 0;
315
316 /// The number of bytes to put between allocations when running under
317 /// a sanitizer.
318 size_t RedZoneSize = 1;
319
320 static size_t computeSlabSize(unsigned SlabIdx) {
321 // Scale the actual allocated slab size based on the number of slabs
322 // allocated. Every GrowthDelay slabs allocated, we double
323 // the allocated size to reduce allocation frequency, but saturate at
324 // multiplying the slab size by 2^30.
325 return SlabSize *
326 ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
327 }
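// Editorial worked example: with the default SlabSize = 4096 and
// GrowthDelay = 128, slabs 0-127 are 4096 bytes, slabs 128-255 are 8192 bytes,
// slabs 256-383 are 16384 bytes, and so on, with the multiplier capped at 2^30.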
328
329 /// Allocate a new slab and move the bump pointers over into the new
330 /// slab, modifying CurPtr and End.
331 void StartNewSlab() {
332 size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
333
334 void *NewSlab =
335 AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
336 // We own the new slab and don't want anyone reading anything other than
337 // pieces returned from this method. So poison the whole slab.
338 __asan_poison_memory_region(NewSlab, AllocatedSlabSize);
339
340 Slabs.push_back(NewSlab);
341 CurPtr = (char *)(NewSlab);
342 End = ((char *)NewSlab) + AllocatedSlabSize;
343 }
344
345 /// Deallocate a sequence of slabs.
346 void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
347 SmallVectorImpl<void *>::iterator E) {
348 for (; I != E; ++I) {
349 size_t AllocatedSlabSize =
350 computeSlabSize(std::distance(Slabs.begin(), I));
351 AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
352 }
353 }
354
355 /// Deallocate all memory for custom sized slabs.
356 void DeallocateCustomSizedSlabs() {
357 for (auto &PtrAndSize : CustomSizedSlabs) {
358 void *Ptr = PtrAndSize.first;
359 size_t Size = PtrAndSize.second;
360 AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
361 }
362 }
363
364 template <typename T> friend class SpecificBumpPtrAllocator;
365};
366
367/// The standard BumpPtrAllocator which just uses the default template
368/// parameters.
369typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
370
371/// A BumpPtrAllocator that allows only elements of a specific type to be
372/// allocated.
373///
374/// This allows calling the destructor in DestroyAll() and when the allocator is
375/// destroyed.
376template <typename T> class SpecificBumpPtrAllocator {
377 BumpPtrAllocator Allocator;
378
379public:
380 SpecificBumpPtrAllocator() {
381 // Because SpecificBumpPtrAllocator walks the memory to call destructors,
382 // it can't have red zones between allocations.
383 Allocator.setRedZoneSize(0);
384 }
385 SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
386 : Allocator(std::move(Old.Allocator)) {}
387 ~SpecificBumpPtrAllocator() { DestroyAll(); }
388
389 SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
390 Allocator = std::move(RHS.Allocator);
391 return *this;
392 }
393
394 /// Call the destructor of each allocated object, then deallocate all but the
395 /// current slab and reset the current pointer to the beginning of it, freeing
396 /// all memory allocated so far.
397 void DestroyAll() {
398 auto DestroyElements = [](char *Begin, char *End) {
399 assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
400 for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
401 reinterpret_cast<T *>(Ptr)->~T();
402 };
403
404 for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
405 ++I) {
406 size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
407 std::distance(Allocator.Slabs.begin(), I));
408 char *Begin = (char *)alignAddr(*I, Align::Of<T>());
409 char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
410 : (char *)*I + AllocatedSlabSize;
411
412 DestroyElements(Begin, End);
413 }
414
415 for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
416 void *Ptr = PtrAndSize.first;
417 size_t Size = PtrAndSize.second;
418 DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
419 (char *)Ptr + Size);
420 }
421
422 Allocator.Reset();
423 }
424
425 /// Allocate space for an array of objects without constructing them.
426 T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
427};
428
429} // end namespace llvm
430
431template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
432 size_t GrowthDelay>
433void *
434operator new(size_t Size,
435 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
436 GrowthDelay> &Allocator) {
437 return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
Step 2: Calling 'BumpPtrAllocatorImpl::Allocate'
438 alignof(std::max_align_t)));
439}
440
441template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
442 size_t GrowthDelay>
443void operator delete(void *,
444 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
445 SizeThreshold, GrowthDelay> &) {
446}
447
448#endif // LLVM_SUPPORT_ALLOCATOR_H
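A minimal usage sketch of the allocator defined above (editorial; assumes the LLVM headers are on the include path, and `Point` is a hypothetical trivially-destructible type):

  #include "llvm/Support/Allocator.h"

  struct Point { int X, Y; }; // hypothetical payload type

  void example() {
    llvm::BumpPtrAllocator Alloc;
    // Construct via the operator new overload declared at the end of this header.
    Point *P = new (Alloc) Point{1, 2};
    // Typed raw allocation through the AllocatorBase overloads pulled in above.
    Point *Storage = Alloc.Allocate<Point>(16); // uninitialized storage for 16 Points
    (void)P; (void)Storage;
    // There is no per-object deallocation; memory is reclaimed by Reset() or on
    // destruction of the allocator.
    Alloc.Reset();
  }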

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h

1//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains types to represent alignments.
10// They are instrumented to guarantee some invariants are preserved and prevent
11// invalid manipulations.
12//
13// - Align represents an alignment in bytes; it is always set and always a valid
14// power of two. Its minimum value is 1, which means no alignment requirement.
15//
16// - MaybeAlign is an optional type; it may be undefined or set. When it is set,
17// you can get the underlying Align type by using the getValue() method.
18//
19//===----------------------------------------------------------------------===//
20
21#ifndef LLVM_SUPPORT_ALIGNMENT_H_
22#define LLVM_SUPPORT_ALIGNMENT_H_
23
24#include "llvm/ADT/Optional.h"
25#include "llvm/Support/MathExtras.h"
26#include <cassert>
27#ifndef NDEBUG
28#include <string>
29#endif // NDEBUG
30
31namespace llvm {
32
33#define ALIGN_CHECK_ISPOSITIVE(decl) \
34 assert(decl > 0 && (#decl " should be defined"))
35
36/// This struct is a compact representation of a valid (non-zero power of two)
37/// alignment.
38/// It is suitable for use as static global constants.
39struct Align {
40private:
41 uint8_t ShiftValue = 0; /// The log2 of the required alignment.
42 /// ShiftValue is less than 64 by construction.
43
44 friend struct MaybeAlign;
45 friend unsigned Log2(Align);
46 friend bool operator==(Align Lhs, Align Rhs);
47 friend bool operator!=(Align Lhs, Align Rhs);
48 friend bool operator<=(Align Lhs, Align Rhs);
49 friend bool operator>=(Align Lhs, Align Rhs);
50 friend bool operator<(Align Lhs, Align Rhs);
51 friend bool operator>(Align Lhs, Align Rhs);
52 friend unsigned encode(struct MaybeAlign A);
53 friend struct MaybeAlign decodeMaybeAlign(unsigned Value);
54
55 /// A trivial type to allow construction of constexpr Align.
56 /// This is currently needed to work around a bug in GCC 5.3 which prevents
57 /// definition of constexpr assign operators.
58 /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
59 /// FIXME: Remove this, make all assign operators constexpr and introduce user
60 /// defined literals when we don't have to support GCC 5.3 anymore.
61 /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
62 struct LogValue {
63 uint8_t Log;
64 };
65
66public:
67 /// Default is byte-aligned.
68 constexpr Align() = default;
69 /// Do not perform checks in case of copy/move construct/assign, because the
70 /// checks have been performed when building `Other`.
71 constexpr Align(const Align &Other) = default;
72 constexpr Align(Align &&Other) = default;
73 Align &operator=(const Align &Other) = default;
74 Align &operator=(Align &&Other) = default;
75
76 explicit Align(uint64_t Value) {
77 assert(Value > 0 && "Value must not be 0");
78 assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2");
79 ShiftValue = Log2_64(Value);
80 assert(ShiftValue < 64 && "Broken invariant");
81 }
82
83 /// This is a hole in the type system and should not be abused.
84 /// Needed to interact with C for instance.
85 uint64_t value() const { return uint64_t(1) << ShiftValue; }
Step 9: The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
86
87 /// Allow constructions of constexpr Align.
88 template <size_t kValue> constexpr static LogValue Constant() {
89 return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
90 }
91
92 /// Allow constructions of constexpr Align from types.
93 /// Compile time equivalent to Align(alignof(T)).
94 template <typename T> constexpr static LogValue Of() {
95 return Constant<std::alignment_of<T>::value>();
96 }
97
98 /// Constexpr constructor from LogValue type.
99 constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
100};
101
102/// Treats the value 0 as a 1, so Align is always at least 1.
103inline Align assumeAligned(uint64_t Value) {
104 return Value ? Align(Value) : Align();
105}
106
107/// This struct is a compact representation of a valid (power of two) or
108/// undefined (0) alignment.
109struct MaybeAlign : public llvm::Optional<Align> {
110private:
111 using UP = llvm::Optional<Align>;
112
113public:
114 /// Default is undefined.
115 MaybeAlign() = default;
116 /// Do not perform checks in case of copy/move construct/assign, because the
117 /// checks have been performed when building `Other`.
118 MaybeAlign(const MaybeAlign &Other) = default;
119 MaybeAlign &operator=(const MaybeAlign &Other) = default;
120 MaybeAlign(MaybeAlign &&Other) = default;
121 MaybeAlign &operator=(MaybeAlign &&Other) = default;
122
123 /// Use llvm::Optional<Align> constructor.
124 using UP::UP;
125
126 explicit MaybeAlign(uint64_t Value) {
127 assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&
128 "Alignment is neither 0 nor a power of 2");
129 if (Value)
130 emplace(Value);
131 }
132
133 /// For convenience, returns a valid alignment or 1 if undefined.
134 Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
135};
136
137/// Checks that SizeInBytes is a multiple of the alignment.
138inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
139 return SizeInBytes % Lhs.value() == 0;
140}
141
142/// Checks that Addr is a multiple of the alignment.
143inline bool isAddrAligned(Align Lhs, const void *Addr) {
144 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
145}
146
147/// Returns a multiple of A needed to store `Size` bytes.
148inline uint64_t alignTo(uint64_t Size, Align A) {
149 const uint64_t Value = A.value();
Step 8: Calling 'Align::value'
150 // The following line is equivalent to `(Size + Value - 1) / Value * Value`.
151
152 // The division followed by a multiplication can be thought of as a right
153 // shift followed by a left shift which zeros out the extra bits produced in
154 // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
155 // are just zero.
156
157 // Most compilers can generate this code but the pattern may be missed when
158 // multiple functions get inlined.
159 return (Size + Value - 1) & ~(Value - 1U);
160}
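// Editorial worked example: alignTo(13, Align(8)) computes
// (13 + 8 - 1) & ~uint64_t(7) = 20 & ~7 = 16, the next multiple of 8, exactly
// as (13 + 7) / 8 * 8 would.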
161
162/// If non-zero \p Skew is specified, the return value will be a minimal integer
163/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
164/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
165/// Skew mod \p A'.
166///
167/// Examples:
168/// \code
169/// alignTo(5, Align(8), 7) = 7
170/// alignTo(17, Align(8), 1) = 17
171/// alignTo(~0LL, Align(8), 3) = 3
172/// \endcode
173inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
174 const uint64_t Value = A.value();
175 Skew %= Value;
176 return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
177}
178
179/// Returns a multiple of A needed to store `Size` bytes.
180/// Returns `Size` if current alignment is undefined.
181inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
182 return A ? alignTo(Size, A.getValue()) : Size;
183}
184
185/// Aligns `Addr` to `Alignment` bytes, rounding up.
186inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
187 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
188 assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=
189 ArithAddr &&
190 "Overflow");
191 return alignTo(ArithAddr, Alignment);
192}
193
194/// Returns the offset to the next integer (mod 2**64) that is greater than
195/// or equal to \p Value and is a multiple of \p Align.
196inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
197 return alignTo(Value, Alignment) - Value;
Step 6: The value 255 is assigned to 'A.ShiftValue'
Step 7: Calling 'alignTo'
198}
199
200/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
201/// bytes, rounding up.
202inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
203 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
Step 5: Calling 'offsetToAlignment'
204}
205
206/// Returns the log2 of the alignment.
207inline unsigned Log2(Align A) { return A.ShiftValue; }
208
209/// Returns the alignment that satisfies both alignments.
210/// Same semantic as MinAlign.
211inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }
212
213/// Returns the alignment that satisfies both alignments.
214/// Same semantic as MinAlign.
215inline Align commonAlignment(Align A, uint64_t Offset) {
216 return Align(MinAlign(A.value(), Offset));
217}
218
219/// Returns the alignment that satisfies both alignments.
220/// Same semantic as MinAlign.
221inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
222 return A && B ? commonAlignment(*A, *B) : A ? A : B;
223}
224
225/// Returns the alignment that satisfies both alignments.
226/// Same semantic as MinAlign.
227inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
228 return MaybeAlign(MinAlign((*A).value(), Offset));
229}
230
231/// Returns a representation of the alignment that encodes undefined as 0.
232inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }
233
234/// Dual operation of the encode function above.
235inline MaybeAlign decodeMaybeAlign(unsigned Value) {
236 if (Value == 0)
237 return MaybeAlign();
238 Align Out;
239 Out.ShiftValue = Value - 1;
240 return Out;
241}
242
243/// Returns a representation of the alignment, the encoded value is positive by
244/// definition.
245inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
246
247/// Comparisons between Align and scalars. Rhs must be positive.
248inline bool operator==(Align Lhs, uint64_t Rhs) {
249 ALIGN_CHECK_ISPOSITIVE(Rhs);
250 return Lhs.value() == Rhs;
251}
252inline bool operator!=(Align Lhs, uint64_t Rhs) {
253 ALIGN_CHECK_ISPOSITIVE(Rhs);
254 return Lhs.value() != Rhs;
255}
256inline bool operator<=(Align Lhs, uint64_t Rhs) {
257 ALIGN_CHECK_ISPOSITIVE(Rhs);
258 return Lhs.value() <= Rhs;
259}
260inline bool operator>=(Align Lhs, uint64_t Rhs) {
261 ALIGN_CHECK_ISPOSITIVE(Rhs);
262 return Lhs.value() >= Rhs;
263}
264inline bool operator<(Align Lhs, uint64_t Rhs) {
265 ALIGN_CHECK_ISPOSITIVE(Rhs);
266 return Lhs.value() < Rhs;
267}
268inline bool operator>(Align Lhs, uint64_t Rhs) {
269 ALIGN_CHECK_ISPOSITIVE(Rhs);
270 return Lhs.value() > Rhs;
271}
272
273/// Comparisons between MaybeAlign and scalars.
274inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
275 return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
276}
277inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
278 return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
279}
280
281/// Comparisons operators between Align.
282inline bool operator==(Align Lhs, Align Rhs) {
283 return Lhs.ShiftValue == Rhs.ShiftValue;
284}
285inline bool operator!=(Align Lhs, Align Rhs) {
286 return Lhs.ShiftValue != Rhs.ShiftValue;
287}
288inline bool operator<=(Align Lhs, Align Rhs) {
289 return Lhs.ShiftValue <= Rhs.ShiftValue;
290}
291inline bool operator>=(Align Lhs, Align Rhs) {
292 return Lhs.ShiftValue >= Rhs.ShiftValue;
293}
294inline bool operator<(Align Lhs, Align Rhs) {
295 return Lhs.ShiftValue < Rhs.ShiftValue;
296}
297inline bool operator>(Align Lhs, Align Rhs) {
298 return Lhs.ShiftValue > Rhs.ShiftValue;
299}
300
301// Don't allow relational comparisons with MaybeAlign.
302bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
303bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
304bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
305bool operator>(Align Lhs, MaybeAlign Rhs) = delete;
306
307bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
308bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
309bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
310bool operator>(MaybeAlign Lhs, Align Rhs) = delete;
311
312bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
313bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
314bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
315bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316
317inline Align operator*(Align Lhs, uint64_t Rhs) {
318 assert(Rhs > 0 && "Rhs must be positive");
319 return Align(Lhs.value() * Rhs);
320}
321
322inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
323 assert(Rhs > 0 && "Rhs must be positive");
324 return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
325}
326
327inline Align operator/(Align Lhs, uint64_t Divisor) {
328 assert(llvm::isPowerOf2_64(Divisor) &&
329 "Divisor must be positive and a power of 2");
330 assert(Lhs != 1 && "Can't halve byte alignment");
331 return Align(Lhs.value() / Divisor);
332}
333
334inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
335 assert(llvm::isPowerOf2_64(Divisor) &&
336 "Divisor must be positive and a power of 2");
337 return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
338}
339
340inline Align max(MaybeAlign Lhs, Align Rhs) {
341 return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
342}
343
344inline Align max(Align Lhs, MaybeAlign Rhs) {
345 return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
346}
347
348#ifndef NDEBUG
349// For usage in LLVM_DEBUG macros.
350inline std::string DebugStr(const Align &A) {
351 return std::to_string(A.value());
352}
353// For usage in LLVM_DEBUG macros.
354inline std::string DebugStr(const MaybeAlign &MA) {
355 if (MA)
356 return std::to_string(MA->value());
357 return "None";
358}
359#endif // NDEBUG
360
361#undef ALIGN_CHECK_ISPOSITIVE
362
363} // namespace llvm
364
365#endif // LLVM_SUPPORT_ALIGNMENT_H_
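For context on the diagnostic above, a minimal standalone sketch (editorial; plain C++, not LLVM code) of the invariant that makes the flagged shift well defined: a shift count greater than or equal to the width of the promoted left operand is undefined behavior in C++, so Align::value() is safe only while ShiftValue < 64 holds. If the assertions guarding that invariant are compiled out (as in an NDEBUG build), nothing enforces it on the path the analyzer explores.

  #include <cassert>
  #include <cstdint>

  // Mirrors the guarded pattern: the shift is well defined only because the
  // caller established Shift < 64 beforehand.
  uint64_t valueFromLog2(uint8_t Shift) {
    assert(Shift < 64 && "shifting a uint64_t by >= 64 is undefined behavior");
    return uint64_t(1) << Shift;
  }

  int main() {
    assert(valueFromLog2(0) == 1);
    assert(valueFromLog2(4) == 16);
    // valueFromLog2(255) would be undefined behavior if the assert were disabled,
    // which is the kind of path the analyzer reports here.
    return 0;
  }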