File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h |
Warning: | line 85, column 47 The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t' |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- llvm/CodeGen/DwarfCompileUnit.cpp - Dwarf Compile Units ------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file contains support for constructing a dwarf compile unit. | |||
10 | // | |||
11 | //===----------------------------------------------------------------------===// | |||
12 | ||||
13 | #include "DwarfCompileUnit.h" | |||
14 | #include "AddressPool.h" | |||
15 | #include "DwarfExpression.h" | |||
16 | #include "llvm/ADT/None.h" | |||
17 | #include "llvm/ADT/STLExtras.h" | |||
18 | #include "llvm/ADT/SmallString.h" | |||
19 | #include "llvm/BinaryFormat/Dwarf.h" | |||
20 | #include "llvm/CodeGen/AsmPrinter.h" | |||
21 | #include "llvm/CodeGen/DIE.h" | |||
22 | #include "llvm/CodeGen/MachineFunction.h" | |||
23 | #include "llvm/CodeGen/MachineInstr.h" | |||
24 | #include "llvm/CodeGen/MachineOperand.h" | |||
25 | #include "llvm/CodeGen/TargetFrameLowering.h" | |||
26 | #include "llvm/CodeGen/TargetRegisterInfo.h" | |||
27 | #include "llvm/CodeGen/TargetSubtargetInfo.h" | |||
28 | #include "llvm/IR/DataLayout.h" | |||
29 | #include "llvm/IR/DebugInfo.h" | |||
30 | #include "llvm/IR/GlobalVariable.h" | |||
31 | #include "llvm/MC/MCSection.h" | |||
32 | #include "llvm/MC/MCStreamer.h" | |||
33 | #include "llvm/MC/MCSymbol.h" | |||
34 | #include "llvm/MC/MCSymbolWasm.h" | |||
35 | #include "llvm/MC/MachineLocation.h" | |||
36 | #include "llvm/Target/TargetLoweringObjectFile.h" | |||
37 | #include "llvm/Target/TargetMachine.h" | |||
38 | #include "llvm/Target/TargetOptions.h" | |||
39 | #include <iterator> | |||
40 | #include <string> | |||
41 | #include <utility> | |||
42 | ||||
43 | using namespace llvm; | |||
44 | ||||
45 | static dwarf::Tag GetCompileUnitType(UnitKind Kind, DwarfDebug *DW) { | |||
46 | ||||
47 | // According to DWARF Debugging Information Format Version 5, | |||
48 | // 3.1.2 Skeleton Compilation Unit Entries: | |||
49 | // "When generating a split DWARF object file (see Section 7.3.2 | |||
50 | // on page 187), the compilation unit in the .debug_info section | |||
51 | // is a "skeleton" compilation unit with the tag DW_TAG_skeleton_unit" | |||
52 | if (DW->getDwarfVersion() >= 5 && Kind == UnitKind::Skeleton) | |||
53 | return dwarf::DW_TAG_skeleton_unit; | |||
54 | ||||
55 | return dwarf::DW_TAG_compile_unit; | |||
56 | } | |||
57 | ||||
58 | DwarfCompileUnit::DwarfCompileUnit(unsigned UID, const DICompileUnit *Node, | |||
59 | AsmPrinter *A, DwarfDebug *DW, | |||
60 | DwarfFile *DWU, UnitKind Kind) | |||
61 | : DwarfUnit(GetCompileUnitType(Kind, DW), Node, A, DW, DWU), UniqueID(UID) { | |||
62 | insertDIE(Node, &getUnitDie()); | |||
63 | MacroLabelBegin = Asm->createTempSymbol("cu_macro_begin"); | |||
64 | } | |||
65 | ||||
/// addLabelAddress - Add a dwarf label attribute data and value using
/// DW_FORM_addr or DW_FORM_GNU_addr_index.
///
/// Depending on split-DWARF mode and DWARF version this emits either a direct
/// address, an address-pool index, or an addr+offset form relative to the
/// label's section base (to reduce relocations).
void DwarfCompileUnit::addLabelAddress(DIE &Die, dwarf::Attribute Attribute,
                                       const MCSymbol *Label) {
  // Don't use the address pool in non-fission or in the skeleton unit itself.
  if ((!DD->useSplitDwarf() || !Skeleton) && DD->getDwarfVersion() < 5)
    return addLocalLabelAddress(Die, Attribute, Label);

  if (Label)
    DD->addArangeLabel(SymbolCU(this, Label));

  bool UseAddrOffsetFormOrExpressions =
      DD->useAddrOffsetForm() || DD->useAddrOffsetExpressions();

  // Try to express the label as (section start) + offset so only the section
  // start needs a relocation.
  const MCSymbol *Base = nullptr;
  if (Label->isInSection() && UseAddrOffsetFormOrExpressions)
    Base = DD->getSectionLabel(&Label->getSection());

  // No usable base, or the label *is* the section base: emit a plain
  // address-pool index (DW_FORM_addrx from v5, GNU extension form before).
  if (!Base || Base == Label) {
    unsigned idx = DD->getAddressPool().getIndex(Label);
    addAttribute(Die, Attribute,
                 DD->getDwarfVersion() >= 5 ? dwarf::DW_FORM_addrx
                                            : dwarf::DW_FORM_GNU_addr_index,
                 DIEInteger(idx));
    return;
  }

  // Could be extended to work with DWARFv4 Split DWARF if that's important for
  // someone. In that case DW_FORM_data would be used.
  assert(DD->getDwarfVersion() >= 5 &&
         "Addr+offset expressions are only valuable when using debug_addr (to "
         "reduce relocations) available in DWARFv5 or higher");
  if (DD->useAddrOffsetExpressions()) {
    // Encode as a DWARF expression block referencing the pooled base address.
    auto *Loc = new (DIEValueAllocator) DIEBlock();
    addPoolOpAddress(*Loc, Label);
    addBlock(Die, Attribute, dwarf::DW_FORM_exprloc, Loc);
  } else
    // LLVM extension form: pool index of the base plus (Label - Base).
    addAttribute(Die, Attribute, dwarf::DW_FORM_LLVM_addrx_offset,
                 new (DIEValueAllocator) DIEAddrOffset(
                     DD->getAddressPool().getIndex(Base), Label, Base));
}
107 | ||||
108 | void DwarfCompileUnit::addLocalLabelAddress(DIE &Die, | |||
109 | dwarf::Attribute Attribute, | |||
110 | const MCSymbol *Label) { | |||
111 | if (Label) | |||
112 | DD->addArangeLabel(SymbolCU(this, Label)); | |||
113 | ||||
114 | if (Label) | |||
115 | addAttribute(Die, Attribute, dwarf::DW_FORM_addr, DIELabel(Label)); | |||
116 | else | |||
117 | addAttribute(Die, Attribute, dwarf::DW_FORM_addr, DIEInteger(0)); | |||
118 | } | |||
119 | ||||
120 | unsigned DwarfCompileUnit::getOrCreateSourceID(const DIFile *File) { | |||
121 | // If we print assembly, we can't separate .file entries according to | |||
122 | // compile units. Thus all files will belong to the default compile unit. | |||
123 | ||||
124 | // FIXME: add a better feature test than hasRawTextSupport. Even better, | |||
125 | // extend .file to support this. | |||
126 | unsigned CUID = Asm->OutStreamer->hasRawTextSupport() ? 0 : getUniqueID(); | |||
127 | if (!File) | |||
128 | return Asm->OutStreamer->emitDwarfFileDirective(0, "", "", None, None, | |||
129 | CUID); | |||
130 | return Asm->OutStreamer->emitDwarfFileDirective( | |||
131 | 0, File->getDirectory(), File->getFilename(), DD->getMD5AsBytes(File), | |||
132 | File->getSource(), CUID); | |||
133 | } | |||
134 | ||||
/// Get or create the DIE describing global variable GV, including its type,
/// scoping, alignment, template parameters, and location (built from
/// GlobalExprs). Returns the cached DIE if one already exists.
DIE *DwarfCompileUnit::getOrCreateGlobalVariableDIE(
    const DIGlobalVariable *GV, ArrayRef<GlobalExpr> GlobalExprs) {
  // Check for pre-existence.
  if (DIE *Die = getDIE(GV))
    return Die;

  assert(GV);

  auto *GVContext = GV->getScope();
  const DIType *GTy = GV->getType();

  // Construct the context before querying for the existence of the DIE in
  // case such construction creates the DIE.
  // Fortran COMMON blocks get a dedicated DW_TAG_common_block parent.
  auto *CB = GVContext ? dyn_cast<DICommonBlock>(GVContext) : nullptr;
  DIE *ContextDIE = CB ? getOrCreateCommonBlock(CB, GlobalExprs)
                       : getOrCreateContextDIE(GVContext);

  // Add to map.
  DIE *VariableDIE = &createAndAddDIE(GV->getTag(), *ContextDIE, GV);
  DIScope *DeclContext;
  if (auto *SDMDecl = GV->getStaticDataMemberDeclaration()) {
    // Definition of a static data member: point back at the in-class
    // declaration via DW_AT_specification instead of repeating name/type.
    DeclContext = SDMDecl->getScope();
    assert(SDMDecl->isStaticMember() && "Expected static member decl");
    assert(GV->isDefinition());
    // We need the declaration DIE that is in the static member's class.
    DIE *VariableSpecDIE = getOrCreateStaticMemberDIE(SDMDecl);
    addDIEEntry(*VariableDIE, dwarf::DW_AT_specification, *VariableSpecDIE);
    // If the global variable's type is different from the one in the class
    // member type, assume that it's more specific and also emit it.
    if (GTy != SDMDecl->getBaseType())
      addType(*VariableDIE, GTy);
  } else {
    DeclContext = GV->getScope();
    // Add name and type.
    addString(*VariableDIE, dwarf::DW_AT_name, GV->getDisplayName());
    if (GTy)
      addType(*VariableDIE, GTy);

    // Add scoping info.
    if (!GV->isLocalToUnit())
      addFlag(*VariableDIE, dwarf::DW_AT_external);

    // Add line number info.
    addSourceLine(*VariableDIE, GV);
  }

  // Declarations get DW_AT_declaration; definitions are made findable by name.
  if (!GV->isDefinition())
    addFlag(*VariableDIE, dwarf::DW_AT_declaration);
  else
    addGlobalName(GV->getName(), *VariableDIE, DeclContext);

  if (uint32_t AlignInBytes = GV->getAlignInBytes())
    addUInt(*VariableDIE, dwarf::DW_AT_alignment, dwarf::DW_FORM_udata,
            AlignInBytes);

  if (MDTuple *TP = GV->getTemplateParams())
    addTemplateParams(*VariableDIE, DINodeArray(TP));

  // Add location.
  addLocationAttribute(VariableDIE, GV, GlobalExprs);

  return VariableDIE;
}
198 | ||||
/// Build and attach a DW_AT_location (or DW_AT_const_value) attribute for the
/// global variable GV from the (GlobalVariable, DIExpression) pairs in
/// GlobalExprs. Fragments that produce a concrete location also register the
/// variable's name(s) in the accelerator table.
void DwarfCompileUnit::addLocationAttribute(
    DIE *VariableDIE, const DIGlobalVariable *GV, ArrayRef<GlobalExpr> GlobalExprs) {
  bool addToAccelTable = false;
  DIELoc *Loc = nullptr;                // created lazily on first usable fragment
  Optional<unsigned> NVPTXAddressSpace; // cuda-gdb address class, if extracted
  std::unique_ptr<DIEDwarfExpression> DwarfExpr;
  for (const auto &GE : GlobalExprs) {
    const GlobalVariable *Global = GE.Var;
    const DIExpression *Expr = GE.Expr;

    // For compatibility with DWARF 3 and earlier,
    // DW_AT_location(DW_OP_constu, X, DW_OP_stack_value) or
    // DW_AT_location(DW_OP_consts, X, DW_OP_stack_value) becomes
    // DW_AT_const_value(X).
    if (GlobalExprs.size() == 1 && Expr && Expr->isConstant()) {
      addToAccelTable = true;
      addConstantValue(
          *VariableDIE,
          DIExpression::SignedOrUnsignedConstant::UnsignedConstant ==
              *Expr->isConstant(),
          Expr->getElement(1));
      break;
    }

    // We cannot describe the location of dllimport'd variables: the
    // computation of their address requires loads from the IAT.
    if (Global && Global->hasDLLImportStorageClass())
      continue;

    // Nothing to describe without address or constant.
    if (!Global && (!Expr || !Expr->isConstant()))
      continue;

    // Skip TLS globals the object file format cannot describe.
    if (Global && Global->isThreadLocal() &&
        !Asm->getObjFileLowering().supportDebugThreadLocalLocation())
      continue;

    // First usable fragment: start the location block and its expression
    // builder, and remember to add the name to the accelerator table.
    if (!Loc) {
      addToAccelTable = true;
      Loc = new (DIEValueAllocator) DIELoc;
      DwarfExpr = std::make_unique<DIEDwarfExpression>(*Asm, *this, *Loc);
    }

    if (Expr) {
      // According to
      // https://docs.nvidia.com/cuda/archive/10.0/ptx-writers-guide-to-interoperability/index.html#cuda-specific-dwarf
      // cuda-gdb requires DW_AT_address_class for all variables to be able to
      // correctly interpret address space of the variable address.
      // Decode DW_OP_constu <DWARF Address Space> DW_OP_swap DW_OP_xderef
      // sequence for the NVPTX + gdb target.
      unsigned LocalNVPTXAddressSpace;
      if (Asm->TM.getTargetTriple().isNVPTX() && DD->tuneForGDB()) {
        const DIExpression *NewExpr =
            DIExpression::extractAddressClass(Expr, LocalNVPTXAddressSpace);
        if (NewExpr != Expr) {
          Expr = NewExpr;
          NVPTXAddressSpace = LocalNVPTXAddressSpace;
        }
      }
      DwarfExpr->addFragmentOffset(Expr);
    }

    if (Global) {
      const MCSymbol *Sym = Asm->getSymbol(Global);
      if (Global->isThreadLocal()) {
        if (Asm->TM.useEmulatedTLS()) {
          // TODO: add debug info for emulated thread local mode.
        } else {
          // FIXME: Make this work with -gsplit-dwarf.
          unsigned PointerSize = Asm->getDataLayout().getPointerSize();
          assert((PointerSize == 4 || PointerSize == 8) &&
                 "Add support for other sizes if necessary");
          // Based on GCC's support for TLS:
          if (!DD->useSplitDwarf()) {
            // 1) Start with a constu of the appropriate pointer size
            addUInt(*Loc, dwarf::DW_FORM_data1,
                    PointerSize == 4 ? dwarf::DW_OP_const4u
                                     : dwarf::DW_OP_const8u);
            // 2) containing the (relocated) offset of the TLS variable
            // within the module's TLS block.
            addExpr(*Loc,
                    PointerSize == 4 ? dwarf::DW_FORM_data4
                                     : dwarf::DW_FORM_data8,
                    Asm->getObjFileLowering().getDebugThreadLocalSymbol(Sym));
          } else {
            // Split DWARF: refer to the TLS entry through the address pool.
            addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_const_index);
            addUInt(*Loc, dwarf::DW_FORM_udata,
                    DD->getAddressPool().getIndex(Sym, /* TLS */ true));
          }
          // 3) followed by an OP to make the debugger do a TLS lookup.
          addUInt(*Loc, dwarf::DW_FORM_data1,
                  DD->useGNUTLSOpcode() ? dwarf::DW_OP_GNU_push_tls_address
                                        : dwarf::DW_OP_form_tls_address);
        }
      } else {
        DD->addArangeLabel(SymbolCU(this, Sym));
        addOpAddress(*Loc, Sym);
      }
    }
    // Global variables attached to symbols are memory locations.
    // It would be better if this were unconditional, but malformed input that
    // mixes non-fragments and fragments for the same variable is too expensive
    // to detect in the verifier.
    if (DwarfExpr->isUnknownLocation())
      DwarfExpr->setMemoryLocationKind();
    DwarfExpr->addExpression(Expr);
  }
  if (Asm->TM.getTargetTriple().isNVPTX() && DD->tuneForGDB()) {
    // According to
    // https://docs.nvidia.com/cuda/archive/10.0/ptx-writers-guide-to-interoperability/index.html#cuda-specific-dwarf
    // cuda-gdb requires DW_AT_address_class for all variables to be able to
    // correctly interpret address space of the variable address.
    const unsigned NVPTX_ADDR_global_space = 5;
    addUInt(*VariableDIE, dwarf::DW_AT_address_class, dwarf::DW_FORM_data1,
            NVPTXAddressSpace ? *NVPTXAddressSpace : NVPTX_ADDR_global_space);
  }
  if (Loc)
    addBlock(*VariableDIE, dwarf::DW_AT_location, DwarfExpr->finalize());

  if (DD->useAllLinkageNames())
    addLinkageName(*VariableDIE, GV->getLinkageName());

  if (addToAccelTable) {
    DD->addAccelName(*CUNode, GV->getName(), *VariableDIE);

    // If the linkage name is different than the name, go ahead and output
    // that as well into the name table.
    if (GV->getLinkageName() != "" && GV->getName() != GV->getLinkageName() &&
        DD->useAllLinkageNames())
      DD->addAccelName(*CUNode, GV->getLinkageName(), *VariableDIE);
  }
}
331 | ||||
/// Get or create the DW_TAG_common_block DIE for Fortran COMMON block CB,
/// attaching source position and — when the block has a declaration
/// variable — a location built from GlobalExprs.
DIE *DwarfCompileUnit::getOrCreateCommonBlock(
    const DICommonBlock *CB, ArrayRef<GlobalExpr> GlobalExprs) {
  // Construct the context before querying for the existence of the DIE in case
  // such construction creates the DIE.
  DIE *ContextDIE = getOrCreateContextDIE(CB->getScope());

  if (DIE *NDie = getDIE(CB))
    return NDie;
  DIE &NDie = createAndAddDIE(dwarf::DW_TAG_common_block, *ContextDIE, CB);
  // An unnamed (blank) common block gets the conventional name "_BLNK_".
  StringRef Name = CB->getName().empty() ? "_BLNK_" : CB->getName();
  addString(NDie, dwarf::DW_AT_name, Name);
  addGlobalName(Name, NDie, CB->getScope());
  if (CB->getFile())
    addSourceLine(NDie, CB->getLineNo(), CB->getFile());
  if (DIGlobalVariable *V = CB->getDecl())
    getCU().addLocationAttribute(&NDie, V, GlobalExprs);
  return &NDie;
}
350 | ||||
351 | void DwarfCompileUnit::addRange(RangeSpan Range) { | |||
352 | DD->insertSectionLabel(Range.Begin); | |||
353 | ||||
354 | bool SameAsPrevCU = this == DD->getPrevCU(); | |||
355 | DD->setPrevCU(this); | |||
356 | // If we have no current ranges just add the range and return, otherwise, | |||
357 | // check the current section and CU against the previous section and CU we | |||
358 | // emitted into and the subprogram was contained within. If these are the | |||
359 | // same then extend our current range, otherwise add this as a new range. | |||
360 | if (CURanges.empty() || !SameAsPrevCU || | |||
361 | (&CURanges.back().End->getSection() != | |||
362 | &Range.End->getSection())) { | |||
363 | CURanges.push_back(Range); | |||
364 | return; | |||
365 | } | |||
366 | ||||
367 | CURanges.back().End = Range.End; | |||
368 | } | |||
369 | ||||
/// Pick the line-table start symbol for this CU and attach DW_AT_stmt_list
/// to the unit DIE. Skipped entirely for directives-only debug info.
void DwarfCompileUnit::initStmtList() {
  if (CUNode->isDebugDirectivesOnly())
    return;

  const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
  if (DD->useSectionsAsReferences()) {
    // Reference the start of the whole .debug_line section.
    LineTableStartSym = TLOF.getDwarfLineSection()->getBeginSymbol();
  } else {
    // Reference this CU's own line-table symbol.
    LineTableStartSym =
        Asm->OutStreamer->getDwarfLineTableSymbol(getUniqueID());
  }

  // DW_AT_stmt_list is a offset of line number information for this
  // compile unit in debug_line section. For split dwarf this is
  // left in the skeleton CU and so not included.
  // The line table entries are not always emitted in assembly, so it
  // is not okay to use line_table_start here.
  addSectionLabel(getUnitDie(), dwarf::DW_AT_stmt_list, LineTableStartSym,
                  TLOF.getDwarfLineSection()->getBeginSymbol());
}
390 | ||||
391 | void DwarfCompileUnit::applyStmtList(DIE &D) { | |||
392 | const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering(); | |||
393 | addSectionLabel(D, dwarf::DW_AT_stmt_list, LineTableStartSym, | |||
394 | TLOF.getDwarfLineSection()->getBeginSymbol()); | |||
395 | } | |||
396 | ||||
397 | void DwarfCompileUnit::attachLowHighPC(DIE &D, const MCSymbol *Begin, | |||
398 | const MCSymbol *End) { | |||
399 | assert(Begin && "Begin label should not be null!")((void)0); | |||
400 | assert(End && "End label should not be null!")((void)0); | |||
401 | assert(Begin->isDefined() && "Invalid starting label")((void)0); | |||
402 | assert(End->isDefined() && "Invalid end label")((void)0); | |||
403 | ||||
404 | addLabelAddress(D, dwarf::DW_AT_low_pc, Begin); | |||
405 | if (DD->getDwarfVersion() < 4) | |||
406 | addLabelAddress(D, dwarf::DW_AT_high_pc, End); | |||
407 | else | |||
408 | addLabelDelta(D, dwarf::DW_AT_high_pc, End, Begin); | |||
409 | } | |||
410 | ||||
411 | // Find DIE for the given subprogram and attach appropriate DW_AT_low_pc | |||
412 | // and DW_AT_high_pc attributes. If there are global variables in this | |||
413 | // scope then create and insert DIEs for these variables. | |||
// Find DIE for the given subprogram and attach appropriate DW_AT_low_pc
// and DW_AT_high_pc attributes. If there are global variables in this
// scope then create and insert DIEs for these variables.
DIE &DwarfCompileUnit::updateSubprogramScopeDIE(const DISubprogram *SP) {
  DIE *SPDie = getOrCreateSubprogramDIE(SP, includeMinimalInlineScopes());

  SmallVector<RangeSpan, 2> BB_List;
  // If basic block sections are on, ranges for each basic block section has
  // to be emitted separately.
  for (const auto &R : Asm->MBBSectionRanges)
    BB_List.push_back({R.second.BeginLabel, R.second.EndLabel});

  attachRangesOrLowHighPC(*SPDie, BB_List);

  // Apple extension: note when the frame pointer has been omitted.
  if (DD->useAppleExtensionAttributes() &&
      !DD->getCurrentFunction()->getTarget().Options.DisableFramePointerElim(
          *DD->getCurrentFunction()))
    addFlag(*SPDie, dwarf::DW_AT_APPLE_omit_frame_ptr);

  // Only include DW_AT_frame_base in full debug info
  if (!includeMinimalInlineScopes()) {
    const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
    TargetFrameLowering::DwarfFrameBase FrameBase =
        TFI->getDwarfFrameBase(*Asm->MF);
    switch (FrameBase.Kind) {
    case TargetFrameLowering::DwarfFrameBase::Register: {
      // Frame base is a physical register; virtual registers get no
      // frame base at all.
      if (Register::isPhysicalRegister(FrameBase.Location.Reg)) {
        MachineLocation Location(FrameBase.Location.Reg);
        addAddress(*SPDie, dwarf::DW_AT_frame_base, Location);
      }
      break;
    }
    case TargetFrameLowering::DwarfFrameBase::CFA: {
      // Frame base is the canonical frame address.
      DIELoc *Loc = new (DIEValueAllocator) DIELoc;
      addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_call_frame_cfa);
      addBlock(*SPDie, dwarf::DW_AT_frame_base, Loc);
      break;
    }
    case TargetFrameLowering::DwarfFrameBase::WasmFrameBase: {
      // FIXME: duplicated from Target/WebAssembly/WebAssembly.h
      // don't want to depend on target specific headers in this code?
      const unsigned TI_GLOBAL_RELOC = 3;
      if (FrameBase.Location.WasmLoc.Kind == TI_GLOBAL_RELOC) {
        // These need to be relocatable.
        assert(FrameBase.Location.WasmLoc.Index == 0); // Only SP so far.
        auto SPSym = cast<MCSymbolWasm>(
            Asm->GetExternalSymbolSymbol("__stack_pointer"));
        // FIXME: this repeats what WebAssemblyMCInstLower::
        // GetExternalSymbolSymbol does, since if there's no code that
        // refers to this symbol, we have to set it here.
        SPSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL);
        SPSym->setGlobalType(wasm::WasmGlobalType{
            uint8_t(Asm->getSubtargetInfo().getTargetTriple().getArch() ==
                            Triple::wasm64
                        ? wasm::WASM_TYPE_I64
                        : wasm::WASM_TYPE_I32),
            true});
        DIELoc *Loc = new (DIEValueAllocator) DIELoc;
        addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_WASM_location);
        addSInt(*Loc, dwarf::DW_FORM_sdata, TI_GLOBAL_RELOC);
        if (!isDwoUnit()) {
          addLabel(*Loc, dwarf::DW_FORM_data4, SPSym);
          DD->addArangeLabel(SymbolCU(this, SPSym));
        } else {
          // FIXME: when writing dwo, we need to avoid relocations. Probably
          // the "right" solution is to treat globals the way func and data
          // symbols are (with entries in .debug_addr).
          // For now, since we only ever use index 0, this should work as-is.
          addUInt(*Loc, dwarf::DW_FORM_data4, FrameBase.Location.WasmLoc.Index);
        }
        addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_stack_value);
        addBlock(*SPDie, dwarf::DW_AT_frame_base, Loc);
      } else {
        // Non-relocatable Wasm location: build an ordinary DWARF expression.
        DIELoc *Loc = new (DIEValueAllocator) DIELoc;
        DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
        DIExpressionCursor Cursor({});
        DwarfExpr.addWasmLocation(FrameBase.Location.WasmLoc.Kind,
                                  FrameBase.Location.WasmLoc.Index);
        DwarfExpr.addExpression(std::move(Cursor));
        addBlock(*SPDie, dwarf::DW_AT_frame_base, DwarfExpr.finalize());
      }
      break;
    }
    }
  }

  // Add name to the name table, we do this here because we're guaranteed
  // to have concrete versions of our DW_TAG_subprogram nodes.
  DD->addSubprogramNames(*CUNode, SP, *SPDie);

  return *SPDie;
}
503 | ||||
504 | // Construct a DIE for this scope. | |||
// Construct a DIE for this scope.
// Appends the resulting DIE(s) to FinalChildren: either one scope DIE with
// its children attached, or — when the lexical scope itself would be
// pointless — the children directly.
void DwarfCompileUnit::constructScopeDIE(
    LexicalScope *Scope, SmallVectorImpl<DIE *> &FinalChildren) {
  if (!Scope || !Scope->getScopeNode())
    return;

  auto *DS = Scope->getScopeNode();

  assert((Scope->getInlinedAt() || !isa<DISubprogram>(DS)) &&
         "Only handle inlined subprograms here, use "
         "constructSubprogramScopeDIE for non-inlined "
         "subprograms");

  SmallVector<DIE *, 8> Children;

  // We try to create the scope DIE first, then the children DIEs. This will
  // avoid creating un-used children then removing them later when we find out
  // the scope DIE is null.
  DIE *ScopeDIE;
  if (Scope->getParent() && isa<DISubprogram>(DS)) {
    // Inlined subprogram scope.
    ScopeDIE = constructInlinedScopeDIE(Scope);
    if (!ScopeDIE)
      return;
    // We create children when the scope DIE is not null.
    createScopeChildrenDIE(Scope, Children);
  } else {
    // Early exit when we know the scope DIE is going to be null.
    if (DD->isLexicalScopeDIENull(Scope))
      return;

    bool HasNonScopeChildren = false;

    // We create children here when we know the scope DIE is not going to be
    // null and the children will be added to the scope DIE.
    createScopeChildrenDIE(Scope, Children, &HasNonScopeChildren);

    // If there are only other scopes as children, put them directly in the
    // parent instead, as this scope would serve no purpose.
    if (!HasNonScopeChildren) {
      FinalChildren.insert(FinalChildren.end(),
                           std::make_move_iterator(Children.begin()),
                           std::make_move_iterator(Children.end()));
      return;
    }
    ScopeDIE = constructLexicalScopeDIE(Scope);
    assert(ScopeDIE && "Scope DIE should not be null.");
  }

  // Add children
  for (auto &I : Children)
    ScopeDIE->addChild(std::move(I));

  FinalChildren.push_back(std::move(ScopeDIE));
}
558 | ||||
/// Register Range as a range list and attach DW_AT_ranges to ScopeDIE,
/// using a rnglistx index for DWARFv5 and a section offset/label for
/// earlier versions.
void DwarfCompileUnit::addScopeRangeList(DIE &ScopeDIE,
                                         SmallVector<RangeSpan, 2> Range) {

  HasRangeLists = true;

  // Add the range list to the set of ranges to be emitted.
  // Pre-v5 split DWARF keeps range lists in the skeleton unit's holder.
  auto IndexAndList =
      (DD->getDwarfVersion() < 5 && Skeleton ? Skeleton->DU : DU)
          ->addRange(*(Skeleton ? Skeleton : this), std::move(Range));

  uint32_t Index = IndexAndList.first;
  auto &List = *IndexAndList.second;

  // Under fission, ranges are specified by constant offsets relative to the
  // CU's DW_AT_GNU_ranges_base.
  // FIXME: For DWARF v5, do not generate the DW_AT_ranges attribute under
  // fission until we support the forms using the .debug_addr section
  // (DW_RLE_startx_endx etc.).
  if (DD->getDwarfVersion() >= 5)
    addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_rnglistx, Index);
  else {
    const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
    const MCSymbol *RangeSectionSym =
        TLOF.getDwarfRangesSection()->getBeginSymbol();
    // .dwo units use a section-relative delta; otherwise a plain label.
    if (isDwoUnit())
      addSectionDelta(ScopeDIE, dwarf::DW_AT_ranges, List.Label,
                      RangeSectionSym);
    else
      addSectionLabel(ScopeDIE, dwarf::DW_AT_ranges, List.Label,
                      RangeSectionSym);
  }
}
591 | ||||
/// Attach either DW_AT_low_pc/high_pc or a DW_AT_ranges list to Die,
/// whichever suits Ranges.
void DwarfCompileUnit::attachRangesOrLowHighPC(
    DIE &Die, SmallVector<RangeSpan, 2> Ranges) {
  assert(!Ranges.empty());
  // Use low/high PC when the ranges section is disabled, or when there is a
  // single range — except that with alwaysUseRanges() a single range only
  // stays low/high if it begins exactly at its section's start label.
  if (!DD->useRangesSection() ||
      (Ranges.size() == 1 &&
       (!DD->alwaysUseRanges() ||
        DD->getSectionLabel(&Ranges.front().Begin->getSection()) ==
            Ranges.front().Begin))) {
    const RangeSpan &Front = Ranges.front();
    const RangeSpan &Back = Ranges.back();
    attachLowHighPC(Die, Front.Begin, Back.End);
  } else
    addScopeRangeList(Die, std::move(Ranges));
}
606 | ||||
/// Overload for instruction ranges: convert each InsnRange to label-bounded
/// RangeSpans — splitting any range that crosses basic-block-section
/// boundaries into one span per section — then dispatch to the RangeSpan
/// overload.
void DwarfCompileUnit::attachRangesOrLowHighPC(
    DIE &Die, const SmallVectorImpl<InsnRange> &Ranges) {
  SmallVector<RangeSpan, 2> List;
  List.reserve(Ranges.size());
  for (const InsnRange &R : Ranges) {
    auto *BeginLabel = DD->getLabelBeforeInsn(R.first);
    auto *EndLabel = DD->getLabelAfterInsn(R.second);

    const auto *BeginMBB = R.first->getParent();
    const auto *EndMBB = R.second->getParent();

    const auto *MBB = BeginMBB;
    // Basic block sections allows basic block subsets to be placed in unique
    // sections. For each section, the begin and end label must be added to the
    // list. If there is more than one range, debug ranges must be used.
    // Otherwise, low/high PC can be used.
    // FIXME: Debug Info Emission depends on block order and this assumes that
    // the order of blocks will be frozen beyond this point.
    do {
      // Emit a span when this block closes a section or reaches the end
      // block's section; interior sections use their own begin/end labels.
      if (MBB->sameSection(EndMBB) || MBB->isEndSection()) {
        auto MBBSectionRange = Asm->MBBSectionRanges[MBB->getSectionIDNum()];
        List.push_back(
            {MBB->sameSection(BeginMBB) ? BeginLabel
                                        : MBBSectionRange.BeginLabel,
             MBB->sameSection(EndMBB) ? EndLabel : MBBSectionRange.EndLabel});
      }
      if (MBB->sameSection(EndMBB))
        break;
      MBB = MBB->getNextNode();
    } while (true);
  }
  attachRangesOrLowHighPC(Die, std::move(List));
}
640 | ||||
// This scope represents inlined body of a function. Construct DIE to
// represent this concrete inlined copy of the function.
// Returns the DW_TAG_inlined_subroutine DIE (not yet attached to a parent).
DIE *DwarfCompileUnit::constructInlinedScopeDIE(LexicalScope *Scope) {
  assert(Scope->getScopeNode());
  auto *DS = Scope->getScopeNode();
  auto *InlinedSP = getDISubprogram(DS);
  // Find the subprogram's DwarfCompileUnit in the SPMap in case the subprogram
  // was inlined from another compile unit.
  DIE *OriginDIE = getAbstractSPDies()[InlinedSP];
  assert(OriginDIE && "Unable to find original DIE for an inlined subprogram.");

  auto ScopeDIE = DIE::get(DIEValueAllocator, dwarf::DW_TAG_inlined_subroutine);
  // Link this concrete instance back to the abstract definition.
  addDIEEntry(*ScopeDIE, dwarf::DW_AT_abstract_origin, *OriginDIE);

  attachRangesOrLowHighPC(*ScopeDIE, Scope->getRanges());

  // Add the call site information to the DIE.
  const DILocation *IA = Scope->getInlinedAt();
  addUInt(*ScopeDIE, dwarf::DW_AT_call_file, None,
          getOrCreateSourceID(IA->getFile()));
  addUInt(*ScopeDIE, dwarf::DW_AT_call_line, None, IA->getLine());
  if (IA->getColumn())
    addUInt(*ScopeDIE, dwarf::DW_AT_call_column, None, IA->getColumn());
  if (IA->getDiscriminator() && DD->getDwarfVersion() >= 4)
    addUInt(*ScopeDIE, dwarf::DW_AT_GNU_discriminator, None,
            IA->getDiscriminator());

  // Add name to the name table, we do this here because we're guaranteed
  // to have concrete versions of our DW_TAG_inlined_subprogram nodes.
  DD->addSubprogramNames(*CUNode, InlinedSP, *ScopeDIE);

  return ScopeDIE;
}
674 | ||||
675 | // Construct new DW_TAG_lexical_block for this scope and attach | |||
676 | // DW_AT_low_pc/DW_AT_high_pc labels. | |||
677 | DIE *DwarfCompileUnit::constructLexicalScopeDIE(LexicalScope *Scope) { | |||
678 | if (DD->isLexicalScopeDIENull(Scope)) | |||
679 | return nullptr; | |||
680 | ||||
681 | auto ScopeDIE = DIE::get(DIEValueAllocator, dwarf::DW_TAG_lexical_block); | |||
682 | if (Scope->isAbstractScope()) | |||
683 | return ScopeDIE; | |||
684 | ||||
685 | attachRangesOrLowHighPC(*ScopeDIE, Scope->getRanges()); | |||
686 | ||||
687 | return ScopeDIE; | |||
688 | } | |||
689 | ||||
690 | /// constructVariableDIE - Construct a DIE for the given DbgVariable. | |||
691 | DIE *DwarfCompileUnit::constructVariableDIE(DbgVariable &DV, bool Abstract) { | |||
692 | auto D = constructVariableDIEImpl(DV, Abstract); | |||
693 | DV.setDIE(*D); | |||
694 | return D; | |||
695 | } | |||
696 | ||||
697 | DIE *DwarfCompileUnit::constructLabelDIE(DbgLabel &DL, | |||
698 | const LexicalScope &Scope) { | |||
699 | auto LabelDie = DIE::get(DIEValueAllocator, DL.getTag()); | |||
700 | insertDIE(DL.getLabel(), LabelDie); | |||
701 | DL.setDIE(*LabelDie); | |||
702 | ||||
703 | if (Scope.isAbstractScope()) | |||
704 | applyLabelAttributes(DL, *LabelDie); | |||
705 | ||||
706 | return LabelDie; | |||
707 | } | |||
708 | ||||
/// Build the DIE for a single variable. The location description is chosen
/// from, in priority order: nothing (abstract variables), a .debug_loc list
/// index, a single DbgValueLoc (register / constant / target index, possibly
/// variadic), or frame-index expressions for stack-resident variables.
DIE *DwarfCompileUnit::constructVariableDIEImpl(const DbgVariable &DV,
                                                bool Abstract) {
  // Define variable debug information entry.
  auto VariableDie = DIE::get(DIEValueAllocator, DV.getTag());
  insertDIE(DV.getVariable(), VariableDie);

  // Abstract variables only carry static attributes (name, type, ...);
  // no location information is attached.
  if (Abstract) {
    applyVariableAttributes(DV, *VariableDie);
    return VariableDie;
  }

  // Add variable address.

  // ~0U marks "no location list"; otherwise reference the location list.
  unsigned Index = DV.getDebugLocListIndex();
  if (Index != ~0U) {
    addLocationList(*VariableDie, dwarf::DW_AT_location, Index);
    auto TagOffset = DV.getDebugLocListTagOffset();
    if (TagOffset)
      addUInt(*VariableDie, dwarf::DW_AT_LLVM_tag_offset, dwarf::DW_FORM_data1,
              *TagOffset);
    return VariableDie;
  }

  // Check if variable has a single location description.
  if (auto *DVal = DV.getValueLoc()) {
    if (!DVal->isVariadic()) {
      // Non-variadic: exactly one location entry describes the value.
      const DbgValueLocEntry *Entry = DVal->getLocEntries().begin();
      if (Entry->isLocation()) {
        addVariableAddress(DV, *VariableDie, Entry->getLoc());
      } else if (Entry->isInt()) {
        auto *Expr = DV.getSingleExpression();
        if (Expr && Expr->getNumElements()) {
          DIELoc *Loc = new (DIEValueAllocator) DIELoc;
          DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
          // If there is an expression, emit raw unsigned bytes.
          DwarfExpr.addFragmentOffset(Expr);
          DwarfExpr.addUnsignedConstant(Entry->getInt());
          DwarfExpr.addExpression(Expr);
          addBlock(*VariableDie, dwarf::DW_AT_location, DwarfExpr.finalize());
          if (DwarfExpr.TagOffset)
            addUInt(*VariableDie, dwarf::DW_AT_LLVM_tag_offset,
                    dwarf::DW_FORM_data1, *DwarfExpr.TagOffset);
        } else
          // No expression: emit the integer as a plain constant value.
          addConstantValue(*VariableDie, Entry->getInt(), DV.getType());
      } else if (Entry->isConstantFP()) {
        addConstantFPValue(*VariableDie, Entry->getConstantFP());
      } else if (Entry->isConstantInt()) {
        addConstantValue(*VariableDie, Entry->getConstantInt(), DV.getType());
      } else if (Entry->isTargetIndexLocation()) {
        DIELoc *Loc = new (DIEValueAllocator) DIELoc;
        DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
        const DIBasicType *BT = dyn_cast<DIBasicType>(
            static_cast<const Metadata *>(DV.getVariable()->getType()));
        DwarfDebug::emitDebugLocValue(*Asm, BT, *DVal, DwarfExpr);
        addBlock(*VariableDie, dwarf::DW_AT_location, DwarfExpr.finalize());
      }
      return VariableDie;
    }
    // If any of the location entries are registers with the value 0, then the
    // location is undefined.
    if (any_of(DVal->getLocEntries(), [](const DbgValueLocEntry &Entry) {
          return Entry.isLocation() && !Entry.getLoc().getReg();
        }))
      return VariableDie;
    const DIExpression *Expr = DV.getSingleExpression();
    assert(Expr && "Variadic Debug Value must have an Expression.");
    DIELoc *Loc = new (DIEValueAllocator) DIELoc;
    DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
    DwarfExpr.addFragmentOffset(Expr);
    DIExpressionCursor Cursor(Expr);
    const TargetRegisterInfo &TRI = *Asm->MF->getSubtarget().getRegisterInfo();

    // Emit the DWARF operations for one location entry. Returns false only
    // when a machine register cannot be expressed.
    auto AddEntry = [&](const DbgValueLocEntry &Entry,
                        DIExpressionCursor &Cursor) {
      if (Entry.isLocation()) {
        if (!DwarfExpr.addMachineRegExpression(TRI, Cursor,
                                               Entry.getLoc().getReg()))
          return false;
      } else if (Entry.isInt()) {
        // If there is an expression, emit raw unsigned bytes.
        DwarfExpr.addUnsignedConstant(Entry.getInt());
      } else if (Entry.isConstantFP()) {
        APInt RawBytes = Entry.getConstantFP()->getValueAPF().bitcastToAPInt();
        DwarfExpr.addUnsignedConstant(RawBytes);
      } else if (Entry.isConstantInt()) {
        APInt RawBytes = Entry.getConstantInt()->getValue();
        DwarfExpr.addUnsignedConstant(RawBytes);
      } else if (Entry.isTargetIndexLocation()) {
        TargetIndexLocation Loc = Entry.getTargetIndexLocation();
        // TODO TargetIndexLocation is a target-independent. Currently only the
        // WebAssembly-specific encoding is supported.
        assert(Asm->TM.getTargetTriple().isWasm());
        DwarfExpr.addWasmLocation(Loc.Index, static_cast<uint64_t>(Loc.Offset));
      } else {
        llvm_unreachable("Unsupported Entry type.");
      }
      return true;
    };

    // Walk the expression; each DW_OP_LLVM_arg index is resolved through
    // AddEntry against the corresponding location entry.
    DwarfExpr.addExpression(
        std::move(Cursor),
        [&](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
          return AddEntry(DVal->getLocEntries()[Idx], Cursor);
        });

    // Now attach the location information to the DIE.
    addBlock(*VariableDie, dwarf::DW_AT_location, DwarfExpr.finalize());
    if (DwarfExpr.TagOffset)
      addUInt(*VariableDie, dwarf::DW_AT_LLVM_tag_offset, dwarf::DW_FORM_data1,
              *DwarfExpr.TagOffset);

    return VariableDie;
  }

  // .. else use frame index.
  if (!DV.hasFrameIndexExprs())
    return VariableDie;

  Optional<unsigned> NVPTXAddressSpace;
  DIELoc *Loc = new (DIEValueAllocator) DIELoc;
  DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
  // A variable may be split over several stack slots; emit one fragment per
  // frame-index expression.
  for (auto &Fragment : DV.getFrameIndexExprs()) {
    Register FrameReg;
    const DIExpression *Expr = Fragment.Expr;
    const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
    StackOffset Offset =
        TFI->getFrameIndexReference(*Asm->MF, Fragment.FI, FrameReg);
    DwarfExpr.addFragmentOffset(Expr);

    auto *TRI = Asm->MF->getSubtarget().getRegisterInfo();
    SmallVector<uint64_t, 8> Ops;
    TRI->getOffsetOpcodes(Offset, Ops);

    // According to
    // https://docs.nvidia.com/cuda/archive/10.0/ptx-writers-guide-to-interoperability/index.html#cuda-specific-dwarf
    // cuda-gdb requires DW_AT_address_class for all variables to be able to
    // correctly interpret address space of the variable address.
    // Decode DW_OP_constu <DWARF Address Space> DW_OP_swap DW_OP_xderef
    // sequence for the NVPTX + gdb target.
    unsigned LocalNVPTXAddressSpace;
    if (Asm->TM.getTargetTriple().isNVPTX() && DD->tuneForGDB()) {
      const DIExpression *NewExpr =
          DIExpression::extractAddressClass(Expr, LocalNVPTXAddressSpace);
      if (NewExpr != Expr) {
        Expr = NewExpr;
        NVPTXAddressSpace = LocalNVPTXAddressSpace;
      }
    }
    if (Expr)
      Ops.append(Expr->elements_begin(), Expr->elements_end());
    DIExpressionCursor Cursor(Ops);
    DwarfExpr.setMemoryLocationKind();
    // Prefer an absolute frame symbol when the target provides one; otherwise
    // describe the location relative to the frame register.
    if (const MCSymbol *FrameSymbol = Asm->getFunctionFrameSymbol())
      addOpAddress(*Loc, FrameSymbol);
    else
      DwarfExpr.addMachineRegExpression(
          *Asm->MF->getSubtarget().getRegisterInfo(), Cursor, FrameReg);
    DwarfExpr.addExpression(std::move(Cursor));
  }
  if (Asm->TM.getTargetTriple().isNVPTX() && DD->tuneForGDB()) {
    // According to
    // https://docs.nvidia.com/cuda/archive/10.0/ptx-writers-guide-to-interoperability/index.html#cuda-specific-dwarf
    // cuda-gdb requires DW_AT_address_class for all variables to be able to
    // correctly interpret address space of the variable address.
    const unsigned NVPTX_ADDR_local_space = 6;
    addUInt(*VariableDie, dwarf::DW_AT_address_class, dwarf::DW_FORM_data1,
            NVPTXAddressSpace ? *NVPTXAddressSpace : NVPTX_ADDR_local_space);
  }
  addBlock(*VariableDie, dwarf::DW_AT_location, DwarfExpr.finalize());
  if (DwarfExpr.TagOffset)
    addUInt(*VariableDie, dwarf::DW_AT_LLVM_tag_offset, dwarf::DW_FORM_data1,
            *DwarfExpr.TagOffset);

  return VariableDie;
}
884 | ||||
885 | DIE *DwarfCompileUnit::constructVariableDIE(DbgVariable &DV, | |||
886 | const LexicalScope &Scope, | |||
887 | DIE *&ObjectPointer) { | |||
888 | auto Var = constructVariableDIE(DV, Scope.isAbstractScope()); | |||
889 | if (DV.isObjectPointer()) | |||
890 | ObjectPointer = Var; | |||
891 | return Var; | |||
892 | } | |||
893 | ||||
/// Return all DIVariables that appear in count: expressions.
/// These are the variables a (possibly dynamic) array type's bounds,
/// data-location, associated and allocated expressions refer to; they must
/// be emitted before the array variable itself (see sortLocalVars).
static SmallVector<const DIVariable *, 2> dependencies(DbgVariable *Var) {
  SmallVector<const DIVariable *, 2> Result;
  // Only array types carry such references.
  auto *Array = dyn_cast<DICompositeType>(Var->getType());
  if (!Array || Array->getTag() != dwarf::DW_TAG_array_type)
    return Result;
  if (auto *DLVar = Array->getDataLocation())
    Result.push_back(DLVar);
  if (auto *AsVar = Array->getAssociated())
    Result.push_back(AsVar);
  if (auto *AlVar = Array->getAllocated())
    Result.push_back(AlVar);
  // Each subrange bound (count / lower / upper / stride) may be a DIVariable
  // rather than a constant; collect those.
  for (auto *El : Array->getElements()) {
    if (auto *Subrange = dyn_cast<DISubrange>(El)) {
      if (auto Count = Subrange->getCount())
        if (auto *Dependency = Count.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
      if (auto LB = Subrange->getLowerBound())
        if (auto *Dependency = LB.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
      if (auto UB = Subrange->getUpperBound())
        if (auto *Dependency = UB.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
      if (auto ST = Subrange->getStride())
        if (auto *Dependency = ST.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
    } else if (auto *GenericSubrange = dyn_cast<DIGenericSubrange>(El)) {
      if (auto Count = GenericSubrange->getCount())
        if (auto *Dependency = Count.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
      if (auto LB = GenericSubrange->getLowerBound())
        if (auto *Dependency = LB.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
      if (auto UB = GenericSubrange->getUpperBound())
        if (auto *Dependency = UB.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
      if (auto ST = GenericSubrange->getStride())
        if (auto *Dependency = ST.dyn_cast<DIVariable *>())
          Result.push_back(Dependency);
    }
  }
  return Result;
}
937 | ||||
/// Sort local variables so that variables appearing inside of helper
/// expressions come first.
/// Performs a stable topological sort (iterative DFS over an explicit
/// worklist) using dependencies() as the edge relation.
static SmallVector<DbgVariable *, 8>
sortLocalVars(SmallVectorImpl<DbgVariable *> &Input) {
  SmallVector<DbgVariable *, 8> Result;
  // The int bit records whether the variable's dependencies have already
  // been pushed (1) or still need visiting (0).
  SmallVector<PointerIntPair<DbgVariable *, 1>, 8> WorkList;
  // Map back from a DIVariable to its containing DbgVariable.
  SmallDenseMap<const DILocalVariable *, DbgVariable *> DbgVar;
  // Set of DbgVariables in Result.
  SmallDenseSet<DbgVariable *, 8> Visited;
  // For cycle detection.
  SmallDenseSet<DbgVariable *, 8> Visiting;

  // Initialize the worklist and the DIVariable lookup table.
  // Reversed so that popping from the back preserves Input's order (stable).
  for (auto Var : reverse(Input)) {
    DbgVar.insert({Var->getVariable(), Var});
    WorkList.push_back({Var, 0});
  }

  // Perform a stable topological sort by doing a DFS.
  while (!WorkList.empty()) {
    auto Item = WorkList.back();
    DbgVariable *Var = Item.getPointer();
    bool visitedAllDependencies = Item.getInt();
    WorkList.pop_back();

    // Dependency is in a different lexical scope or a global.
    if (!Var)
      continue;

    // Already handled.
    if (Visited.count(Var))
      continue;

    // Add to Result if all dependencies are visited.
    if (visitedAllDependencies) {
      Visited.insert(Var);
      Result.push_back(Var);
      continue;
    }

    // Detect cycles.
    auto Res = Visiting.insert(Var);
    if (!Res.second) {
      assert(false && "dependency cycle in local variables");
      // In release builds, bail out with the partial order computed so far
      // rather than looping forever.
      return Result;
    }

    // Push dependencies and this node onto the worklist, so that this node is
    // visited again after all of its dependencies are handled.
    WorkList.push_back({Var, 1});
    for (auto *Dependency : dependencies(Var)) {
      // DbgVar lookup yields nullptr for dependencies outside this scope;
      // those entries are skipped by the !Var check above.
      auto Dep = dyn_cast_or_null<const DILocalVariable>(Dependency);
      WorkList.push_back({DbgVar[Dep], 0});
    }
  }
  return Result;
}
996 | ||||
/// Populate Children with the DIEs for everything inside Scope: arguments,
/// local variables, imported entities, labels and nested scopes (in that
/// order). Returns the DIE of the object-pointer variable, if any.
DIE *DwarfCompileUnit::createScopeChildrenDIE(LexicalScope *Scope,
                                              SmallVectorImpl<DIE *> &Children,
                                              bool *HasNonScopeChildren) {
  assert(Children.empty());
  DIE *ObjectPointer = nullptr;

  // Emit function arguments (order is significant).
  auto Vars = DU->getScopeVariables().lookup(Scope);
  for (auto &DV : Vars.Args)
    Children.push_back(constructVariableDIE(*DV.second, *Scope, ObjectPointer));

  // Emit local variables.
  auto Locals = sortLocalVars(Vars.Locals);
  for (DbgVariable *DV : Locals)
    Children.push_back(constructVariableDIE(*DV, *Scope, ObjectPointer));

  // Skip imported directives in gmlt-like data.
  if (!includeMinimalInlineScopes()) {
    // There is no need to emit empty lexical block DIE.
    for (const auto *IE : ImportedEntities[Scope->getScopeNode()])
      Children.push_back(
          constructImportedEntityDIE(cast<DIImportedEntity>(IE)));
  }

  // NOTE: computed before labels and nested scopes are appended, so the flag
  // reflects variables and imported entities only.
  if (HasNonScopeChildren)
    *HasNonScopeChildren = !Children.empty();

  for (DbgLabel *DL : DU->getScopeLabels().lookup(Scope))
    Children.push_back(constructLabelDIE(*DL, *Scope));

  for (LexicalScope *LS : Scope->getChildren())
    constructScopeDIE(LS, Children);

  return ObjectPointer;
}
1032 | ||||
/// Build (or finish) the DIE for a concrete subprogram definition, filling in
/// its children from Scope and appending an unspecified-parameters child for
/// variadic functions.
DIE &DwarfCompileUnit::constructSubprogramScopeDIE(const DISubprogram *Sub,
                                                   LexicalScope *Scope) {
  DIE &ScopeDIE = updateSubprogramScopeDIE(Sub);

  // Scope may be null when no location information survived for the function.
  if (Scope) {
    assert(!Scope->getInlinedAt());
    assert(!Scope->isAbstractScope());
    // Collect lexical scope children first.
    // ObjectPointer might be a local (non-argument) local variable if it's a
    // block's synthetic this pointer.
    if (DIE *ObjectPointer = createAndAddScopeChildren(Scope, ScopeDIE))
      addDIEEntry(ScopeDIE, dwarf::DW_AT_object_pointer, *ObjectPointer);
  }

  // If this is a variadic function, add an unspecified parameter.
  DITypeRefArray FnArgs = Sub->getType()->getTypeArray();

  // If we have a single element of null, it is a function that returns void.
  // If we have more than one elements and the last one is null, it is a
  // variadic function.
  if (FnArgs.size() > 1 && !FnArgs[FnArgs.size() - 1] &&
      !includeMinimalInlineScopes())
    ScopeDIE.addChild(
        DIE::get(DIEValueAllocator, dwarf::DW_TAG_unspecified_parameters));

  return ScopeDIE;
}
1060 | ||||
1061 | DIE *DwarfCompileUnit::createAndAddScopeChildren(LexicalScope *Scope, | |||
1062 | DIE &ScopeDIE) { | |||
1063 | // We create children when the scope DIE is not null. | |||
1064 | SmallVector<DIE *, 8> Children; | |||
1065 | DIE *ObjectPointer = createScopeChildrenDIE(Scope, Children); | |||
1066 | ||||
1067 | // Add children | |||
1068 | for (auto &I : Children) | |||
1069 | ScopeDIE.addChild(std::move(I)); | |||
1070 | ||||
1071 | return ObjectPointer; | |||
1072 | } | |||
1073 | ||||
/// Create the abstract (DW_AT_inline) definition DIE for an inlined
/// subprogram, choosing the right parent context and compile unit, and
/// populate it with the scope's children. Idempotent per scope node.
void DwarfCompileUnit::constructAbstractSubprogramScopeDIE(
    LexicalScope *Scope) {
  DIE *&AbsDef = getAbstractSPDies()[Scope->getScopeNode()];
  // Already constructed on an earlier call.
  if (AbsDef)
    return;

  auto *SP = cast<DISubprogram>(Scope->getScopeNode());

  DIE *ContextDIE;
  DwarfCompileUnit *ContextCU = this;

  if (includeMinimalInlineScopes())
    ContextDIE = &getUnitDie();
  // Some of this is duplicated from DwarfUnit::getOrCreateSubprogramDIE, with
  // the important distinction that the debug node is not associated with the
  // DIE (since the debug node will be associated with the concrete DIE, if
  // any). It could be refactored to some common utility function.
  else if (auto *SPDecl = SP->getDeclaration()) {
    ContextDIE = &getUnitDie();
    getOrCreateSubprogramDIE(SPDecl);
  } else {
    ContextDIE = getOrCreateContextDIE(SP->getScope());
    // The scope may be shared with a subprogram that has already been
    // constructed in another CU, in which case we need to construct this
    // subprogram in the same CU.
    ContextCU = DD->lookupCU(ContextDIE->getUnitDie());
  }

  // Passing null as the associated node because the abstract definition
  // shouldn't be found by lookup.
  AbsDef = &ContextCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
  ContextCU->applySubprogramAttributesToDefinition(SP, *AbsDef);

  if (!ContextCU->includeMinimalInlineScopes())
    ContextCU->addUInt(*AbsDef, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
  if (DIE *ObjectPointer = ContextCU->createAndAddScopeChildren(Scope, *AbsDef))
    ContextCU->addDIEEntry(*AbsDef, dwarf::DW_AT_object_pointer, *ObjectPointer);
}
1112 | ||||
1113 | bool DwarfCompileUnit::useGNUAnalogForDwarf5Feature() const { | |||
1114 | return DD->getDwarfVersion() == 4 && !DD->tuneForLLDB(); | |||
1115 | } | |||
1116 | ||||
1117 | dwarf::Tag DwarfCompileUnit::getDwarf5OrGNUTag(dwarf::Tag Tag) const { | |||
1118 | if (!useGNUAnalogForDwarf5Feature()) | |||
1119 | return Tag; | |||
1120 | switch (Tag) { | |||
1121 | case dwarf::DW_TAG_call_site: | |||
1122 | return dwarf::DW_TAG_GNU_call_site; | |||
1123 | case dwarf::DW_TAG_call_site_parameter: | |||
1124 | return dwarf::DW_TAG_GNU_call_site_parameter; | |||
1125 | default: | |||
1126 | llvm_unreachable("DWARF5 tag with no GNU analog")__builtin_unreachable(); | |||
1127 | } | |||
1128 | } | |||
1129 | ||||
1130 | dwarf::Attribute | |||
1131 | DwarfCompileUnit::getDwarf5OrGNUAttr(dwarf::Attribute Attr) const { | |||
1132 | if (!useGNUAnalogForDwarf5Feature()) | |||
1133 | return Attr; | |||
1134 | switch (Attr) { | |||
1135 | case dwarf::DW_AT_call_all_calls: | |||
1136 | return dwarf::DW_AT_GNU_all_call_sites; | |||
1137 | case dwarf::DW_AT_call_target: | |||
1138 | return dwarf::DW_AT_GNU_call_site_target; | |||
1139 | case dwarf::DW_AT_call_origin: | |||
1140 | return dwarf::DW_AT_abstract_origin; | |||
1141 | case dwarf::DW_AT_call_return_pc: | |||
1142 | return dwarf::DW_AT_low_pc; | |||
1143 | case dwarf::DW_AT_call_value: | |||
1144 | return dwarf::DW_AT_GNU_call_site_value; | |||
1145 | case dwarf::DW_AT_call_tail_call: | |||
1146 | return dwarf::DW_AT_GNU_tail_call; | |||
1147 | default: | |||
1148 | llvm_unreachable("DWARF5 attribute with no GNU analog")__builtin_unreachable(); | |||
1149 | } | |||
1150 | } | |||
1151 | ||||
1152 | dwarf::LocationAtom | |||
1153 | DwarfCompileUnit::getDwarf5OrGNULocationAtom(dwarf::LocationAtom Loc) const { | |||
1154 | if (!useGNUAnalogForDwarf5Feature()) | |||
1155 | return Loc; | |||
1156 | switch (Loc) { | |||
1157 | case dwarf::DW_OP_entry_value: | |||
1158 | return dwarf::DW_OP_GNU_entry_value; | |||
1159 | default: | |||
1160 | llvm_unreachable("DWARF5 location atom with no GNU analog")__builtin_unreachable(); | |||
1161 | } | |||
1162 | } | |||
1163 | ||||
/// Build a call-site child DIE under ScopeDIE. CallReg != 0 marks an indirect
/// call (target held in a register); otherwise CalleeSP identifies the
/// callee. PCAddr is the return address, CallAddr the call/branch itself.
DIE &DwarfCompileUnit::constructCallSiteEntryDIE(DIE &ScopeDIE,
                                                 const DISubprogram *CalleeSP,
                                                 bool IsTail,
                                                 const MCSymbol *PCAddr,
                                                 const MCSymbol *CallAddr,
                                                 unsigned CallReg) {
  // Insert a call site entry DIE within ScopeDIE.
  DIE &CallSiteDIE = createAndAddDIE(getDwarf5OrGNUTag(dwarf::DW_TAG_call_site),
                                     ScopeDIE, nullptr);

  if (CallReg) {
    // Indirect call.
    addAddress(CallSiteDIE, getDwarf5OrGNUAttr(dwarf::DW_AT_call_target),
               MachineLocation(CallReg));
  } else {
    // Direct call: reference the callee's subprogram DIE.
    DIE *CalleeDIE = getOrCreateSubprogramDIE(CalleeSP);
    assert(CalleeDIE && "Could not create DIE for call site entry origin");
    addDIEEntry(CallSiteDIE, getDwarf5OrGNUAttr(dwarf::DW_AT_call_origin),
                *CalleeDIE);
  }

  if (IsTail) {
    // Attach DW_AT_call_tail_call to tail calls for standards compliance.
    addFlag(CallSiteDIE, getDwarf5OrGNUAttr(dwarf::DW_AT_call_tail_call));

    // Attach the address of the branch instruction to allow the debugger to
    // show where the tail call occurred. This attribute has no GNU analog.
    //
    // GDB works backwards from non-standard usage of DW_AT_low_pc (in DWARF4
    // mode -- equivalently, in DWARF5 mode, DW_AT_call_return_pc) at tail-call
    // site entries to figure out the PC of tail-calling branch instructions.
    // This means it doesn't need the compiler to emit DW_AT_call_pc, so we
    // don't emit it here.
    //
    // There's no need to tie non-GDB debuggers to this non-standardness, as it
    // adds unnecessary complexity to the debugger. For non-GDB debuggers, emit
    // the standard DW_AT_call_pc info.
    if (!useGNUAnalogForDwarf5Feature())
      addLabelAddress(CallSiteDIE, dwarf::DW_AT_call_pc, CallAddr);
  }

  // Attach the return PC to allow the debugger to disambiguate call paths
  // from one function to another.
  //
  // The return PC is only really needed when the call /isn't/ a tail call, but
  // GDB expects it in DWARF4 mode, even for tail calls (see the comment above
  // the DW_AT_call_pc emission logic for an explanation).
  if (!IsTail || useGNUAnalogForDwarf5Feature()) {
    assert(PCAddr && "Missing return PC information for a call");
    addLabelAddress(CallSiteDIE,
                    getDwarf5OrGNUAttr(dwarf::DW_AT_call_return_pc), PCAddr);
  }

  return CallSiteDIE;
}
1219 | ||||
/// Add one call-site-parameter child DIE to CallSiteDIE per entry in Params,
/// each carrying the register the argument is passed in (DW_AT_location) and
/// a DWARF expression for the value loaded into it (DW_AT_call_value).
void DwarfCompileUnit::constructCallSiteParmEntryDIEs(
    DIE &CallSiteDIE, SmallVector<DbgCallSiteParam, 4> &Params) {
  for (const auto &Param : Params) {
    unsigned Register = Param.getRegister();
    auto CallSiteDieParam =
        DIE::get(DIEValueAllocator,
                 getDwarf5OrGNUTag(dwarf::DW_TAG_call_site_parameter));
    insertDIE(CallSiteDieParam);
    // Where the parameter lives at the call: the argument register.
    addAddress(*CallSiteDieParam, dwarf::DW_AT_location,
               MachineLocation(Register));

    DIELoc *Loc = new (DIEValueAllocator) DIELoc;
    DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
    DwarfExpr.setCallSiteParamValueFlag();

    // The value the register holds at the call site, as a DWARF expression.
    DwarfDebug::emitDebugLocValue(*Asm, nullptr, Param.getValue(), DwarfExpr);

    addBlock(*CallSiteDieParam, getDwarf5OrGNUAttr(dwarf::DW_AT_call_value),
             DwarfExpr.finalize());

    CallSiteDIE.addChild(CallSiteDieParam);
  }
}
1243 | ||||
/// Build a DIE for a using/import declaration: resolves the imported entity
/// (namespace, module, subprogram, type or global) to its DIE and references
/// it via DW_AT_import, with source location and optional alias name.
DIE *DwarfCompileUnit::constructImportedEntityDIE(
    const DIImportedEntity *Module) {
  // The DWARF tag (imported_declaration vs imported_module) comes from the
  // metadata node itself.
  DIE *IMDie = DIE::get(DIEValueAllocator, (dwarf::Tag)Module->getTag());
  insertDIE(Module, IMDie);
  DIE *EntityDie;
  auto *Entity = Module->getEntity();
  if (auto *NS = dyn_cast<DINamespace>(Entity))
    EntityDie = getOrCreateNameSpace(NS);
  else if (auto *M = dyn_cast<DIModule>(Entity))
    EntityDie = getOrCreateModule(M);
  else if (auto *SP = dyn_cast<DISubprogram>(Entity))
    EntityDie = getOrCreateSubprogramDIE(SP);
  else if (auto *T = dyn_cast<DIType>(Entity))
    EntityDie = getOrCreateTypeDIE(T);
  else if (auto *GV = dyn_cast<DIGlobalVariable>(Entity))
    EntityDie = getOrCreateGlobalVariableDIE(GV, {});
  else
    // Fall back to a plain lookup for any other entity kind.
    EntityDie = getDIE(Entity);
  assert(EntityDie);
  addSourceLine(*IMDie, Module->getLine(), Module->getFile());
  addDIEEntry(*IMDie, dwarf::DW_AT_import, *EntityDie);
  // A non-empty name means this is an aliasing import (e.g. namespace alias).
  StringRef Name = Module->getName();
  if (!Name.empty())
    addString(*IMDie, dwarf::DW_AT_name, Name);

  return IMDie;
}
1271 | ||||
1272 | void DwarfCompileUnit::finishSubprogramDefinition(const DISubprogram *SP) { | |||
1273 | DIE *D = getDIE(SP); | |||
1274 | if (DIE *AbsSPDIE = getAbstractSPDies().lookup(SP)) { | |||
1275 | if (D) | |||
1276 | // If this subprogram has an abstract definition, reference that | |||
1277 | addDIEEntry(*D, dwarf::DW_AT_abstract_origin, *AbsSPDIE); | |||
1278 | } else { | |||
1279 | assert(D || includeMinimalInlineScopes())((void)0); | |||
1280 | if (D) | |||
1281 | // And attach the attributes | |||
1282 | applySubprogramAttributesToDefinition(SP, *D); | |||
1283 | } | |||
1284 | } | |||
1285 | ||||
/// Finalize a concrete variable or label DIE: either link it to its abstract
/// origin, or (when there is none) attach the full attributes directly.
/// Labels additionally receive a DW_AT_low_pc for their symbol.
void DwarfCompileUnit::finishEntityDefinition(const DbgEntity *Entity) {
  DbgEntity *AbsEntity = getExistingAbstractEntity(Entity->getEntity());

  auto *Die = Entity->getDIE();
  /// Label may be used to generate DW_AT_low_pc, so put it outside
  /// if/else block.
  const DbgLabel *Label = nullptr;
  if (AbsEntity && AbsEntity->getDIE()) {
    // Abstract origin exists: reference it instead of duplicating attributes.
    addDIEEntry(*Die, dwarf::DW_AT_abstract_origin, *AbsEntity->getDIE());
    Label = dyn_cast<const DbgLabel>(Entity);
  } else {
    if (const DbgVariable *Var = dyn_cast<const DbgVariable>(Entity))
      applyVariableAttributes(*Var, *Die);
    else if ((Label = dyn_cast<const DbgLabel>(Entity)))
      applyLabelAttributes(*Label, *Die);
    else
      llvm_unreachable("DbgEntity must be DbgVariable or DbgLabel.");
  }

  // For labels (from either branch above), emit the label's address.
  if (Label)
    if (const auto *Sym = Label->getSymbol())
      addLabelAddress(*Die, dwarf::DW_AT_low_pc, Sym);
}
1309 | ||||
1310 | DbgEntity *DwarfCompileUnit::getExistingAbstractEntity(const DINode *Node) { | |||
1311 | auto &AbstractEntities = getAbstractEntities(); | |||
1312 | auto I = AbstractEntities.find(Node); | |||
1313 | if (I != AbstractEntities.end()) | |||
1314 | return I->second.get(); | |||
1315 | return nullptr; | |||
1316 | } | |||
1317 | ||||
1318 | void DwarfCompileUnit::createAbstractEntity(const DINode *Node, | |||
1319 | LexicalScope *Scope) { | |||
1320 | assert(Scope && Scope->isAbstractScope())((void)0); | |||
1321 | auto &Entity = getAbstractEntities()[Node]; | |||
1322 | if (isa<const DILocalVariable>(Node)) { | |||
1323 | Entity = std::make_unique<DbgVariable>( | |||
1324 | cast<const DILocalVariable>(Node), nullptr /* IA */);; | |||
1325 | DU->addScopeVariable(Scope, cast<DbgVariable>(Entity.get())); | |||
1326 | } else if (isa<const DILabel>(Node)) { | |||
1327 | Entity = std::make_unique<DbgLabel>( | |||
1328 | cast<const DILabel>(Node), nullptr /* IA */); | |||
1329 | DU->addScopeLabel(Scope, cast<DbgLabel>(Entity.get())); | |||
1330 | } | |||
1331 | } | |||
1332 | ||||
1333 | void DwarfCompileUnit::emitHeader(bool UseOffsets) { | |||
1334 | // Don't bother labeling the .dwo unit, as its offset isn't used. | |||
1335 | if (!Skeleton && !DD->useSectionsAsReferences()) { | |||
1336 | LabelBegin = Asm->createTempSymbol("cu_begin"); | |||
1337 | Asm->OutStreamer->emitLabel(LabelBegin); | |||
1338 | } | |||
1339 | ||||
1340 | dwarf::UnitType UT = Skeleton ? dwarf::DW_UT_split_compile | |||
1341 | : DD->useSplitDwarf() ? dwarf::DW_UT_skeleton | |||
1342 | : dwarf::DW_UT_compile; | |||
1343 | DwarfUnit::emitCommonHeader(UseOffsets, UT); | |||
1344 | if (DD->getDwarfVersion() >= 5 && UT != dwarf::DW_UT_compile) | |||
1345 | Asm->emitInt64(getDWOId()); | |||
1346 | } | |||
1347 | ||||
/// Decide whether .debug_pubnames/.debug_pubtypes (or their GNU variants)
/// should be emitted for this compile unit, based on the CU's name-table
/// kind and the tuning/version in effect.
bool DwarfCompileUnit::hasDwarfPubSections() const {
  switch (CUNode->getNameTableKind()) {
  case DICompileUnit::DebugNameTableKind::None:
    return false;
  // Opting in to GNU Pubnames/types overrides the default to ensure these are
  // generated for things like Gold's gdb_index generation.
  case DICompileUnit::DebugNameTableKind::GNU:
    return true;
  case DICompileUnit::DebugNameTableKind::Default:
    // By default, only emit for GDB tuning with full debug info at DWARF < 5
    // (DWARF 5 uses .debug_names; Apple tuning uses its own accelerators).
    return DD->tuneForGDB() && !includeMinimalInlineScopes() &&
           !CUNode->isDebugDirectivesOnly() &&
           DD->getAccelTableKind() != AccelTableKind::Apple &&
           DD->getDwarfVersion() < 5;
  }
  llvm_unreachable("Unhandled DICompileUnit::DebugNameTableKind enum");
}
1364 | ||||
1365 | /// addGlobalName - Add a new global name to the compile unit. | |||
1366 | void DwarfCompileUnit::addGlobalName(StringRef Name, const DIE &Die, | |||
1367 | const DIScope *Context) { | |||
1368 | if (!hasDwarfPubSections()) | |||
1369 | return; | |||
1370 | std::string FullName = getParentContextString(Context) + Name.str(); | |||
1371 | GlobalNames[FullName] = &Die; | |||
1372 | } | |||
1373 | ||||
1374 | void DwarfCompileUnit::addGlobalNameForTypeUnit(StringRef Name, | |||
1375 | const DIScope *Context) { | |||
1376 | if (!hasDwarfPubSections()) | |||
1377 | return; | |||
1378 | std::string FullName = getParentContextString(Context) + Name.str(); | |||
1379 | // Insert, allowing the entry to remain as-is if it's already present | |||
1380 | // This way the CU-level type DIE is preferred over the "can't describe this | |||
1381 | // type as a unit offset because it's not really in the CU at all, it's only | |||
1382 | // in a type unit" | |||
1383 | GlobalNames.insert(std::make_pair(std::move(FullName), &getUnitDie())); | |||
1384 | } | |||
1385 | ||||
1386 | /// Add a new global type to the unit. | |||
1387 | void DwarfCompileUnit::addGlobalType(const DIType *Ty, const DIE &Die, | |||
1388 | const DIScope *Context) { | |||
1389 | if (!hasDwarfPubSections()) | |||
1390 | return; | |||
1391 | std::string FullName = getParentContextString(Context) + Ty->getName().str(); | |||
1392 | GlobalTypes[FullName] = &Die; | |||
1393 | } | |||
1394 | ||||
1395 | void DwarfCompileUnit::addGlobalTypeUnitType(const DIType *Ty, | |||
1396 | const DIScope *Context) { | |||
1397 | if (!hasDwarfPubSections()) | |||
1398 | return; | |||
1399 | std::string FullName = getParentContextString(Context) + Ty->getName().str(); | |||
1400 | // Insert, allowing the entry to remain as-is if it's already present | |||
1401 | // This way the CU-level type DIE is preferred over the "can't describe this | |||
1402 | // type as a unit offset because it's not really in the CU at all, it's only | |||
1403 | // in a type unit" | |||
1404 | GlobalTypes.insert(std::make_pair(std::move(FullName), &getUnitDie())); | |||
1405 | } | |||
1406 | ||||
1407 | void DwarfCompileUnit::addVariableAddress(const DbgVariable &DV, DIE &Die, | |||
1408 | MachineLocation Location) { | |||
1409 | if (DV.hasComplexAddress()) | |||
1410 | addComplexAddress(DV, Die, dwarf::DW_AT_location, Location); | |||
1411 | else | |||
1412 | addAddress(Die, dwarf::DW_AT_location, Location); | |||
1413 | } | |||
1414 | ||||
/// Add an address attribute to a die based on the location provided.
void DwarfCompileUnit::addAddress(DIE &Die, dwarf::Attribute Attribute,
                                  const MachineLocation &Location) {
  DIELoc *Loc = new (DIEValueAllocator) DIELoc;
  DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
  // An indirect location means the value lives in memory at the computed
  // address rather than in the register itself.
  if (Location.isIndirect())
    DwarfExpr.setMemoryLocationKind();

  DIExpressionCursor Cursor({});
  const TargetRegisterInfo &TRI = *Asm->MF->getSubtarget().getRegisterInfo();
  // If the register cannot be expressed, attach nothing at all.
  if (!DwarfExpr.addMachineRegExpression(TRI, Cursor, Location.getReg()))
    return;
  DwarfExpr.addExpression(std::move(Cursor));

  // Now attach the location information to the DIE.
  addBlock(Die, Attribute, DwarfExpr.finalize());

  // Attach the tag offset, if one was collected while building the expression.
  if (DwarfExpr.TagOffset)
    addUInt(Die, dwarf::DW_AT_LLVM_tag_offset, dwarf::DW_FORM_data1,
            *DwarfExpr.TagOffset);
}
1436 | ||||
/// Start with the address based on the location provided, and generate the
/// DWARF information necessary to find the actual variable given the extra
/// address information encoded in the DbgVariable, starting from the starting
/// location. Add the DWARF information to the die.
void DwarfCompileUnit::addComplexAddress(const DbgVariable &DV, DIE &Die,
                                         dwarf::Attribute Attribute,
                                         const MachineLocation &Location) {
  DIELoc *Loc = new (DIEValueAllocator) DIELoc;
  DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
  const DIExpression *DIExpr = DV.getSingleExpression();
  // Fragment info and location kind must be set before streaming ops.
  DwarfExpr.addFragmentOffset(DIExpr);
  DwarfExpr.setLocation(Location, DIExpr);

  DIExpressionCursor Cursor(DIExpr);

  // DW_OP_entry_value must wrap the register expression, so open it first.
  if (DIExpr->isEntryValue())
    DwarfExpr.beginEntryValueExpression(Cursor);

  const TargetRegisterInfo &TRI = *Asm->MF->getSubtarget().getRegisterInfo();
  // If the register cannot be expressed, attach nothing at all.
  if (!DwarfExpr.addMachineRegExpression(TRI, Cursor, Location.getReg()))
    return;
  DwarfExpr.addExpression(std::move(Cursor));

  // Now attach the location information to the DIE.
  addBlock(Die, Attribute, DwarfExpr.finalize());

  // Attach the tag offset, if one was collected while building the expression.
  if (DwarfExpr.TagOffset)
    addUInt(Die, dwarf::DW_AT_LLVM_tag_offset, dwarf::DW_FORM_data1,
            *DwarfExpr.TagOffset);
}
1467 | ||||
1468 | /// Add a Dwarf loclistptr attribute data and value. | |||
1469 | void DwarfCompileUnit::addLocationList(DIE &Die, dwarf::Attribute Attribute, | |||
1470 | unsigned Index) { | |||
1471 | dwarf::Form Form = (DD->getDwarfVersion() >= 5) | |||
1472 | ? dwarf::DW_FORM_loclistx | |||
1473 | : DD->getDwarfSectionOffsetForm(); | |||
1474 | addAttribute(Die, Attribute, Form, DIELocList(Index)); | |||
1475 | } | |||
1476 | ||||
1477 | void DwarfCompileUnit::applyVariableAttributes(const DbgVariable &Var, | |||
1478 | DIE &VariableDie) { | |||
1479 | StringRef Name = Var.getName(); | |||
1480 | if (!Name.empty()) | |||
1481 | addString(VariableDie, dwarf::DW_AT_name, Name); | |||
1482 | const auto *DIVar = Var.getVariable(); | |||
1483 | if (DIVar) | |||
1484 | if (uint32_t AlignInBytes = DIVar->getAlignInBytes()) | |||
1485 | addUInt(VariableDie, dwarf::DW_AT_alignment, dwarf::DW_FORM_udata, | |||
1486 | AlignInBytes); | |||
1487 | ||||
1488 | addSourceLine(VariableDie, DIVar); | |||
1489 | addType(VariableDie, Var.getType()); | |||
1490 | if (Var.isArtificial()) | |||
1491 | addFlag(VariableDie, dwarf::DW_AT_artificial); | |||
1492 | } | |||
1493 | ||||
1494 | void DwarfCompileUnit::applyLabelAttributes(const DbgLabel &Label, | |||
1495 | DIE &LabelDie) { | |||
1496 | StringRef Name = Label.getName(); | |||
1497 | if (!Name.empty()) | |||
1498 | addString(LabelDie, dwarf::DW_AT_name, Name); | |||
1499 | const auto *DILabel = Label.getLabel(); | |||
1500 | addSourceLine(LabelDie, DILabel); | |||
1501 | } | |||
1502 | ||||
1503 | /// Add a Dwarf expression attribute data and value. | |||
1504 | void DwarfCompileUnit::addExpr(DIELoc &Die, dwarf::Form Form, | |||
1505 | const MCExpr *Expr) { | |||
1506 | addAttribute(Die, (dwarf::Attribute)0, Form, DIEExpr(Expr)); | |||
1507 | } | |||
1508 | ||||
1509 | void DwarfCompileUnit::applySubprogramAttributesToDefinition( | |||
1510 | const DISubprogram *SP, DIE &SPDie) { | |||
1511 | auto *SPDecl = SP->getDeclaration(); | |||
1512 | auto *Context = SPDecl ? SPDecl->getScope() : SP->getScope(); | |||
1513 | applySubprogramAttributes(SP, SPDie, includeMinimalInlineScopes()); | |||
1514 | addGlobalName(SP->getName(), SPDie, Context); | |||
1515 | } | |||
1516 | ||||
1517 | bool DwarfCompileUnit::isDwoUnit() const { | |||
1518 | return DD->useSplitDwarf() && Skeleton; | |||
1519 | } | |||
1520 | ||||
/// Finish a composite-type DIE that is emitted inside this CU rather than in
/// its own type unit: simply construct the full type DIE in place.
void DwarfCompileUnit::finishNonUnitTypeDIE(DIE& D, const DICompositeType *CTy) {
  constructTypeDIE(D, CTy);
}
1524 | ||||
1525 | bool DwarfCompileUnit::includeMinimalInlineScopes() const { | |||
1526 | return getCUNode()->getEmissionKind() == DICompileUnit::LineTablesOnly || | |||
1527 | (DD->useSplitDwarf() && !Skeleton); | |||
1528 | } | |||
1529 | ||||
1530 | void DwarfCompileUnit::addAddrTableBase() { | |||
1531 | const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering(); | |||
1532 | MCSymbol *Label = DD->getAddressPool().getLabel(); | |||
1533 | addSectionLabel(getUnitDie(), | |||
1534 | DD->getDwarfVersion() >= 5 ? dwarf::DW_AT_addr_base | |||
1535 | : dwarf::DW_AT_GNU_addr_base, | |||
1536 | Label, TLOF.getDwarfAddrSection()->getBeginSymbol()); | |||
1537 | } | |||
1538 | ||||
/// Attach an attribute-less base-type reference (index \p Idx into this CU's
/// ExprRefedBaseTypes) to \p Die, encoded as a ULEB128 value.
void DwarfCompileUnit::addBaseTypeRef(DIEValueList &Die, int64_t Idx) {
  addAttribute(Die, (dwarf::Attribute)0, dwarf::DW_FORM_udata,
               new (DIEValueAllocator) DIEBaseTypeRef(this, Idx));
}
1543 | ||||
/// Materialize DW_TAG_base_type DIEs for every base type referenced from
/// location expressions in this CU.
void DwarfCompileUnit::createBaseTypeDIEs() {
  // Insert the base_type DIEs directly after the CU so that their offsets will
  // fit in the fixed size ULEB128 used inside the location expressions.
  // Maintain order by iterating backwards and inserting to the front of CU
  // child list.
  for (auto &Btr : reverse(ExprRefedBaseTypes)) {
    DIE &Die = getUnitDie().addChildFront(
      DIE::get(DIEValueAllocator, dwarf::DW_TAG_base_type));
    // Synthesize a name from the encoding and bit size, e.g. "DW_ATE_signed_32".
    SmallString<32> Str;
    addString(Die, dwarf::DW_AT_name,
              Twine(dwarf::AttributeEncodingString(Btr.Encoding) +
                    "_" + Twine(Btr.BitSize)).toStringRef(Str));
    addUInt(Die, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1, Btr.Encoding);
    addUInt(Die, dwarf::DW_AT_byte_size, None, Btr.BitSize / 8);

    // Remember the DIE so base-type references can later resolve to its offset.
    Btr.Die = &Die;
  }
}
1 | //===- lib/CodeGen/DIE.h - DWARF Info Entries -------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Data structures for DWARF info entries. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_CODEGEN_DIE_H |
14 | #define LLVM_CODEGEN_DIE_H |
15 | |
16 | #include "llvm/ADT/FoldingSet.h" |
17 | #include "llvm/ADT/PointerIntPair.h" |
18 | #include "llvm/ADT/PointerUnion.h" |
19 | #include "llvm/ADT/SmallVector.h" |
20 | #include "llvm/ADT/StringRef.h" |
21 | #include "llvm/ADT/iterator.h" |
22 | #include "llvm/ADT/iterator_range.h" |
23 | #include "llvm/BinaryFormat/Dwarf.h" |
24 | #include "llvm/CodeGen/DwarfStringPoolEntry.h" |
25 | #include "llvm/Support/AlignOf.h" |
26 | #include "llvm/Support/Allocator.h" |
27 | #include <cassert> |
28 | #include <cstddef> |
29 | #include <cstdint> |
30 | #include <iterator> |
31 | #include <new> |
32 | #include <type_traits> |
33 | #include <utility> |
34 | #include <vector> |
35 | |
36 | namespace llvm { |
37 | |
38 | class AsmPrinter; |
39 | class DIE; |
40 | class DIEUnit; |
41 | class DwarfCompileUnit; |
42 | class MCExpr; |
43 | class MCSection; |
44 | class MCSymbol; |
45 | class raw_ostream; |
46 | |
//===--------------------------------------------------------------------===//
/// Dwarf abbreviation data, describes one attribute of a Dwarf abbreviation.
class DIEAbbrevData {
  /// Dwarf attribute code.
  dwarf::Attribute Attribute;

  /// Dwarf form code.
  dwarf::Form Form;

  /// Dwarf attribute value for DW_FORM_implicit_const
  int64_t Value = 0;

public:
  /// Ordinary attribute: the value is stored in the DIE, not the abbreviation.
  DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
      : Attribute(A), Form(F) {}
  /// Implicit-const attribute: the value lives here, in the abbreviation.
  DIEAbbrevData(dwarf::Attribute A, int64_t V)
      : Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}

  /// Accessors.
  /// @{
  dwarf::Attribute getAttribute() const { return Attribute; }
  dwarf::Form getForm() const { return Form; }
  int64_t getValue() const { return Value; }
  /// @}

  /// Used to gather unique data for the abbreviation folding set.
  void Profile(FoldingSetNodeID &ID) const;
};
75 | |
//===--------------------------------------------------------------------===//
/// Dwarf abbreviation, describes the organization of a debug information
/// object.
class DIEAbbrev : public FoldingSetNode {
  /// Unique number for node.
  /// Assigned by DIEAbbrevSet via setNumber(); 0 until then.
  unsigned Number = 0;

  /// Dwarf tag code.
  dwarf::Tag Tag;

  /// Whether or not this node has children.
  ///
  /// This cheats a bit in all of the uses since the values in the standard
  /// are 0 and 1 for no children and children respectively.
  bool Children;

  /// Raw data bytes for abbreviation.
  SmallVector<DIEAbbrevData, 12> Data;

public:
  DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C) {}

  /// Accessors.
  /// @{
  dwarf::Tag getTag() const { return Tag; }
  unsigned getNumber() const { return Number; }
  bool hasChildren() const { return Children; }
  const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
  void setChildrenFlag(bool hasChild) { Children = hasChild; }
  void setNumber(unsigned N) { Number = N; }
  /// @}

  /// Adds another set of attribute information to the abbreviation.
  void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
    Data.push_back(DIEAbbrevData(Attribute, Form));
  }

  /// Adds attribute with DW_FORM_implicit_const value
  void AddImplicitConstAttribute(dwarf::Attribute Attribute, int64_t Value) {
    Data.push_back(DIEAbbrevData(Attribute, Value));
  }

  /// Used to gather unique data for the abbreviation folding set.
  void Profile(FoldingSetNodeID &ID) const;

  /// Print the abbreviation using the specified asm printer.
  void Emit(const AsmPrinter *AP) const;

  void print(raw_ostream &O) const;
  void dump() const;
};
127 | |
//===--------------------------------------------------------------------===//
/// Helps unique DIEAbbrev objects and assigns abbreviation numbers.
///
/// This class will unique the DIE abbreviations for a llvm::DIE object and
/// assign a unique abbreviation number to each unique DIEAbbrev object it
/// finds. The resulting collection of DIEAbbrev objects can then be emitted
/// into the .debug_abbrev section.
class DIEAbbrevSet {
  /// The bump allocator to use when creating DIEAbbrev objects in the uniqued
  /// storage container.
  BumpPtrAllocator &Alloc;
  /// FoldingSet that uniques the abbreviations.
  FoldingSet<DIEAbbrev> AbbreviationsSet;
  /// A list of all the unique abbreviations in use.
  /// Entries are allocated from Alloc; the destructor runs their dtors.
  std::vector<DIEAbbrev *> Abbreviations;

public:
  DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
  ~DIEAbbrevSet();

  /// Generate the abbreviation declaration for a DIE and return a pointer to
  /// the generated abbreviation.
  ///
  /// \param Die the debug info entry to generate the abbreviation for.
  /// \returns A reference to the uniqued abbreviation declaration that is
  /// owned by this class.
  DIEAbbrev &uniqueAbbreviation(DIE &Die);

  /// Print all abbreviations using the specified asm printer.
  void Emit(const AsmPrinter *AP, MCSection *Section) const;
};
159 | |
//===--------------------------------------------------------------------===//
/// An integer value DIE.
///
class DIEInteger {
  uint64_t Integer;

public:
  explicit DIEInteger(uint64_t I) : Integer(I) {}

  /// Choose the best form for integer.
  /// Picks the smallest fixed-size DW_FORM_dataN that can represent \p Int,
  /// interpreted as signed or unsigned per \p IsSigned.
  /// NOTE(review): the `(char)` cast's signedness is implementation-defined;
  /// on unsigned-char targets negative values would skip data1 — presumably
  /// intentional/accepted upstream, but worth confirming.
  static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
    if (IsSigned) {
      const int64_t SignedInt = Int;
      if ((char)Int == SignedInt)
        return dwarf::DW_FORM_data1;
      if ((short)Int == SignedInt)
        return dwarf::DW_FORM_data2;
      if ((int)Int == SignedInt)
        return dwarf::DW_FORM_data4;
    } else {
      if ((unsigned char)Int == Int)
        return dwarf::DW_FORM_data1;
      if ((unsigned short)Int == Int)
        return dwarf::DW_FORM_data2;
      if ((unsigned int)Int == Int)
        return dwarf::DW_FORM_data4;
    }
    return dwarf::DW_FORM_data8;
  }

  uint64_t getValue() const { return Integer; }
  void setValue(uint64_t Val) { Integer = Val; }

  void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
198 | |
//===--------------------------------------------------------------------===//
/// An expression DIE.
/// Wraps a non-owning pointer to an MCExpr evaluated at emission time.
class DIEExpr {
  const MCExpr *Expr;

public:
  explicit DIEExpr(const MCExpr *E) : Expr(E) {}

  /// Get MCExpr.
  const MCExpr *getValue() const { return Expr; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
215 | |
//===--------------------------------------------------------------------===//
/// A label DIE.
/// Wraps a non-owning pointer to an MCSymbol whose address is emitted.
class DIELabel {
  const MCSymbol *Label;

public:
  explicit DIELabel(const MCSymbol *L) : Label(L) {}

  /// Get MCSymbol.
  const MCSymbol *getValue() const { return Label; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
232 | |
//===--------------------------------------------------------------------===//
/// A BaseTypeRef DIE.
/// References a base-type DIE in \p CU by index, emitted as a padded ULEB128.
class DIEBaseTypeRef {
  const DwarfCompileUnit *CU;
  const uint64_t Index;
  /// Fixed ULEB128 width so the offset is stable before layout is final.
  static constexpr unsigned ULEB128PadSize = 4;

public:
  explicit DIEBaseTypeRef(const DwarfCompileUnit *TheCU, uint64_t Idx)
    : CU(TheCU), Index(Idx) {}

  /// EmitValue - Emit base type reference.
  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  /// SizeOf - Determine size of the base type reference in bytes.
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
  uint64_t getIndex() const { return Index; }
};
252 | |
//===--------------------------------------------------------------------===//
/// A simple label difference DIE.
/// Emits the value of (LabelHi - LabelLo).
class DIEDelta {
  const MCSymbol *LabelHi;
  const MCSymbol *LabelLo;

public:
  DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo) : LabelHi(Hi), LabelLo(Lo) {}

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
268 | |
//===--------------------------------------------------------------------===//
/// A container for string pool string values.
///
/// This class is used with the DW_FORM_strp and DW_FORM_GNU_str_index forms.
class DIEString {
  /// Lightweight reference into the Dwarf string pool; copies are cheap.
  DwarfStringPoolEntryRef S;

public:
  DIEString(DwarfStringPoolEntryRef S) : S(S) {}

  /// Grab the string out of the object.
  StringRef getString() const { return S.getString(); }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
287 | |
//===--------------------------------------------------------------------===//
/// A container for inline string values.
///
/// This class is used with the DW_FORM_string form.
class DIEInlineString {
  StringRef S;

public:
  /// Copies \p Str into \p A so the view remains valid for the allocator's
  /// lifetime.
  template <typename Allocator>
  explicit DIEInlineString(StringRef Str, Allocator &A) : S(Str.copy(A)) {}

  ~DIEInlineString() = default;

  /// Grab the string out of the object.
  StringRef getString() const { return S; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
309 | |
//===--------------------------------------------------------------------===//
/// A pointer to another debug information entry. An instance of this class can
/// also be used as a proxy for a debug information entry not yet defined
/// (ie. types.)
class DIEEntry {
  DIE *Entry;

public:
  DIEEntry() = delete;
  explicit DIEEntry(DIE &E) : Entry(&E) {}

  DIE &getEntry() const { return *Entry; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
328 | |
//===--------------------------------------------------------------------===//
/// Represents a pointer to a location list in the debug_loc
/// section.
class DIELocList {
  /// Index into the .debug_loc vector.
  size_t Index;

public:
  DIELocList(size_t I) : Index(I) {}

  /// Grab the current index out.
  size_t getValue() const { return Index; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
347 | |
//===--------------------------------------------------------------------===//
/// An address-pool index paired with a label-difference offset.
/// (Previous comment said "A BaseTypeRef DIE." — a copy-paste error.)
class DIEAddrOffset {
  /// Index into the address pool.
  DIEInteger Addr;
  /// Offset expressed as a difference of two labels.
  DIEDelta Offset;

public:
  explicit DIEAddrOffset(uint64_t Idx, const MCSymbol *Hi, const MCSymbol *Lo)
      : Addr(Idx), Offset(Hi, Lo) {}

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
363 | |
//===--------------------------------------------------------------------===//
/// A debug information entry value. Some of these roughly correlate
/// to DWARF attribute classes.
///
/// This is a hand-rolled discriminated union: the set of alternatives is
/// generated from DIEValue.def, "small" types are stored in-place and
/// "large" ones by pointer.
class DIEBlock;
class DIELoc;
class DIEValue {
public:
  enum Type {
    isNone,
#define HANDLE_DIEVALUE(T) is##T,
#include "llvm/CodeGen/DIEValue.def"
  };

private:
  /// Type of data stored in the value.
  Type Ty = isNone;
  dwarf::Attribute Attribute = (dwarf::Attribute)0;
  dwarf::Form Form = (dwarf::Form)0;

  /// Storage for the value.
  ///
  /// All values that aren't standard layout (or are larger than 8 bytes)
  /// should be stored by reference instead of by value.
  using ValTy =
      AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
                            DIEDelta *, DIEEntry, DIEBlock *, DIELoc *,
                            DIELocList, DIEBaseTypeRef *, DIEAddrOffset *>;

  static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
                    sizeof(ValTy) <= sizeof(void *),
                "Expected all large types to be stored via pointer");

  /// Underlying stored value.
  ValTy Val;

  /// Placement-construct a T inside the raw storage.
  template <class T> void construct(T V) {
    static_assert(std::is_standard_layout<T>::value ||
                      std::is_pointer<T>::value,
                  "Expected standard layout or pointer");
    new (reinterpret_cast<void *>(&Val)) T(V);
  }

  template <class T> T *get() { return reinterpret_cast<T *>(&Val); }
  template <class T> const T *get() const {
    return reinterpret_cast<const T *>(&Val);
  }
  template <class T> void destruct() { get<T>()->~T(); }

  /// Destroy the underlying value.
  ///
  /// This should get optimized down to a no-op. We could skip it if we could
  /// add a static assert on \a std::is_trivially_copyable(), but we currently
  /// support versions of GCC that don't understand that.
  void destroyVal() {
    switch (Ty) {
    case isNone:
      return;
#define HANDLE_DIEVALUE_SMALL(T)                                               \
  case is##T:                                                                  \
    destruct<DIE##T>();                                                        \
    return;
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  case is##T:                                                                  \
    destruct<const DIE##T *>();                                                \
    return;
#include "llvm/CodeGen/DIEValue.def"
    }
  }

  /// Copy the underlying value.
  ///
  /// This should get optimized down to a simple copy. We need to actually
  /// construct the value, rather than calling memcpy, to satisfy strict
  /// aliasing rules.
  void copyVal(const DIEValue &X) {
    switch (Ty) {
    case isNone:
      return;
#define HANDLE_DIEVALUE_SMALL(T)                                               \
  case is##T:                                                                  \
    construct<DIE##T>(*X.get<DIE##T>());                                       \
    return;
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  case is##T:                                                                  \
    construct<const DIE##T *>(*X.get<const DIE##T *>());                       \
    return;
#include "llvm/CodeGen/DIEValue.def"
    }
  }

public:
  DIEValue() = default;

  DIEValue(const DIEValue &X) : Ty(X.Ty), Attribute(X.Attribute), Form(X.Form) {
    copyVal(X);
  }

  DIEValue &operator=(const DIEValue &X) {
    // Destroy the current alternative before adopting X's discriminator.
    destroyVal();
    Ty = X.Ty;
    Attribute = X.Attribute;
    Form = X.Form;
    copyVal(X);
    return *this;
  }

  ~DIEValue() { destroyVal(); }

#define HANDLE_DIEVALUE_SMALL(T)                                               \
  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T &V)      \
      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
    construct<DIE##T>(V);                                                      \
  }
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T *V)      \
      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
    assert(V && "Expected valid value")((void)0);                              \
    construct<const DIE##T *>(V);                                              \
  }
#include "llvm/CodeGen/DIEValue.def"

  /// Accessors.
  /// @{
  Type getType() const { return Ty; }
  dwarf::Attribute getAttribute() const { return Attribute; }
  dwarf::Form getForm() const { return Form; }
  explicit operator bool() const { return Ty; }
  /// @}

#define HANDLE_DIEVALUE_SMALL(T)                                               \
  const DIE##T &getDIE##T() const {                                            \
    assert(getType() == is##T && "Expected " #T)((void)0);                     \
    return *get<DIE##T>();                                                     \
  }
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  const DIE##T &getDIE##T() const {                                            \
    assert(getType() == is##T && "Expected " #T)((void)0);                     \
    return **get<const DIE##T *>();                                            \
  }
#include "llvm/CodeGen/DIEValue.def"

  /// Emit value via the Dwarf writer.
  void emitValue(const AsmPrinter *AP) const;

  /// Return the size of a value in bytes.
  unsigned SizeOf(const AsmPrinter *AP) const;

  void print(raw_ostream &O) const;
  void dump() const;
};
514 | |
/// Node for IntrusiveBackList: a pointer to the next node plus a one-bit
/// flag. An unlinked node points at itself with the flag set; within a list
/// the flag marks the link that closes the circle (last -> first), which
/// getNext() reports as "no next node".
struct IntrusiveBackListNode {
  PointerIntPair<IntrusiveBackListNode *, 1> Next;

  // Self-referential with the flag set == "unlinked" sentinel state.
  IntrusiveBackListNode() : Next(this, true) {}

  IntrusiveBackListNode *getNext() const {
    return Next.getInt() ? nullptr : Next.getPointer();
  }
};
524 | |
/// Untyped core of IntrusiveBackList: a circular singly-linked list tracked
/// by its last element (Last->Next reaches the first element).
struct IntrusiveBackListBase {
  using Node = IntrusiveBackListNode;

  Node *Last = nullptr;

  bool empty() const { return !Last; }

  void push_back(Node &N) {
    assert(N.Next.getPointer() == &N && "Expected unlinked node")((void)0);
    assert(N.Next.getInt() == true && "Expected unlinked node")((void)0);

    if (Last) {
      // N inherits Last's end-of-list link (to the first node, flag set);
      // Last now points at N as an ordinary (flag-clear) link.
      N.Next = Last->Next;
      Last->Next.setPointerAndInt(&N, false);
    }
    // An empty list leaves N's self-link intact: N is both first and last.
    Last = &N;
  }

  void push_front(Node &N) {
    assert(N.Next.getPointer() == &N && "Expected unlinked node")((void)0);
    assert(N.Next.getInt() == true && "Expected unlinked node")((void)0);

    if (Last) {
      // N becomes the first node; Last's link to it keeps the end-marker flag.
      N.Next.setPointerAndInt(Last->Next.getPointer(), false);
      Last->Next.setPointerAndInt(&N, true);
    } else {
      Last = &N;
    }
  }
};
555 | |
/// Typed wrapper over IntrusiveBackListBase providing iteration and splicing
/// for element type \p T (which must derive from IntrusiveBackListNode).
template <class T> class IntrusiveBackList : IntrusiveBackListBase {
public:
  using IntrusiveBackListBase::empty;

  void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
  void push_front(T &N) { IntrusiveBackListBase::push_front(N); }
  T &back() { return *static_cast<T *>(Last); }
  const T &back() const { return *static_cast<T *>(Last); }
  // NOTE: front() on an empty list dereferences null — callers must check
  // empty() first (same precondition as back()).
  T &front() {
    return *static_cast<T *>(Last ? Last->Next.getPointer() : nullptr);
  }
  const T &front() const {
    return *static_cast<T *>(Last ? Last->Next.getPointer() : nullptr);
  }

  /// Move all of \p Other's nodes to the end of this list, leaving \p Other
  /// empty. Each node is reset to the unlinked state and re-pushed so the
  /// circular-link invariants are rebuilt as we go.
  void takeNodes(IntrusiveBackList<T> &Other) {
    if (Other.empty())
      return;

    T *FirstNode = static_cast<T *>(Other.Last->Next.getPointer());
    T *IterNode = FirstNode;
    do {
      // Keep a pointer to the node and increment the iterator.
      T *TmpNode = IterNode;
      IterNode = static_cast<T *>(IterNode->Next.getPointer());

      // Unlink the node and push it back to this list.
      TmpNode->Next.setPointerAndInt(TmpNode, true);
      push_back(*TmpNode);
    } while (IterNode != FirstNode);

    Other.Last = nullptr;
  }

  class const_iterator;
  /// Forward iterator; becomes end() after stepping past the last node.
  class iterator
      : public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
    friend class const_iterator;

    Node *N = nullptr;

  public:
    iterator() = default;
    explicit iterator(T *N) : N(N) {}

    iterator &operator++() {
      N = N->getNext();
      return *this;
    }

    explicit operator bool() const { return N; }
    T &operator*() const { return *static_cast<T *>(N); }

    bool operator==(const iterator &X) const { return N == X.N; }
  };

  class const_iterator
      : public iterator_facade_base<const_iterator, std::forward_iterator_tag,
                                    const T> {
    const Node *N = nullptr;

  public:
    const_iterator() = default;
    // Placate MSVC by explicitly scoping 'iterator'.
    const_iterator(typename IntrusiveBackList<T>::iterator X) : N(X.N) {}
    explicit const_iterator(const T *N) : N(N) {}

    const_iterator &operator++() {
      N = N->getNext();
      return *this;
    }

    explicit operator bool() const { return N; }
    const T &operator*() const { return *static_cast<const T *>(N); }

    bool operator==(const const_iterator &X) const { return N == X.N; }
  };

  iterator begin() {
    // The first node hangs off Last (circular list); empty list -> end().
    return Last ? iterator(static_cast<T *>(Last->Next.getPointer())) : end();
  }
  const_iterator begin() const {
    return const_cast<IntrusiveBackList *>(this)->begin();
  }
  iterator end() { return iterator(); }
  const_iterator end() const { return const_iterator(); }

  static iterator toIterator(T &N) { return iterator(&N); }
  static const_iterator toIterator(const T &N) { return const_iterator(&N); }
};
646 | |
647 | /// A list of DIE values. |
648 | /// |
649 | /// This is a singly-linked list, but instead of reversing the order of |
650 | /// insertion, we keep a pointer to the back of the list so we can push in |
651 | /// order. |
652 | /// |
653 | /// There are two main reasons to choose a linked list over a customized |
654 | /// vector-like data structure. |
655 | /// |
656 | /// 1. For teardown efficiency, we want DIEs to be BumpPtrAllocated. Using a |
657 | /// linked list here makes this way easier to accomplish. |
658 | /// 2. Carrying an extra pointer per \a DIEValue isn't expensive. 45% of DIEs |
659 | /// have 2 or fewer values, and 90% have 5 or fewer. A vector would be |
660 | /// over-allocated by 50% on average anyway, the same cost as the |
661 | /// linked-list node. |
class DIEValueList {
  /// One intrusive list node wrapping a single attribute value.
  struct Node : IntrusiveBackListNode {
    DIEValue V;

    explicit Node(DIEValue V) : V(V) {}
  };

  using ListTy = IntrusiveBackList<Node>;

  ListTy List;

public:
  class const_value_iterator;
  /// Iterator over the stored DIEValues that hides the Node wrapper.
  class value_iterator
      : public iterator_adaptor_base<value_iterator, ListTy::iterator,
                                     std::forward_iterator_tag, DIEValue> {
    friend class const_value_iterator;

    using iterator_adaptor =
        iterator_adaptor_base<value_iterator, ListTy::iterator,
                              std::forward_iterator_tag, DIEValue>;

  public:
    value_iterator() = default;
    explicit value_iterator(ListTy::iterator X) : iterator_adaptor(X) {}

    // True while the underlying list iterator points at a node.
    explicit operator bool() const { return bool(wrapped()); }
    DIEValue &operator*() const { return wrapped()->V; }
  };

  /// Const counterpart of value_iterator; implicitly convertible from it.
  class const_value_iterator : public iterator_adaptor_base<
                                   const_value_iterator, ListTy::const_iterator,
                                   std::forward_iterator_tag, const DIEValue> {
    using iterator_adaptor =
        iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
                              std::forward_iterator_tag, const DIEValue>;

  public:
    const_value_iterator() = default;
    const_value_iterator(DIEValueList::value_iterator X)
        : iterator_adaptor(X.wrapped()) {}
    explicit const_value_iterator(ListTy::const_iterator X)
        : iterator_adaptor(X) {}

    explicit operator bool() const { return bool(wrapped()); }
    const DIEValue &operator*() const { return wrapped()->V; }
  };

  using value_range = iterator_range<value_iterator>;
  using const_value_range = iterator_range<const_value_iterator>;

  /// Append \p V, allocating its list node out of \p Alloc.
  /// \returns an iterator positioned on the stored value.
  value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) {
    List.push_back(*new (Alloc) Node(V));
    return value_iterator(ListTy::toIterator(List.back()));
  }
  /// Convenience overload that constructs the DIEValue in place from an
  /// attribute/form/value triple.
  template <class T>
  value_iterator addValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
                          dwarf::Form Form, T &&Value) {
    return addValue(Alloc, DIEValue(Attribute, Form, std::forward<T>(Value)));
  }

  /// Take ownership of the nodes in \p Other, and append them to the back of
  /// the list.
  void takeValues(DIEValueList &Other) { List.takeNodes(Other.List); }

  /// Iterate the values in insertion order.
  value_range values() {
    return make_range(value_iterator(List.begin()), value_iterator(List.end()));
  }
  const_value_range values() const {
    return make_range(const_value_iterator(List.begin()),
                      const_value_iterator(List.end()));
  }
};
735 | |
736 | //===--------------------------------------------------------------------===// |
737 | /// A structured debug information entry. Has an abbreviation which |
738 | /// describes its organization. |
class DIE : IntrusiveBackListNode, public DIEValueList {
  friend class IntrusiveBackList<DIE>;
  friend class DIEUnit;

  /// Dwarf unit relative offset.
  unsigned Offset = 0;
  /// Size of instance + children.
  unsigned Size = 0;
  /// Assigned abbreviation code; ~0u until setAbbrevNumber() is called.
  unsigned AbbrevNumber = ~0u;
  /// Dwarf tag code.
  dwarf::Tag Tag = (dwarf::Tag)0;
  /// Set to true to force a DIE to emit an abbreviation that says it has
  /// children even when it doesn't. This is used for unit testing purposes.
  bool ForceChildren = false;
  /// Children DIEs.
  IntrusiveBackList<DIE> Children;

  /// The owner is either the parent DIE for children of other DIEs, or a
  /// DIEUnit which contains this DIE as its unit DIE.
  PointerUnion<DIE *, DIEUnit *> Owner;

  // Private: DIEs are only created through the bump-allocating get() factory.
  explicit DIE(dwarf::Tag Tag) : Tag(Tag) {}

public:
  // DIEs are referenced by pointer (from Owner and the intrusive child
  // list), so they can be neither copied nor moved.
  DIE() = delete;
  DIE(const DIE &RHS) = delete;
  DIE(DIE &&RHS) = delete;
  DIE &operator=(const DIE &RHS) = delete;
  DIE &operator=(const DIE &&RHS) = delete;

  /// Create a new DIE with the given tag, allocated out of \p Alloc.
  static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
    return new (Alloc) DIE(Tag);
  }

  // Accessors.
  unsigned getAbbrevNumber() const { return AbbrevNumber; }
  dwarf::Tag getTag() const { return Tag; }
  /// Get the compile/type unit relative offset of this DIE.
  unsigned getOffset() const { return Offset; }
  unsigned getSize() const { return Size; }
  // True when children exist, or when ForceChildren is set for testing.
  bool hasChildren() const { return ForceChildren || !Children.empty(); }
  void setForceChildren(bool B) { ForceChildren = B; }

  using child_iterator = IntrusiveBackList<DIE>::iterator;
  using const_child_iterator = IntrusiveBackList<DIE>::const_iterator;
  using child_range = iterator_range<child_iterator>;
  using const_child_range = iterator_range<const_child_iterator>;

  /// Iterate over this DIE's children in insertion order.
  child_range children() {
    return make_range(Children.begin(), Children.end());
  }
  const_child_range children() const {
    return make_range(Children.begin(), Children.end());
  }

  // Defined out of line; presumably resolves the DIE* side of Owner -- see
  // the Owner member above.
  DIE *getParent() const;

  /// Generate the abbreviation for this DIE.
  ///
  /// Calculate the abbreviation for this, which should be uniqued and
  /// eventually used to call \a setAbbrevNumber().
  DIEAbbrev generateAbbrev() const;

  /// Set the abbreviation number for this DIE.
  void setAbbrevNumber(unsigned I) { AbbrevNumber = I; }

  /// Get the absolute offset within the .debug_info or .debug_types section
  /// for this DIE.
  uint64_t getDebugSectionOffset() const;

  /// Compute the offset of this DIE and all its children.
  ///
  /// This function gets called just before we are going to generate the debug
  /// information and gives each DIE a chance to figure out its CU relative DIE
  /// offset, unique its abbreviation and fill in the abbreviation code, and
  /// return the unit offset that points to where the next DIE will be emitted
  /// within the debug unit section. After this function has been called for all
  /// DIE objects, the DWARF can be generated since all DIEs will be able to
  /// properly refer to other DIE objects since all DIEs have calculated their
  /// offsets.
  ///
  /// \param AP AsmPrinter to use when calculating sizes.
  /// \param AbbrevSet the abbreviation used to unique DIE abbreviations.
  /// \param CUOffset the compile/type unit relative offset in bytes.
  /// \returns the offset for the DIE that follows this DIE within the
  /// current compile/type unit.
  unsigned computeOffsetsAndAbbrevs(const AsmPrinter *AP,
                                    DIEAbbrevSet &AbbrevSet, unsigned CUOffset);

  /// Climb up the parent chain to get the compile unit or type unit DIE that
  /// this DIE belongs to.
  ///
  /// \returns the compile or type unit DIE that owns this DIE, or NULL if
  /// this DIE hasn't been added to a unit DIE.
  const DIE *getUnitDie() const;

  /// Climb up the parent chain to get the compile unit or type unit that this
  /// DIE belongs to.
  ///
  /// \returns the DIEUnit that represents the compile or type unit that owns
  /// this DIE, or NULL if this DIE hasn't been added to a unit DIE.
  DIEUnit *getUnit() const;

  void setOffset(unsigned O) { Offset = O; }
  void setSize(unsigned S) { Size = S; }

  /// Add a child to the DIE.  Records this DIE as the child's owner and
  /// returns a reference to the child in its new position.
  DIE &addChild(DIE *Child) {
    assert(!Child->getParent() && "Child should be orphaned")((void)0);
    Child->Owner = this;
    Children.push_back(*Child);
    return Children.back();
  }

  /// Like addChild(), but makes the child the first in the child list.
  DIE &addChildFront(DIE *Child) {
    assert(!Child->getParent() && "Child should be orphaned")((void)0);
    Child->Owner = this;
    Children.push_front(*Child);
    return Children.front();
  }

  /// Find a value in the DIE with the attribute given.
  ///
  /// Returns a default-constructed DIEValue (where \a DIEValue::getType()
  /// gives \a DIEValue::isNone) if no such attribute exists.
  DIEValue findAttribute(dwarf::Attribute Attribute) const;

  void print(raw_ostream &O, unsigned IndentCount = 0) const;
  void dump() const;
};
869 | |
870 | //===--------------------------------------------------------------------===// |
871 | /// Represents a compile or type unit. |
class DIEUnit {
  /// The compile unit or type unit DIE. This variable must be an instance of
  /// DIE so that we can calculate the DIEUnit from any DIE by traversing the
  /// parent backchain and getting the Unit DIE, and then casting itself to a
  /// DIEUnit. This allows us to be able to find the DIEUnit for any DIE without
  /// having to store a pointer to the DIEUnit in each DIE instance.
  DIE Die;
  /// The section this unit will be emitted in. This may or may not be set to
  /// a valid section depending on the client that is emitting DWARF.
  MCSection *Section;
  // .debug_info or .debug_types absolute section offset.
  // NOTE(review): neither Section nor Offset has an in-class initializer;
  // presumably the out-of-line constructor zero-initializes them -- verify.
  uint64_t Offset;
protected:
  // Virtual so derived units destroy correctly; protected so clients cannot
  // delete through a DIEUnit pointer.
  virtual ~DIEUnit() = default;

public:
  explicit DIEUnit(dwarf::Tag UnitTag);
  // Not copyable or movable: child DIEs reference their owning unit by
  // pointer (see DIE::Owner).
  DIEUnit(const DIEUnit &RHS) = delete;
  DIEUnit(DIEUnit &&RHS) = delete;
  void operator=(const DIEUnit &RHS) = delete;
  void operator=(const DIEUnit &&RHS) = delete;
  /// Set the section that this DIEUnit will be emitted into.
  ///
  /// This function is used by some clients to set the section. Not all clients
  /// that emit DWARF use this section variable.  The section may only be set
  /// once (asserted below).
  void setSection(MCSection *Section) {
    assert(!this->Section)((void)0);
    this->Section = Section;
  }

  /// Base-address symbol for cross-section references; null here, for
  /// subclasses to override.
  virtual const MCSymbol *getCrossSectionRelativeBaseAddress() const {
    return nullptr;
  }

  /// Return the section that this DIEUnit will be emitted into.
  ///
  /// \returns Section pointer which can be NULL.
  MCSection *getSection() const { return Section; }
  void setDebugSectionOffset(uint64_t O) { Offset = O; }
  uint64_t getDebugSectionOffset() const { return Offset; }
  DIE &getUnitDie() { return Die; }
  const DIE &getUnitDie() const { return Die; }
};
914 | |
915 | struct BasicDIEUnit final : DIEUnit { |
916 | explicit BasicDIEUnit(dwarf::Tag UnitTag) : DIEUnit(UnitTag) {} |
917 | }; |
918 | |
919 | //===--------------------------------------------------------------------===// |
920 | /// DIELoc - Represents an expression location. |
921 | // |
922 | class DIELoc : public DIEValueList { |
923 | mutable unsigned Size = 0; // Size in bytes excluding size header. |
924 | |
925 | public: |
926 | DIELoc() = default; |
927 | |
928 | /// ComputeSize - Calculate the size of the location expression. |
929 | /// |
930 | unsigned ComputeSize(const AsmPrinter *AP) const; |
931 | |
932 | // TODO: move setSize() and Size to DIEValueList. |
933 | void setSize(unsigned size) { Size = size; } |
934 | |
935 | /// BestForm - Choose the best form for data. |
936 | /// |
937 | dwarf::Form BestForm(unsigned DwarfVersion) const { |
938 | if (DwarfVersion > 3) |
939 | return dwarf::DW_FORM_exprloc; |
940 | // Pre-DWARF4 location expressions were blocks and not exprloc. |
941 | if ((unsigned char)Size == Size) |
942 | return dwarf::DW_FORM_block1; |
943 | if ((unsigned short)Size == Size) |
944 | return dwarf::DW_FORM_block2; |
945 | if ((unsigned int)Size == Size) |
946 | return dwarf::DW_FORM_block4; |
947 | return dwarf::DW_FORM_block; |
948 | } |
949 | |
950 | void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const; |
951 | unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const; |
952 | |
953 | void print(raw_ostream &O) const; |
954 | }; |
955 | |
956 | //===--------------------------------------------------------------------===// |
957 | /// DIEBlock - Represents a block of values. |
958 | // |
959 | class DIEBlock : public DIEValueList { |
960 | mutable unsigned Size = 0; // Size in bytes excluding size header. |
961 | |
962 | public: |
963 | DIEBlock() = default; |
964 | |
965 | /// ComputeSize - Calculate the size of the location expression. |
966 | /// |
967 | unsigned ComputeSize(const AsmPrinter *AP) const; |
968 | |
969 | // TODO: move setSize() and Size to DIEValueList. |
970 | void setSize(unsigned size) { Size = size; } |
971 | |
972 | /// BestForm - Choose the best form for data. |
973 | /// |
974 | dwarf::Form BestForm() const { |
975 | if ((unsigned char)Size == Size) |
976 | return dwarf::DW_FORM_block1; |
977 | if ((unsigned short)Size == Size) |
978 | return dwarf::DW_FORM_block2; |
979 | if ((unsigned int)Size == Size) |
980 | return dwarf::DW_FORM_block4; |
981 | return dwarf::DW_FORM_block; |
982 | } |
983 | |
984 | void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const; |
985 | unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const; |
986 | |
987 | void print(raw_ostream &O) const; |
988 | }; |
989 | |
990 | } // end namespace llvm |
991 | |
992 | #endif // LLVM_CODEGEN_DIE_H |
1 | //===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | /// \file |
9 | /// |
10 | /// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms |
11 | /// to the LLVM "Allocator" concept and is similar to MallocAllocator, but |
12 | /// objects cannot be deallocated. Their lifetime is tied to the lifetime of the |
13 | /// allocator. |
14 | /// |
15 | //===----------------------------------------------------------------------===// |
16 | |
17 | #ifndef LLVM_SUPPORT_ALLOCATOR_H |
18 | #define LLVM_SUPPORT_ALLOCATOR_H |
19 | |
20 | #include "llvm/ADT/Optional.h" |
21 | #include "llvm/ADT/SmallVector.h" |
22 | #include "llvm/Support/Alignment.h" |
23 | #include "llvm/Support/AllocatorBase.h" |
24 | #include "llvm/Support/Compiler.h" |
25 | #include "llvm/Support/ErrorHandling.h" |
26 | #include "llvm/Support/MathExtras.h" |
27 | #include "llvm/Support/MemAlloc.h" |
28 | #include <algorithm> |
29 | #include <cassert> |
30 | #include <cstddef> |
31 | #include <cstdint> |
32 | #include <cstdlib> |
33 | #include <iterator> |
34 | #include <type_traits> |
35 | #include <utility> |
36 | |
37 | namespace llvm { |
38 | |
39 | namespace detail { |
40 | |
41 | // We call out to an external function to actually print the message as the |
42 | // printing code uses Allocator.h in its implementation. |
43 | void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated, |
44 | size_t TotalMemory); |
45 | |
46 | } // end namespace detail |
47 | |
48 | /// Allocate memory in an ever growing pool, as if by bump-pointer. |
49 | /// |
50 | /// This isn't strictly a bump-pointer allocator as it uses backing slabs of |
51 | /// memory rather than relying on a boundless contiguous heap. However, it has |
52 | /// bump-pointer semantics in that it is a monotonically growing pool of memory |
53 | /// where every allocation is found by merely allocating the next N bytes in |
54 | /// the slab, or the next N bytes in the next slab. |
55 | /// |
56 | /// Note that this also has a threshold for forcing allocations above a certain |
57 | /// size into their own slab. |
58 | /// |
59 | /// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator |
60 | /// object, which wraps malloc, to allocate memory, but it can be changed to |
61 | /// use a custom allocator. |
62 | /// |
63 | /// The GrowthDelay specifies after how many allocated slabs the allocator |
64 | /// increases the size of the slabs. |
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private AllocatorT {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1 which already increases the"
                "slab size after each allocated slab.");

  BumpPtrAllocatorImpl() = default;

  /// Construct by forwarding an instance of the underlying allocator.
  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocatorT(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    // Leave Old empty so its destructor does not free our slabs.
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    // Release our own memory before taking over RHS's slabs.
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));

    // As in the move constructor, empty RHS so it cannot double-free.
    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  ///
  /// Three paths: (1) fast path bumps CurPtr within the current slab;
  /// (2) requests larger than SizeThreshold get their own custom-sized slab;
  /// (3) otherwise a fresh standard slab is started and the request is
  /// satisfied from it.
  LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
  Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow")((void)0);

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD0
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize)((void)0);
      char *AlignedPtr = (char*)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&((void)0)
           "Unable to allocate memory!")((void)0);
    char *AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  /// Convenience overload taking the alignment as a plain integer.
  inline LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.")((void)0);
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  /// Total number of slabs, standard and custom-sized.
  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-size
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
  llvm::Optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return None;
  }

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    Optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used")((void)0);
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// about the object and produces a smaller identifier by relying on
  /// the alignment information. Note that sub-classes may have different
  /// alignment, so the most base class should be passed as template parameter
  /// in order to obtain correct results. For that reason automatic template
  /// parameter deduction is disabled.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator. This identifier is
  /// different from the ones produced by identifyObject and
  /// identifyAlignedObject.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information")((void)0);
    return Out / alignof(T);
  }

  /// Sum of the sizes of all slabs, standard and custom-sized.
  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  /// Set the size of the ASan red zone placed between allocations.
  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
    // (The shift count is clamped to 30, so the shift below is well-defined.)
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab =
        AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  // SpecificBumpPtrAllocator reads Slabs/CurPtr directly in DestroyAll().
  template <typename T> friend class SpecificBumpPtrAllocator;
};
366 | |
367 | /// The standard BumpPtrAllocator which just uses the default template |
368 | /// parameters. |
369 | typedef BumpPtrAllocatorImpl<> BumpPtrAllocator; |
370 | |
371 | /// A BumpPtrAllocator that allows only elements of a specific type to be |
372 | /// allocated. |
373 | /// |
374 | /// This allows calling the destructor in DestroyAll() and when the allocator is |
375 | /// destroyed. |
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  // Move leaves Old's allocator empty (see BumpPtrAllocatorImpl's move ctor).
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it, freeing
  /// all memory allocated so far.
  void DestroyAll() {
    // Destroy the Ts packed back-to-back in [Begin, End).  This relies on
    // every allocation from this allocator being Ts placed at alignof(T),
    // which the Allocate() member below guarantees.
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()))((void)0);
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    // Walk the standard slabs; Slabs/CurPtr are accessible because this
    // class is declared a friend of BumpPtrAllocatorImpl.
    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      // Only the last slab may be partially used; it is filled up to CurPtr.
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
428 | |
429 | } // end namespace llvm |
430 | |
431 | template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold, |
432 | size_t GrowthDelay> |
433 | void * |
434 | operator new(size_t Size, |
435 | llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold, |
436 | GrowthDelay> &Allocator) { |
437 | return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size), |
438 | alignof(std::max_align_t))); |
439 | } |
440 | |
/// Placement delete paired with the placement new above. The compiler calls
/// it only if a constructor throws during that placement new; it is
/// intentionally a no-op because bump allocation reclaims memory wholesale
/// (via Reset()/destruction), not per object.
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}
447 | |
448 | #endif // LLVM_SUPPORT_ALLOCATOR_H |
1 | //===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file contains types to represent alignments. | |||
10 | // They are instrumented to guarantee some invariants are preserved and prevent | |||
11 | // invalid manipulations. | |||
12 | // | |||
13 | // - Align represents an alignment in bytes, it is always set and always a valid | |||
14 | // power of two, its minimum value is 1 which means no alignment requirements. | |||
15 | // | |||
16 | // - MaybeAlign is an optional type, it may be undefined or set. When it's set | |||
17 | // you can get the underlying Align type by using the getValue() method. | |||
18 | // | |||
19 | //===----------------------------------------------------------------------===// | |||
20 | ||||
21 | #ifndef LLVM_SUPPORT_ALIGNMENT_H_ | |||
22 | #define LLVM_SUPPORT_ALIGNMENT_H_ | |||
23 | ||||
24 | #include "llvm/ADT/Optional.h" | |||
25 | #include "llvm/Support/MathExtras.h" | |||
26 | #include <cassert> | |||
27 | #ifndef NDEBUG1 | |||
28 | #include <string> | |||
29 | #endif // NDEBUG | |||
30 | ||||
31 | namespace llvm { | |||
32 | ||||
// Asserts that `decl` is strictly positive. In this build the assert expands
// away (the trailing ((void)0)), so the check is a no-op at run time; it
// documents the precondition for the scalar comparison operators below.
#define ALIGN_CHECK_ISPOSITIVE(decl)                                           \
  assert(decl > 0 && (#decl " should be defined"))((void)0)
35 | ||||
36 | /// This struct is a compact representation of a valid (non-zero power of two) | |||
37 | /// alignment. | |||
38 | /// It is suitable for use as static global constants. | |||
39 | struct Align { | |||
40 | private: | |||
41 | uint8_t ShiftValue = 0; /// The log2 of the required alignment. | |||
42 | /// ShiftValue is less than 64 by construction. | |||
43 | ||||
44 | friend struct MaybeAlign; | |||
45 | friend unsigned Log2(Align); | |||
46 | friend bool operator==(Align Lhs, Align Rhs); | |||
47 | friend bool operator!=(Align Lhs, Align Rhs); | |||
48 | friend bool operator<=(Align Lhs, Align Rhs); | |||
49 | friend bool operator>=(Align Lhs, Align Rhs); | |||
50 | friend bool operator<(Align Lhs, Align Rhs); | |||
51 | friend bool operator>(Align Lhs, Align Rhs); | |||
52 | friend unsigned encode(struct MaybeAlign A); | |||
53 | friend struct MaybeAlign decodeMaybeAlign(unsigned Value); | |||
54 | ||||
55 | /// A trivial type to allow construction of constexpr Align. | |||
56 | /// This is currently needed to workaround a bug in GCC 5.3 which prevents | |||
57 | /// definition of constexpr assign operators. | |||
58 | /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic | |||
59 | /// FIXME: Remove this, make all assign operators constexpr and introduce user | |||
60 | /// defined literals when we don't have to support GCC 5.3 anymore. | |||
61 | /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain | |||
62 | struct LogValue { | |||
63 | uint8_t Log; | |||
64 | }; | |||
65 | ||||
66 | public: | |||
67 | /// Default is byte-aligned. | |||
68 | constexpr Align() = default; | |||
69 | /// Do not perform checks in case of copy/move construct/assign, because the | |||
70 | /// checks have been performed when building `Other`. | |||
71 | constexpr Align(const Align &Other) = default; | |||
72 | constexpr Align(Align &&Other) = default; | |||
73 | Align &operator=(const Align &Other) = default; | |||
74 | Align &operator=(Align &&Other) = default; | |||
75 | ||||
76 | explicit Align(uint64_t Value) { | |||
77 | assert(Value > 0 && "Value must not be 0")((void)0); | |||
78 | assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2")((void)0); | |||
79 | ShiftValue = Log2_64(Value); | |||
80 | assert(ShiftValue < 64 && "Broken invariant")((void)0); | |||
81 | } | |||
82 | ||||
83 | /// This is a hole in the type system and should not be abused. | |||
84 | /// Needed to interact with C for instance. | |||
85 | uint64_t value() const { return uint64_t(1) << ShiftValue; } | |||
| ||||
86 | ||||
87 | /// Allow constructions of constexpr Align. | |||
88 | template <size_t kValue> constexpr static LogValue Constant() { | |||
89 | return LogValue{static_cast<uint8_t>(CTLog2<kValue>())}; | |||
90 | } | |||
91 | ||||
92 | /// Allow constructions of constexpr Align from types. | |||
93 | /// Compile time equivalent to Align(alignof(T)). | |||
94 | template <typename T> constexpr static LogValue Of() { | |||
95 | return Constant<std::alignment_of<T>::value>(); | |||
96 | } | |||
97 | ||||
98 | /// Constexpr constructor from LogValue type. | |||
99 | constexpr Align(LogValue CA) : ShiftValue(CA.Log) {} | |||
100 | }; | |||
101 | ||||
102 | /// Treats the value 0 as a 1, so Align is always at least 1. | |||
103 | inline Align assumeAligned(uint64_t Value) { | |||
104 | return Value ? Align(Value) : Align(); | |||
105 | } | |||
106 | ||||
107 | /// This struct is a compact representation of a valid (power of two) or | |||
108 | /// undefined (0) alignment. | |||
109 | struct MaybeAlign : public llvm::Optional<Align> { | |||
110 | private: | |||
111 | using UP = llvm::Optional<Align>; | |||
112 | ||||
113 | public: | |||
114 | /// Default is undefined. | |||
115 | MaybeAlign() = default; | |||
116 | /// Do not perform checks in case of copy/move construct/assign, because the | |||
117 | /// checks have been performed when building `Other`. | |||
118 | MaybeAlign(const MaybeAlign &Other) = default; | |||
119 | MaybeAlign &operator=(const MaybeAlign &Other) = default; | |||
120 | MaybeAlign(MaybeAlign &&Other) = default; | |||
121 | MaybeAlign &operator=(MaybeAlign &&Other) = default; | |||
122 | ||||
123 | /// Use llvm::Optional<Align> constructor. | |||
124 | using UP::UP; | |||
125 | ||||
126 | explicit MaybeAlign(uint64_t Value) { | |||
127 | assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&((void)0) | |||
128 | "Alignment is neither 0 nor a power of 2")((void)0); | |||
129 | if (Value) | |||
130 | emplace(Value); | |||
131 | } | |||
132 | ||||
133 | /// For convenience, returns a valid alignment or 1 if undefined. | |||
134 | Align valueOrOne() const { return hasValue() ? getValue() : Align(); } | |||
135 | }; | |||
136 | ||||
137 | /// Checks that SizeInBytes is a multiple of the alignment. | |||
138 | inline bool isAligned(Align Lhs, uint64_t SizeInBytes) { | |||
139 | return SizeInBytes % Lhs.value() == 0; | |||
140 | } | |||
141 | ||||
142 | /// Checks that Addr is a multiple of the alignment. | |||
143 | inline bool isAddrAligned(Align Lhs, const void *Addr) { | |||
144 | return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr)); | |||
145 | } | |||
146 | ||||
147 | /// Returns a multiple of A needed to store `Size` bytes. | |||
148 | inline uint64_t alignTo(uint64_t Size, Align A) { | |||
149 | const uint64_t Value = A.value(); | |||
150 | // The following line is equivalent to `(Size + Value - 1) / Value * Value`. | |||
151 | ||||
152 | // The division followed by a multiplication can be thought of as a right | |||
153 | // shift followed by a left shift which zeros out the extra bits produced in | |||
154 | // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out | |||
155 | // are just zero. | |||
156 | ||||
157 | // Most compilers can generate this code but the pattern may be missed when | |||
158 | // multiple functions gets inlined. | |||
159 | return (Size + Value - 1) & ~(Value - 1U); | |||
160 | } | |||
161 | ||||
162 | /// If non-zero \p Skew is specified, the return value will be a minimal integer | |||
163 | /// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for | |||
164 | /// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p | |||
165 | /// Skew mod \p A'. | |||
166 | /// | |||
167 | /// Examples: | |||
168 | /// \code | |||
169 | /// alignTo(5, Align(8), 7) = 7 | |||
170 | /// alignTo(17, Align(8), 1) = 17 | |||
171 | /// alignTo(~0LL, Align(8), 3) = 3 | |||
172 | /// \endcode | |||
173 | inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) { | |||
174 | const uint64_t Value = A.value(); | |||
175 | Skew %= Value; | |||
176 | return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew; | |||
177 | } | |||
178 | ||||
179 | /// Returns a multiple of A needed to store `Size` bytes. | |||
180 | /// Returns `Size` if current alignment is undefined. | |||
181 | inline uint64_t alignTo(uint64_t Size, MaybeAlign A) { | |||
182 | return A ? alignTo(Size, A.getValue()) : Size; | |||
183 | } | |||
184 | ||||
185 | /// Aligns `Addr` to `Alignment` bytes, rounding up. | |||
186 | inline uintptr_t alignAddr(const void *Addr, Align Alignment) { | |||
187 | uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr); | |||
188 | assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=((void)0) | |||
189 | ArithAddr &&((void)0) | |||
190 | "Overflow")((void)0); | |||
191 | return alignTo(ArithAddr, Alignment); | |||
192 | } | |||
193 | ||||
194 | /// Returns the offset to the next integer (mod 2**64) that is greater than | |||
195 | /// or equal to \p Value and is a multiple of \p Align. | |||
196 | inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) { | |||
197 | return alignTo(Value, Alignment) - Value; | |||
198 | } | |||
199 | ||||
200 | /// Returns the necessary adjustment for aligning `Addr` to `Alignment` | |||
201 | /// bytes, rounding up. | |||
202 | inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) { | |||
203 | return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment); | |||
204 | } | |||
205 | ||||
/// Returns the log2 of the alignment.
/// Reads ShiftValue directly via friendship; value() == 1 << ShiftValue.
inline unsigned Log2(Align A) { return A.ShiftValue; }
208 | ||||
209 | /// Returns the alignment that satisfies both alignments. | |||
210 | /// Same semantic as MinAlign. | |||
211 | inline Align commonAlignment(Align A, Align B) { return std::min(A, B); } | |||
212 | ||||
/// Returns the alignment that satisfies both alignments.
/// Same semantic as MinAlign.
inline Align commonAlignment(Align A, uint64_t Offset) {
  // MinAlign comes from MathExtras.h (included above). NOTE(review): assumes
  // it returns a power of two so the Align constructor checks hold — confirm.
  return Align(MinAlign(A.value(), Offset));
}
218 | ||||
219 | /// Returns the alignment that satisfies both alignments. | |||
220 | /// Same semantic as MinAlign. | |||
221 | inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) { | |||
222 | return A && B ? commonAlignment(*A, *B) : A ? A : B; | |||
223 | } | |||
224 | ||||
/// Returns the alignment that satisfies both alignments.
/// Same semantic as MinAlign.
inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
  // NOTE(review): unconditionally dereferences A, so calling this with an
  // undefined MaybeAlign is undefined behavior (unlike the MaybeAlign/
  // MaybeAlign overload above) — confirm all callers pass a set alignment.
  return MaybeAlign(MinAlign((*A).value(), Offset));
}
230 | ||||
/// Returns a representation of the alignment that encodes undefined as 0.
/// A set alignment encodes to ShiftValue + 1, i.e. log2(align) + 1.
inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }

/// Dual operation of the encode function above.
/// 0 decodes to an undefined MaybeAlign; any V > 0 decodes to an alignment of
/// 1 << (V - 1) bytes.
inline MaybeAlign decodeMaybeAlign(unsigned Value) {
  if (Value == 0)
    return MaybeAlign();
  Align Out;
  // Writes ShiftValue directly via friendship, bypassing the Align(uint64_t)
  // checks; assumes Value was produced by encode() — TODO confirm callers.
  Out.ShiftValue = Value - 1;
  return Out;
}

/// Returns a representation of the alignment, the encoded value is positive by
/// definition.
inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
246 | ||||
/// Comparisons between Align and scalars. Rhs must be positive.
/// ALIGN_CHECK_ISPOSITIVE is compiled out in this build, so the positivity
/// precondition is documentation only; each operator compares the expanded
/// byte value of the alignment against the scalar.
inline bool operator==(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() == Rhs;
}
inline bool operator!=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() != Rhs;
}
inline bool operator<=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() <= Rhs;
}
inline bool operator>=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() >= Rhs;
}
inline bool operator<(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() < Rhs;
}
inline bool operator>(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() > Rhs;
}
272 | ||||
273 | /// Comparisons between MaybeAlign and scalars. | |||
274 | inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) { | |||
275 | return Lhs ? (*Lhs).value() == Rhs : Rhs == 0; | |||
276 | } | |||
277 | inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) { | |||
278 | return Lhs ? (*Lhs).value() != Rhs : Rhs != 0; | |||
279 | } | |||
280 | ||||
/// Comparisons operators between Align.
/// Comparing the stored log2 values is equivalent to comparing the byte
/// values themselves, since log2 is strictly monotonic, and avoids expanding
/// the shift.
inline bool operator==(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue == Rhs.ShiftValue;
}
inline bool operator!=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue != Rhs.ShiftValue;
}
inline bool operator<=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue <= Rhs.ShiftValue;
}
inline bool operator>=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue >= Rhs.ShiftValue;
}
inline bool operator<(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue < Rhs.ShiftValue;
}
inline bool operator>(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue > Rhs.ShiftValue;
}
300 | ||||
// Don't allow relational comparisons with MaybeAlign. Deleting these
// overloads forces callers to resolve the undefined state explicitly (e.g.
// via valueOrOne()) before ordering alignments.
bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
bool operator>(Align Lhs, MaybeAlign Rhs) = delete;

bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
bool operator>(MaybeAlign Lhs, Align Rhs) = delete;

bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316 | ||||
317 | inline Align operator*(Align Lhs, uint64_t Rhs) { | |||
318 | assert(Rhs > 0 && "Rhs must be positive")((void)0); | |||
319 | return Align(Lhs.value() * Rhs); | |||
320 | } | |||
321 | ||||
322 | inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) { | |||
323 | assert(Rhs > 0 && "Rhs must be positive")((void)0); | |||
324 | return Lhs ? Lhs.getValue() * Rhs : MaybeAlign(); | |||
325 | } | |||
326 | ||||
327 | inline Align operator/(Align Lhs, uint64_t Divisor) { | |||
328 | assert(llvm::isPowerOf2_64(Divisor) &&((void)0) | |||
329 | "Divisor must be positive and a power of 2")((void)0); | |||
330 | assert(Lhs != 1 && "Can't halve byte alignment")((void)0); | |||
331 | return Align(Lhs.value() / Divisor); | |||
332 | } | |||
333 | ||||
334 | inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) { | |||
335 | assert(llvm::isPowerOf2_64(Divisor) &&((void)0) | |||
336 | "Divisor must be positive and a power of 2")((void)0); | |||
337 | return Lhs ? Lhs.getValue() / Divisor : MaybeAlign(); | |||
338 | } | |||
339 | ||||
340 | inline Align max(MaybeAlign Lhs, Align Rhs) { | |||
341 | return Lhs && *Lhs > Rhs ? *Lhs : Rhs; | |||
342 | } | |||
343 | ||||
344 | inline Align max(Align Lhs, MaybeAlign Rhs) { | |||
345 | return Rhs && *Rhs > Lhs ? *Rhs : Lhs; | |||
346 | } | |||
347 | ||||
348 | #ifndef NDEBUG1 | |||
349 | // For usage in LLVM_DEBUG macros. | |||
350 | inline std::string DebugStr(const Align &A) { | |||
351 | return std::to_string(A.value()); | |||
352 | } | |||
353 | // For usage in LLVM_DEBUG macros. | |||
354 | inline std::string DebugStr(const MaybeAlign &MA) { | |||
355 | if (MA) | |||
356 | return std::to_string(MA->value()); | |||
357 | return "None"; | |||
358 | } | |||
359 | #endif // NDEBUG | |||
360 | ||||
361 | #undef ALIGN_CHECK_ISPOSITIVE | |||
362 | ||||
363 | } // namespace llvm | |||
364 | ||||
365 | #endif // LLVM_SUPPORT_ALIGNMENT_H_ |