Bug Summary

File: src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name Program.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangAST/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangAST/obj/../include/clang/AST -I /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangAST/../include -I /usr/src/gnu/usr.bin/clang/libclangAST/obj -I /usr/src/gnu/usr.bin/clang/libclangAST/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangAST/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp 
-fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/Interp/Program.cpp

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/Interp/Program.cpp

1//===--- Program.cpp - Bytecode for the constexpr VM ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "Program.h"
10#include "ByteCodeStmtGen.h"
11#include "Context.h"
12#include "Function.h"
13#include "Opcode.h"
14#include "PrimType.h"
15#include "clang/AST/Decl.h"
16#include "clang/AST/DeclCXX.h"
17
18using namespace clang;
19using namespace clang::interp;
20
21unsigned Program::createGlobalString(const StringLiteral *S) {
22 const size_t CharWidth = S->getCharByteWidth();
23 const size_t BitWidth = CharWidth * Ctx.getCharBit();
24
25 PrimType CharType;
26 switch (CharWidth) {
27 case 1:
28 CharType = PT_Sint8;
29 break;
30 case 2:
31 CharType = PT_Uint16;
32 break;
33 case 4:
34 CharType = PT_Uint32;
35 break;
36 default:
37 llvm_unreachable("unsupported character width")__builtin_unreachable();
38 }
39
40 // Create a descriptor for the string.
41 Descriptor *Desc = allocateDescriptor(S, CharType, S->getLength() + 1,
42 /*isConst=*/true,
43 /*isTemporary=*/false,
44 /*isMutable=*/false);
45
46 // Allocate storage for the string.
47 // The byte length does not include the null terminator.
48 unsigned I = Globals.size();
49 unsigned Sz = Desc->getAllocSize();
50 auto *G = new (Allocator, Sz) Global(Desc, /*isStatic=*/true,
51 /*isExtern=*/false);
52 Globals.push_back(G);
53
54 // Construct the string in storage.
55 const Pointer Ptr(G->block());
56 for (unsigned I = 0, N = S->getLength(); I <= N; ++I) {
57 Pointer Field = Ptr.atIndex(I).narrow();
58 const uint32_t CodePoint = I == N ? 0 : S->getCodeUnit(I);
59 switch (CharType) {
60 case PT_Sint8: {
61 using T = PrimConv<PT_Sint8>::T;
62 Field.deref<T>() = T::from(CodePoint, BitWidth);
63 break;
64 }
65 case PT_Uint16: {
66 using T = PrimConv<PT_Uint16>::T;
67 Field.deref<T>() = T::from(CodePoint, BitWidth);
68 break;
69 }
70 case PT_Uint32: {
71 using T = PrimConv<PT_Uint32>::T;
72 Field.deref<T>() = T::from(CodePoint, BitWidth);
73 break;
74 }
75 default:
76 llvm_unreachable("unsupported character type")__builtin_unreachable();
77 }
78 }
79 return I;
80}
81
82Pointer Program::getPtrGlobal(unsigned Idx) {
83 assert(Idx < Globals.size())((void)0);
84 return Pointer(Globals[Idx]->block());
85}
86
87llvm::Optional<unsigned> Program::getGlobal(const ValueDecl *VD) {
88 auto It = GlobalIndices.find(VD);
89 if (It != GlobalIndices.end())
90 return It->second;
91
92 // Find any previous declarations which were already evaluated.
93 llvm::Optional<unsigned> Index;
94 for (const Decl *P = VD; P; P = P->getPreviousDecl()) {
95 auto It = GlobalIndices.find(P);
96 if (It != GlobalIndices.end()) {
97 Index = It->second;
98 break;
99 }
100 }
101
102 // Map the decl to the existing index.
103 if (Index) {
104 GlobalIndices[VD] = *Index;
105 return {};
106 }
107
108 return Index;
109}
110
111llvm::Optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD) {
112 if (auto Idx = getGlobal(VD))
113 return Idx;
114
115 if (auto Idx = createGlobal(VD)) {
116 GlobalIndices[VD] = *Idx;
117 return Idx;
118 }
119 return {};
120}
121
122llvm::Optional<unsigned> Program::getOrCreateDummy(const ParmVarDecl *PD) {
123 auto &ASTCtx = Ctx.getASTContext();
124
125 // Create a pointer to an incomplete array of the specified elements.
126 QualType ElemTy = PD->getType()->castAs<PointerType>()->getPointeeType();
127 QualType Ty = ASTCtx.getIncompleteArrayType(ElemTy, ArrayType::Normal, 0);
128
129 // Dedup blocks since they are immutable and pointers cannot be compared.
130 auto It = DummyParams.find(PD);
131 if (It != DummyParams.end())
132 return It->second;
133
134 if (auto Idx = createGlobal(PD, Ty, /*isStatic=*/true, /*isExtern=*/true)) {
135 DummyParams[PD] = *Idx;
136 return Idx;
137 }
138 return {};
139}
140
141llvm::Optional<unsigned> Program::createGlobal(const ValueDecl *VD) {
142 bool IsStatic, IsExtern;
143 if (auto *Var = dyn_cast<VarDecl>(VD)) {
144 IsStatic = !Var->hasLocalStorage();
145 IsExtern = !Var->getAnyInitializer();
146 } else {
147 IsStatic = false;
148 IsExtern = true;
149 }
150 if (auto Idx = createGlobal(VD, VD->getType(), IsStatic, IsExtern)) {
151 for (const Decl *P = VD; P; P = P->getPreviousDecl())
152 GlobalIndices[P] = *Idx;
153 return *Idx;
154 }
155 return {};
156}
157
158llvm::Optional<unsigned> Program::createGlobal(const Expr *E) {
159 return createGlobal(E, E->getType(), /*isStatic=*/true, /*isExtern=*/false);
1
Calling 'Program::createGlobal'
160}
161
162llvm::Optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
163 bool IsStatic, bool IsExtern) {
164 // Create a descriptor for the global.
165 Descriptor *Desc;
166 const bool IsConst = Ty.isConstQualified();
167 const bool IsTemporary = D.dyn_cast<const Expr *>();
168 if (auto T = Ctx.classify(Ty)) {
2
Assuming the condition is true
3
Taking true branch
169 Desc = createDescriptor(D, *T, IsConst, IsTemporary);
4
Calling 'Program::createDescriptor'
170 } else {
171 Desc = createDescriptor(D, Ty.getTypePtr(), IsConst, IsTemporary);
172 }
173 if (!Desc)
174 return {};
175
176 // Allocate a block for storage.
177 unsigned I = Globals.size();
178
179 auto *G = new (Allocator, Desc->getAllocSize())
180 Global(getCurrentDecl(), Desc, IsStatic, IsExtern);
181 G->block()->invokeCtor();
182
183 Globals.push_back(G);
184
185 return I;
186}
187
188Function *Program::getFunction(const FunctionDecl *F) {
189 F = F->getDefinition();
190 auto It = Funcs.find(F);
191 return It == Funcs.end() ? nullptr : It->second.get();
192}
193
194llvm::Expected<Function *> Program::getOrCreateFunction(const FunctionDecl *F) {
195 if (Function *Func = getFunction(F)) {
196 return Func;
197 }
198
199 // Try to compile the function if it wasn't compiled yet.
200 if (const FunctionDecl *FD = F->getDefinition())
201 return ByteCodeStmtGen<ByteCodeEmitter>(Ctx, *this).compileFunc(FD);
202
203 // A relocation which traps if not resolved.
204 return nullptr;
205}
206
207Record *Program::getOrCreateRecord(const RecordDecl *RD) {
208 // Use the actual definition as a key.
209 RD = RD->getDefinition();
210 if (!RD)
211 return nullptr;
212
213 // Deduplicate records.
214 auto It = Records.find(RD);
215 if (It != Records.end()) {
216 return It->second;
217 }
218
219 // Number of bytes required by fields and base classes.
220 unsigned Size = 0;
221 // Number of bytes required by virtual base.
222 unsigned VirtSize = 0;
223
224 // Helper to get a base descriptor.
225 auto GetBaseDesc = [this](const RecordDecl *BD, Record *BR) -> Descriptor * {
226 if (!BR)
227 return nullptr;
228 return allocateDescriptor(BD, BR, /*isConst=*/false,
229 /*isTemporary=*/false,
230 /*isMutable=*/false);
231 };
232
233 // Reserve space for base classes.
234 Record::BaseList Bases;
235 Record::VirtualBaseList VirtBases;
236 if (auto *CD = dyn_cast<CXXRecordDecl>(RD)) {
237 for (const CXXBaseSpecifier &Spec : CD->bases()) {
238 if (Spec.isVirtual())
239 continue;
240
241 const RecordDecl *BD = Spec.getType()->castAs<RecordType>()->getDecl();
242 Record *BR = getOrCreateRecord(BD);
243 if (Descriptor *Desc = GetBaseDesc(BD, BR)) {
244 Size += align(sizeof(InlineDescriptor));
245 Bases.push_back({BD, Size, Desc, BR});
246 Size += align(BR->getSize());
247 continue;
248 }
249 return nullptr;
250 }
251
252 for (const CXXBaseSpecifier &Spec : CD->vbases()) {
253 const RecordDecl *BD = Spec.getType()->castAs<RecordType>()->getDecl();
254 Record *BR = getOrCreateRecord(BD);
255
256 if (Descriptor *Desc = GetBaseDesc(BD, BR)) {
257 VirtSize += align(sizeof(InlineDescriptor));
258 VirtBases.push_back({BD, VirtSize, Desc, BR});
259 VirtSize += align(BR->getSize());
260 continue;
261 }
262 return nullptr;
263 }
264 }
265
266 // Reserve space for fields.
267 Record::FieldList Fields;
268 for (const FieldDecl *FD : RD->fields()) {
269 // Reserve space for the field's descriptor and the offset.
270 Size += align(sizeof(InlineDescriptor));
271
272 // Classify the field and add its metadata.
273 QualType FT = FD->getType();
274 const bool IsConst = FT.isConstQualified();
275 const bool IsMutable = FD->isMutable();
276 Descriptor *Desc;
277 if (llvm::Optional<PrimType> T = Ctx.classify(FT)) {
278 Desc = createDescriptor(FD, *T, IsConst, /*isTemporary=*/false,
279 IsMutable);
280 } else {
281 Desc = createDescriptor(FD, FT.getTypePtr(), IsConst,
282 /*isTemporary=*/false, IsMutable);
283 }
284 if (!Desc)
285 return nullptr;
286 Fields.push_back({FD, Size, Desc});
287 Size += align(Desc->getAllocSize());
288 }
289
290 Record *R = new (Allocator) Record(RD, std::move(Bases), std::move(Fields),
291 std::move(VirtBases), VirtSize, Size);
292 Records.insert({RD, R});
293 return R;
294}
295
296Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
297 bool IsConst, bool IsTemporary,
298 bool IsMutable) {
299 // Classes and structures.
300 if (auto *RT = Ty->getAs<RecordType>()) {
301 if (auto *Record = getOrCreateRecord(RT->getDecl()))
302 return allocateDescriptor(D, Record, IsConst, IsTemporary, IsMutable);
303 }
304
305 // Arrays.
306 if (auto ArrayType = Ty->getAsArrayTypeUnsafe()) {
307 QualType ElemTy = ArrayType->getElementType();
308 // Array of well-known bounds.
309 if (auto CAT = dyn_cast<ConstantArrayType>(ArrayType)) {
310 size_t NumElems = CAT->getSize().getZExtValue();
311 if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) {
312 // Arrays of primitives.
313 unsigned ElemSize = primSize(*T);
314 if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems) {
315 return {};
316 }
317 return allocateDescriptor(D, *T, NumElems, IsConst, IsTemporary,
318 IsMutable);
319 } else {
320 // Arrays of composites. In this case, the array is a list of pointers,
321 // followed by the actual elements.
322 Descriptor *Desc =
323 createDescriptor(D, ElemTy.getTypePtr(), IsConst, IsTemporary);
324 if (!Desc)
325 return nullptr;
326 InterpSize ElemSize = Desc->getAllocSize() + sizeof(InlineDescriptor);
327 if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems)
328 return {};
329 return allocateDescriptor(D, Desc, NumElems, IsConst, IsTemporary,
330 IsMutable);
331 }
332 }
333
334 // Array of unknown bounds - cannot be accessed and pointer arithmetic
335 // is forbidden on pointers to such objects.
336 if (isa<IncompleteArrayType>(ArrayType)) {
337 if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) {
338 return allocateDescriptor(D, *T, IsTemporary,
339 Descriptor::UnknownSize{});
340 } else {
341 Descriptor *Desc =
342 createDescriptor(D, ElemTy.getTypePtr(), IsConst, IsTemporary);
343 if (!Desc)
344 return nullptr;
345 return allocateDescriptor(D, Desc, IsTemporary,
346 Descriptor::UnknownSize{});
347 }
348 }
349 }
350
351 // Atomic types.
352 if (auto *AT = Ty->getAs<AtomicType>()) {
353 const Type *InnerTy = AT->getValueType().getTypePtr();
354 return createDescriptor(D, InnerTy, IsConst, IsTemporary, IsMutable);
355 }
356
357 // Complex types - represented as arrays of elements.
358 if (auto *CT = Ty->getAs<ComplexType>()) {
359 PrimType ElemTy = *Ctx.classify(CT->getElementType());
360 return allocateDescriptor(D, ElemTy, 2, IsConst, IsTemporary, IsMutable);
361 }
362
363 return nullptr;
364}

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/Interp/Program.h

1//===--- Program.h - Bytecode for the constexpr VM --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Defines a program which organises and links multiple bytecode functions.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_AST_INTERP_PROGRAM_H
14#define LLVM_CLANG_AST_INTERP_PROGRAM_H
15
16#include <map>
17#include <vector>
18#include "Function.h"
19#include "Pointer.h"
20#include "PrimType.h"
21#include "Record.h"
22#include "Source.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/PointerUnion.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/Support/Allocator.h"
27
28namespace clang {
29class RecordDecl;
30class Expr;
31class FunctionDecl;
32class Stmt;
33class StringLiteral;
34class VarDecl;
35
36namespace interp {
37class Context;
38class State;
39class Record;
40class Scope;
41
42/// The program contains and links the bytecode for all functions.
43class Program {
44public:
45 Program(Context &Ctx) : Ctx(Ctx) {}
46
47 /// Emits a string literal among global data.
48 unsigned createGlobalString(const StringLiteral *S);
49
50 /// Returns a pointer to a global.
51 Pointer getPtrGlobal(unsigned Idx);
52
53 /// Returns the value of a global.
54 Block *getGlobal(unsigned Idx) {
55 assert(Idx < Globals.size())((void)0);
56 return Globals[Idx]->block();
57 }
58
59 /// Finds a global's index.
60 llvm::Optional<unsigned> getGlobal(const ValueDecl *VD);
61
62 /// Returns or creates a global and creates an index to it.
63 llvm::Optional<unsigned> getOrCreateGlobal(const ValueDecl *VD);
64
65 /// Returns or creates a dummy value for parameters.
66 llvm::Optional<unsigned> getOrCreateDummy(const ParmVarDecl *PD);
67
68 /// Creates a global and returns its index.
69 llvm::Optional<unsigned> createGlobal(const ValueDecl *VD);
70
71 /// Creates a global from a lifetime-extended temporary.
72 llvm::Optional<unsigned> createGlobal(const Expr *E);
73
74 /// Creates a new function from a code range.
75 template <typename... Ts>
76 Function *createFunction(const FunctionDecl *Def, Ts &&... Args) {
77 auto *Func = new Function(*this, Def, std::forward<Ts>(Args)...);
78 Funcs.insert({Def, std::unique_ptr<Function>(Func)});
79 return Func;
80 }
81 /// Creates an anonymous function.
82 template <typename... Ts>
83 Function *createFunction(Ts &&... Args) {
84 auto *Func = new Function(*this, std::forward<Ts>(Args)...);
85 AnonFuncs.emplace_back(Func);
86 return Func;
87 }
88
89 /// Returns a function.
90 Function *getFunction(const FunctionDecl *F);
91
92 /// Returns a pointer to a function if it exists and can be compiled.
93 /// If a function couldn't be compiled, an error is returned.
94 /// If a function was not yet defined, a null pointer is returned.
95 llvm::Expected<Function *> getOrCreateFunction(const FunctionDecl *F);
96
97 /// Returns a record or creates one if it does not exist.
98 Record *getOrCreateRecord(const RecordDecl *RD);
99
100 /// Creates a descriptor for a primitive type.
101 Descriptor *createDescriptor(const DeclTy &D, PrimType Type,
102 bool IsConst = false,
103 bool IsTemporary = false,
104 bool IsMutable = false) {
105 return allocateDescriptor(D, Type, IsConst, IsTemporary, IsMutable);
5
Calling 'Program::allocateDescriptor'
106 }
107
108 /// Creates a descriptor for a composite type.
109 Descriptor *createDescriptor(const DeclTy &D, const Type *Ty,
110 bool IsConst = false, bool IsTemporary = false,
111 bool IsMutable = false);
112
113 /// Context to manage declaration lifetimes.
114 class DeclScope {
115 public:
116 DeclScope(Program &P, const VarDecl *VD) : P(P) { P.startDeclaration(VD); }
117 ~DeclScope() { P.endDeclaration(); }
118
119 private:
120 Program &P;
121 };
122
123 /// Returns the current declaration ID.
124 llvm::Optional<unsigned> getCurrentDecl() const {
125 if (CurrentDeclaration == NoDeclaration)
126 return llvm::Optional<unsigned>{};
127 return LastDeclaration;
128 }
129
130private:
131 friend class DeclScope;
132
133 llvm::Optional<unsigned> createGlobal(const DeclTy &D, QualType Ty,
134 bool IsStatic, bool IsExtern);
135
136 /// Reference to the VM context.
137 Context &Ctx;
138 /// Mapping from decls to cached bytecode functions.
139 llvm::DenseMap<const FunctionDecl *, std::unique_ptr<Function>> Funcs;
140 /// List of anonymous functions.
141 std::vector<std::unique_ptr<Function>> AnonFuncs;
142
143 /// Function relocation locations.
144 llvm::DenseMap<const FunctionDecl *, std::vector<unsigned>> Relocs;
145
146 /// Custom allocator for global storage.
147 using PoolAllocTy = llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator>;
148
149 /// Descriptor + storage for a global object.
150 ///
151 /// Global objects never go out of scope, thus they do not track pointers.
152 class Global {
153 public:
154 /// Create a global descriptor for string literals.
155 template <typename... Tys>
156 Global(Tys... Args) : B(std::forward<Tys>(Args)...) {}
157
158 /// Allocates the global in the pool, reserving storage for data.
159 void *operator new(size_t Meta, PoolAllocTy &Alloc, size_t Data) {
160 return Alloc.Allocate(Meta + Data, alignof(void *));
161 }
162
163 /// Return a pointer to the data.
164 char *data() { return B.data(); }
165 /// Return a pointer to the block.
166 Block *block() { return &B; }
167
168 private:
169 /// Required metadata - does not actually track pointers.
170 Block B;
171 };
172
173 /// Allocator for globals.
174 PoolAllocTy Allocator;
175
176 /// Global objects.
177 std::vector<Global *> Globals;
178 /// Cached global indices.
179 llvm::DenseMap<const void *, unsigned> GlobalIndices;
180
181 /// Mapping from decls to record metadata.
182 llvm::DenseMap<const RecordDecl *, Record *> Records;
183
184 /// Dummy parameter to generate pointers from.
185 llvm::DenseMap<const ParmVarDecl *, unsigned> DummyParams;
186
187 /// Creates a new descriptor.
188 template <typename... Ts>
189 Descriptor *allocateDescriptor(Ts &&... Args) {
190 return new (Allocator) Descriptor(std::forward<Ts>(Args)...);
6
Calling 'operator new<llvm::MallocAllocator, 4096UL, 4096UL, 128UL>'
191 }
192
193 /// No declaration ID.
194 static constexpr unsigned NoDeclaration = (unsigned)-1;
195 /// Last declaration ID.
196 unsigned LastDeclaration = 0;
197 /// Current declaration ID.
198 unsigned CurrentDeclaration = NoDeclaration;
199
200 /// Starts evaluating a declaration.
201 void startDeclaration(const VarDecl *Decl) {
202 LastDeclaration += 1;
203 CurrentDeclaration = LastDeclaration;
204 }
205
206 /// Ends a global declaration.
207 void endDeclaration() {
208 CurrentDeclaration = NoDeclaration;
209 }
210
211public:
212 /// Dumps the disassembled bytecode to \c llvm::errs().
213 void dump() const;
214 void dump(llvm::raw_ostream &OS) const;
215};
216
217} // namespace interp
218} // namespace clang
219
220#endif

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include/llvm/Support/Allocator.h

1//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
11/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
12/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
13/// allocator.
14///
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_SUPPORT_ALLOCATOR_H
18#define LLVM_SUPPORT_ALLOCATOR_H
19
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/Support/Alignment.h"
23#include "llvm/Support/AllocatorBase.h"
24#include "llvm/Support/Compiler.h"
25#include "llvm/Support/ErrorHandling.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/MemAlloc.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstdint>
32#include <cstdlib>
33#include <iterator>
34#include <type_traits>
35#include <utility>
36
37namespace llvm {
38
39namespace detail {
40
41// We call out to an external function to actually print the message as the
42// printing code uses Allocator.h in its implementation.
43void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
44 size_t TotalMemory);
45
46} // end namespace detail
47
48/// Allocate memory in an ever growing pool, as if by bump-pointer.
49///
50/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
51/// memory rather than relying on a boundless contiguous heap. However, it has
52/// bump-pointer semantics in that it is a monotonically growing pool of memory
53/// where every allocation is found by merely allocating the next N bytes in
54/// the slab, or the next N bytes in the next slab.
55///
56/// Note that this also has a threshold for forcing allocations above a certain
57/// size into their own slab.
58///
59/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
60/// object, which wraps malloc, to allocate memory, but it can be changed to
61/// use a custom allocator.
62///
63/// The GrowthDelay specifies after how many allocated slabs the allocator
64/// increases the size of the slabs.
65template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
66 size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
67class BumpPtrAllocatorImpl
68 : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
69 SizeThreshold, GrowthDelay>>,
70 private AllocatorT {
71public:
72 static_assert(SizeThreshold <= SlabSize,
73 "The SizeThreshold must be at most the SlabSize to ensure "
74 "that objects larger than a slab go into their own memory "
75 "allocation.");
76 static_assert(GrowthDelay > 0,
77 "GrowthDelay must be at least 1 which already increases the"
78 "slab size after each allocated slab.");
79
80 BumpPtrAllocatorImpl() = default;
81
82 template <typename T>
83 BumpPtrAllocatorImpl(T &&Allocator)
84 : AllocatorT(std::forward<T &&>(Allocator)) {}
85
86 // Manually implement a move constructor as we must clear the old allocator's
87 // slabs as a matter of correctness.
88 BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
89 : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
90 End(Old.End), Slabs(std::move(Old.Slabs)),
91 CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
92 BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
93 Old.CurPtr = Old.End = nullptr;
94 Old.BytesAllocated = 0;
95 Old.Slabs.clear();
96 Old.CustomSizedSlabs.clear();
97 }
98
99 ~BumpPtrAllocatorImpl() {
100 DeallocateSlabs(Slabs.begin(), Slabs.end());
101 DeallocateCustomSizedSlabs();
102 }
103
104 BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
105 DeallocateSlabs(Slabs.begin(), Slabs.end());
106 DeallocateCustomSizedSlabs();
107
108 CurPtr = RHS.CurPtr;
109 End = RHS.End;
110 BytesAllocated = RHS.BytesAllocated;
111 RedZoneSize = RHS.RedZoneSize;
112 Slabs = std::move(RHS.Slabs);
113 CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
114 AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));
115
116 RHS.CurPtr = RHS.End = nullptr;
117 RHS.BytesAllocated = 0;
118 RHS.Slabs.clear();
119 RHS.CustomSizedSlabs.clear();
120 return *this;
121 }
122
123 /// Deallocate all but the current slab and reset the current pointer
124 /// to the beginning of it, freeing all memory allocated so far.
125 void Reset() {
126 // Deallocate all but the first slab, and deallocate all custom-sized slabs.
127 DeallocateCustomSizedSlabs();
128 CustomSizedSlabs.clear();
129
130 if (Slabs.empty())
131 return;
132
133 // Reset the state.
134 BytesAllocated = 0;
135 CurPtr = (char *)Slabs.front();
136 End = CurPtr + SlabSize;
137
138 __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
139 DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
140 Slabs.erase(std::next(Slabs.begin()), Slabs.end());
141 }
142
143 /// Allocate space at the specified alignment.
144 LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
145 Allocate(size_t Size, Align Alignment) {
146 // Keep track of how many bytes we've allocated.
147 BytesAllocated += Size;
148
149 size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
9
Calling 'offsetToAlignedAddr'
150 assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow")((void)0);
151
152 size_t SizeToAllocate = Size;
153#if LLVM_ADDRESS_SANITIZER_BUILD0
154 // Add trailing bytes as a "red zone" under ASan.
155 SizeToAllocate += RedZoneSize;
156#endif
157
158 // Check if we have enough space.
159 if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
160 char *AlignedPtr = CurPtr + Adjustment;
161 CurPtr = AlignedPtr + SizeToAllocate;
162 // Update the allocation point of this memory block in MemorySanitizer.
163 // Without this, MemorySanitizer messages for values originated from here
164 // will point to the allocation of the entire slab.
165 __msan_allocated_memory(AlignedPtr, Size);
166 // Similarly, tell ASan about this space.
167 __asan_unpoison_memory_region(AlignedPtr, Size);
168 return AlignedPtr;
169 }
170
171 // If Size is really big, allocate a separate slab for it.
172 size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
173 if (PaddedSize > SizeThreshold) {
174 void *NewSlab =
175 AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
176 // We own the new slab and don't want anyone reading anything other than
177 // pieces returned from this method. So poison the whole slab.
178 __asan_poison_memory_region(NewSlab, PaddedSize);
179 CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
180
181 uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
182 assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize)((void)0);
183 char *AlignedPtr = (char*)AlignedAddr;
184 __msan_allocated_memory(AlignedPtr, Size);
185 __asan_unpoison_memory_region(AlignedPtr, Size);
186 return AlignedPtr;
187 }
188
189 // Otherwise, start a new slab and try again.
190 StartNewSlab();
191 uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
192 assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&((void)0)
193 "Unable to allocate memory!")((void)0);
194 char *AlignedPtr = (char*)AlignedAddr;
195 CurPtr = AlignedPtr + SizeToAllocate;
196 __msan_allocated_memory(AlignedPtr, Size);
197 __asan_unpoison_memory_region(AlignedPtr, Size);
198 return AlignedPtr;
199 }
200
201 inline LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
202 Allocate(size_t Size, size_t Alignment) {
203 assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.")((void)0);
204 return Allocate(Size, Align(Alignment));
8
Calling 'BumpPtrAllocatorImpl::Allocate'
205 }
206
207 // Pull in base class overloads.
208 using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
209
210 // Bump pointer allocators are expected to never free their storage; and
211 // clients expect pointers to remain valid for non-dereferencing uses even
212 // after deallocation.
213 void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
214 __asan_poison_memory_region(Ptr, Size);
215 }
216
217 // Pull in base class overloads.
218 using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
219
220 size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
221
222 /// \return An index uniquely and reproducibly identifying
223 /// an input pointer \p Ptr in the given allocator.
224 /// The returned value is negative iff the object is inside a custom-size
225 /// slab.
226 /// Returns an empty optional if the pointer is not found in the allocator.
227 llvm::Optional<int64_t> identifyObject(const void *Ptr) {
228 const char *P = static_cast<const char *>(Ptr);
229 int64_t InSlabIdx = 0;
230 for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
231 const char *S = static_cast<const char *>(Slabs[Idx]);
232 if (P >= S && P < S + computeSlabSize(Idx))
233 return InSlabIdx + static_cast<int64_t>(P - S);
234 InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
235 }
236
237 // Use negative index to denote custom sized slabs.
238 int64_t InCustomSizedSlabIdx = -1;
239 for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
240 const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
241 size_t Size = CustomSizedSlabs[Idx].second;
242 if (P >= S && P < S + Size)
243 return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
244 InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
245 }
246 return None;
247 }
248
249 /// A wrapper around identifyObject that additionally asserts that
250 /// the object is indeed within the allocator.
251 /// \return An index uniquely and reproducibly identifying
252 /// an input pointer \p Ptr in the given allocator.
253 int64_t identifyKnownObject(const void *Ptr) {
254 Optional<int64_t> Out = identifyObject(Ptr);
255 assert(Out && "Wrong allocator used")((void)0);
256 return *Out;
257 }
258
259 /// A wrapper around identifyKnownObject. Accepts type information
260 /// about the object and produces a smaller identifier by relying on
261 /// the alignment information. Note that sub-classes may have different
262 /// alignment, so the most base class should be passed as template parameter
263 /// in order to obtain correct results. For that reason automatic template
264 /// parameter deduction is disabled.
265 /// \return An index uniquely and reproducibly identifying
266 /// an input pointer \p Ptr in the given allocator. This identifier is
267 /// different from the ones produced by identifyObject and
268 /// identifyAlignedObject.
269 template <typename T>
270 int64_t identifyKnownAlignedObject(const void *Ptr) {
271 int64_t Out = identifyKnownObject(Ptr);
272 assert(Out % alignof(T) == 0 && "Wrong alignment information")((void)0);
273 return Out / alignof(T);
274 }
275
276 size_t getTotalMemory() const {
277 size_t TotalMemory = 0;
278 for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
279 TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
280 for (auto &PtrAndSize : CustomSizedSlabs)
281 TotalMemory += PtrAndSize.second;
282 return TotalMemory;
283 }
284
285 size_t getBytesAllocated() const { return BytesAllocated; }
286
// Red zones are padding bytes placed between allocations when running under
// a sanitizer; a size of 0 disables them.
287 void setRedZoneSize(size_t NewSize) {
288 RedZoneSize = NewSize;
289 }
290
// Print the slab count, the bytes actually requested, and the total memory
// reserved; the difference indicates space lost to slack and slab padding.
291 void PrintStats() const {
292 detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
293 getTotalMemory());
294 }
295
296private:
297 /// The current pointer into the current slab.
298 ///
299 /// This points to the next free byte in the slab.
300 char *CurPtr = nullptr;
301
302 /// The end of the current slab.
303 char *End = nullptr;
304
305 /// The slabs allocated so far.
// The size of Slabs[I] is not stored; it is recomputed as computeSlabSize(I).
306 SmallVector<void *, 4> Slabs;
307
308 /// Custom-sized slabs allocated for too-large allocation requests.
// Unlike standard slabs, the size must be stored alongside the pointer.
309 SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
310
311 /// How many bytes we've allocated.
312 ///
313 /// Used so that we can compute how much space was wasted.
314 size_t BytesAllocated = 0;
315
316 /// The number of bytes to put between allocations when running under
317 /// a sanitizer.
318 size_t RedZoneSize = 1;
319
320 static size_t computeSlabSize(unsigned SlabIdx) {
321 // Scale the actual allocated slab size based on the number of slabs
322 // allocated. Every GrowthDelay slabs allocated, we double
323 // the allocated size to reduce allocation frequency, but saturate at
324 // multiplying the slab size by 2^30.
325 return SlabSize *
326 ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
327 }
328
329 /// Allocate a new slab and move the bump pointers over into the new
330 /// slab, modifying CurPtr and End.
331 void StartNewSlab() {
// The index of the new slab is the current slab count; it determines the
// slab's size (sizes grow with the index, see computeSlabSize).
332 size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
333
334 void *NewSlab =
335 AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
336 // We own the new slab and don't want anyone reading anything other than
337 // pieces returned from this method. So poison the whole slab.
338 __asan_poison_memory_region(NewSlab, AllocatedSlabSize);
339
// Record the slab, then point the bump range [CurPtr, End) at it.
340 Slabs.push_back(NewSlab);
341 CurPtr = (char *)(NewSlab);
342 End = ((char *)NewSlab) + AllocatedSlabSize;
343 }
344
345 /// Deallocate a sequence of slabs.
346 void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
347 SmallVectorImpl<void *>::iterator E) {
348 for (; I != E; ++I) {
// Slab sizes are not stored; recompute each from the slab's position in
// the Slabs vector, mirroring the size used at allocation time.
349 size_t AllocatedSlabSize =
350 computeSlabSize(std::distance(Slabs.begin(), I));
351 AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
352 }
353 }
354
355 /// Deallocate all memory for custom sized slabs.
356 void DeallocateCustomSizedSlabs() {
357 for (auto &PtrAndSize : CustomSizedSlabs) {
358 void *Ptr = PtrAndSize.first;
359 size_t Size = PtrAndSize.second;
360 AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
361 }
362 }
363
364 template <typename T> friend class SpecificBumpPtrAllocator;
365};
366
367/// The standard BumpPtrAllocator which just uses the default template
368/// parameters.
369typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
370
371/// A BumpPtrAllocator that allows only elements of a specific type to be
372/// allocated.
373///
374/// This allows calling the destructor in DestroyAll() and when the allocator is
375/// destroyed.
376template <typename T> class SpecificBumpPtrAllocator {
377 BumpPtrAllocator Allocator;
378
379public:
380 SpecificBumpPtrAllocator() {
381 // Because SpecificBumpPtrAllocator walks the memory to call destructors,
382 // it can't have red zones between allocations.
383 Allocator.setRedZoneSize(0);
384 }
385 SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
386 : Allocator(std::move(Old.Allocator)) {}
387 ~SpecificBumpPtrAllocator() { DestroyAll(); }
388
389 SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
390 Allocator = std::move(RHS.Allocator);
391 return *this;
392 }
393
394 /// Call the destructor of each allocated object and deallocate all but the
395 /// current slab and reset the current pointer to the beginning of it, freeing
396 /// all memory allocated so far.
397 void DestroyAll() {
// Destroys every T laid out back-to-back in [Begin, End); a trailing
// fragment smaller than sizeof(T) is slack and never held a full object.
398 auto DestroyElements = [](char *Begin, char *End) {
399 assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()))((void)0);
400 for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
401 reinterpret_cast<T *>(Ptr)->~T();
402 };
403
404 for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
405 ++I) {
406 size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
407 std::distance(Allocator.Slabs.begin(), I));
// Only the current (last) slab may be partially used, so its walk stops at
// CurPtr; every earlier slab is destroyed over its full size.
408 char *Begin = (char *)alignAddr(*I, Align::Of<T>());
409 char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
410 : (char *)*I + AllocatedSlabSize;
411
412 DestroyElements(Begin, End);
413 }
414
415 for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
416 void *Ptr = PtrAndSize.first;
417 size_t Size = PtrAndSize.second;
418 DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
419 (char *)Ptr + Size);
420 }
421
// Release all memory (and reset bookkeeping) now that destructors have run.
422 Allocator.Reset();
423 }
424
425 /// Allocate space for an array of objects without constructing them.
426 T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
427};
428
429} // end namespace llvm
430
431template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
432 size_t GrowthDelay>
// Placement operator new drawing storage from a BumpPtrAllocatorImpl.
// The alignment used is min(NextPowerOf2(Size), alignof(std::max_align_t)).
// Memory obtained this way is reclaimed only when the allocator is reset or
// destroyed, never object-by-object.
433void *
434operator new(size_t Size,
435 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
436 GrowthDelay> &Allocator) {
437 return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
7
Calling 'BumpPtrAllocatorImpl::Allocate'
438 alignof(std::max_align_t)));
439}
440
441template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
442 size_t GrowthDelay>
// Matching placement delete; the language invokes it only if a constructor
// throws during the placement new above. Bump allocation never frees
// individual objects, so this is deliberately a no-op.
443void operator delete(void *,
444 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
445 SizeThreshold, GrowthDelay> &) {
446}
447
448#endif // LLVM_SUPPORT_ALLOCATOR_H

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include/llvm/Support/Alignment.h

1//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains types to represent alignments.
10// They are instrumented to guarantee some invariants are preserved and prevent
11// invalid manipulations.
12//
13// - Align represents an alignment in bytes, it is always set and always a valid
14// power of two, its minimum value is 1 which means no alignment requirements.
15//
16// - MaybeAlign is an optional type, it may be undefined or set. When it's set
17// you can get the underlying Align type by using the getValue() method.
18//
19//===----------------------------------------------------------------------===//
20
21#ifndef LLVM_SUPPORT_ALIGNMENT_H_
22#define LLVM_SUPPORT_ALIGNMENT_H_
23
24#include "llvm/ADT/Optional.h"
25#include "llvm/Support/MathExtras.h"
26#include <cassert>
27#ifndef NDEBUG1
28#include <string>
29#endif // NDEBUG
30
31namespace llvm {
32
// Asserts (in asserting builds) that a scalar compared against an Align is
// strictly positive; compiles to nothing under NDEBUG.
33#define ALIGN_CHECK_ISPOSITIVE(decl) \
34 assert(decl > 0 && (#decl " should be defined"))((void)0)
35
36/// This struct is a compact representation of a valid (non-zero power of two)
37/// alignment.
38/// It is suitable for use as static global constants.
39struct Align {
40private:
41 uint8_t ShiftValue = 0; /// The log2 of the required alignment.
42 /// ShiftValue is less than 64 by construction.
43
44 friend struct MaybeAlign;
45 friend unsigned Log2(Align);
46 friend bool operator==(Align Lhs, Align Rhs);
47 friend bool operator!=(Align Lhs, Align Rhs);
48 friend bool operator<=(Align Lhs, Align Rhs);
49 friend bool operator>=(Align Lhs, Align Rhs);
50 friend bool operator<(Align Lhs, Align Rhs);
51 friend bool operator>(Align Lhs, Align Rhs);
52 friend unsigned encode(struct MaybeAlign A);
53 friend struct MaybeAlign decodeMaybeAlign(unsigned Value);
54
55 /// A trivial type to allow construction of constexpr Align.
56 /// This is currently needed to workaround a bug in GCC 5.3 which prevents
57 /// definition of constexpr assign operators.
58 /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
59 /// FIXME: Remove this, make all assign operators constexpr and introduce user
60 /// defined literals when we don't have to support GCC 5.3 anymore.
61 /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
62 struct LogValue {
63 uint8_t Log;
64 };
65
66public:
67 /// Default is byte-aligned.
68 constexpr Align() = default;
69 /// Do not perform checks in case of copy/move construct/assign, because the
70 /// checks have been performed when building `Other`.
71 constexpr Align(const Align &Other) = default;
72 constexpr Align(Align &&Other) = default;
73 Align &operator=(const Align &Other) = default;
74 Align &operator=(Align &&Other) = default;
75
76 explicit Align(uint64_t Value) {
77 assert(Value > 0 && "Value must not be 0")((void)0);
78 assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2")((void)0);
79 ShiftValue = Log2_64(Value);
80 assert(ShiftValue < 64 && "Broken invariant")((void)0);
81 }
82
83 /// This is a hole in the type system and should not be abused.
84 /// Needed to interact with C for instance.
// NOTE(review): the static analyzer reports a path (through decodeMaybeAlign,
// whose range checks are asserts compiled out under NDEBUG) on which
// ShiftValue is 255 here, making this shift undefined behavior. The
// `ShiftValue < 64` invariant is only enforced by asserts — confirm every
// producer of ShiftValue maintains it.
85 uint64_t value() const { return uint64_t(1) << ShiftValue; }
14
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
86
87 /// Allow constructions of constexpr Align.
88 template <size_t kValue> constexpr static LogValue Constant() {
89 return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
90 }
91
92 /// Allow constructions of constexpr Align from types.
93 /// Compile time equivalent to Align(alignof(T)).
94 template <typename T> constexpr static LogValue Of() {
95 return Constant<std::alignment_of<T>::value>();
96 }
97
98 /// Constexpr constructor from LogValue type.
99 constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
100};
101
102/// Treats the value 0 as a 1, so Align is always at least 1.
103inline Align assumeAligned(uint64_t Value) {
104 return Value ? Align(Value) : Align();
105}
106
107/// This struct is a compact representation of a valid (power of two) or
108/// undefined (0) alignment.
109struct MaybeAlign : public llvm::Optional<Align> {
110private:
111 using UP = llvm::Optional<Align>;
112
113public:
114 /// Default is undefined.
115 MaybeAlign() = default;
116 /// Do not perform checks in case of copy/move construct/assign, because the
117 /// checks have been performed when building `Other`.
118 MaybeAlign(const MaybeAlign &Other) = default;
119 MaybeAlign &operator=(const MaybeAlign &Other) = default;
120 MaybeAlign(MaybeAlign &&Other) = default;
121 MaybeAlign &operator=(MaybeAlign &&Other) = default;
122
123 /// Use llvm::Optional<Align> constructor.
124 using UP::UP;
125
// A value of 0 yields an undefined (empty) alignment; any non-zero value
// must be a power of two and yields a set alignment.
126 explicit MaybeAlign(uint64_t Value) {
127 assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&((void)0)
128 "Alignment is neither 0 nor a power of 2")((void)0);
129 if (Value)
130 emplace(Value);
131 }
132
133 /// For convenience, returns a valid alignment or 1 if undefined.
134 Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
135};
136
137/// Checks that SizeInBytes is a multiple of the alignment.
138inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
139 return SizeInBytes % Lhs.value() == 0;
140}
141
142/// Checks that Addr is a multiple of the alignment.
143inline bool isAddrAligned(Align Lhs, const void *Addr) {
144 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
145}
146
147/// Returns a multiple of A needed to store `Size` bytes.
148inline uint64_t alignTo(uint64_t Size, Align A) {
149 const uint64_t Value = A.value();
13
Calling 'Align::value'
150 // The following line is equivalent to `(Size + Value - 1) / Value * Value`.
151
152 // The division followed by a multiplication can be thought of as a right
153 // shift followed by a left shift which zeros out the extra bits produced in
154 // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
155 // are just zero.
156
157 // Most compilers can generate this code but the pattern may be missed when
158 // multiple functions gets inlined.
// NOTE(review): `Size + Value - 1` wraps modulo 2^64 when Size is within
// Value - 1 of UINT64_MAX; callers needing saturation must check first.
159 return (Size + Value - 1) & ~(Value - 1U);
160}
161
162/// If non-zero \p Skew is specified, the return value will be a minimal integer
163/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
164/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
165/// Skew mod \p A'.
166///
167/// Examples:
168/// \code
169/// alignTo(5, Align(8), 7) = 7
170/// alignTo(17, Align(8), 1) = 17
171/// alignTo(~0LL, Align(8), 3) = 3
172/// \endcode
173inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
174 const uint64_t Value = A.value();
175 Skew %= Value;
176 return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
177}
178
179/// Returns a multiple of A needed to store `Size` bytes.
180/// Returns `Size` if current alignment is undefined.
181inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
182 return A ? alignTo(Size, A.getValue()) : Size;
183}
184
185/// Aligns `Addr` to `Alignment` bytes, rounding up.
186inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
187 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
// Rounding up must not wrap around the end of the address space.
188 assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=((void)0)
189 ArithAddr &&((void)0)
190 "Overflow")((void)0);
191 return alignTo(ArithAddr, Alignment);
192}
193
194/// Returns the offset to the next integer (mod 2**64) that is greater than
195/// or equal to \p Value and is a multiple of \p Align.
196inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
// The difference between the rounded-up value and the original is always in
// the half-open range [0, Alignment.value()).
197 return alignTo(Value, Alignment) - Value;
11
The value 255 is assigned to 'A.ShiftValue'
12
Calling 'alignTo'
198}
199
200/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
201/// bytes, rounding up.
202inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
// Reuses the integer overload on the pointer's numeric value.
203 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
10
Calling 'offsetToAlignment'
204}
205
206/// Returns the log2 of the alignment.
207inline unsigned Log2(Align A) { return A.ShiftValue; }
208
209/// Returns the alignment that satisfies both alignments.
210/// Same semantic as MinAlign.
211inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }
212
213/// Returns the alignment that satisfies both alignments.
214/// Same semantic as MinAlign.
215inline Align commonAlignment(Align A, uint64_t Offset) {
216 return Align(MinAlign(A.value(), Offset));
217}
218
219/// Returns the alignment that satisfies both alignments.
220/// Same semantic as MinAlign.
221inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
// If either side is undefined, the other side (possibly also undefined)
// already satisfies both.
222 return A && B ? commonAlignment(*A, *B) : A ? A : B;
223}
224
225/// Returns the alignment that satisfies both alignments.
226/// Same semantic as MinAlign.
227inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
// NOTE(review): dereferences A without checking hasValue(); callers must not
// pass an undefined alignment here — confirm against call sites.
228 return MaybeAlign(MinAlign((*A).value(), Offset));
229}
230
231/// Returns a representation of the alignment that encodes undefined as 0.
232inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }
233
234/// Dual operation of the encode function above.
235inline MaybeAlign decodeMaybeAlign(unsigned Value) {
236 if (Value == 0)
237 return MaybeAlign();
238 Align Out;
239 Out.ShiftValue = Value - 1;
240 return Out;
241}
242
243/// Returns a representation of the alignment, the encoded value is positive by
244/// definition.
245inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
246
247/// Comparisons between Align and scalars. Rhs must be positive.
// Each operator compares the alignment's byte value against the scalar; the
// positivity check compiles to nothing under NDEBUG.
248inline bool operator==(Align Lhs, uint64_t Rhs) {
249 ALIGN_CHECK_ISPOSITIVE(Rhs);
250 return Lhs.value() == Rhs;
251}
252inline bool operator!=(Align Lhs, uint64_t Rhs) {
253 ALIGN_CHECK_ISPOSITIVE(Rhs);
254 return Lhs.value() != Rhs;
255}
256inline bool operator<=(Align Lhs, uint64_t Rhs) {
257 ALIGN_CHECK_ISPOSITIVE(Rhs);
258 return Lhs.value() <= Rhs;
259}
260inline bool operator>=(Align Lhs, uint64_t Rhs) {
261 ALIGN_CHECK_ISPOSITIVE(Rhs);
262 return Lhs.value() >= Rhs;
263}
264inline bool operator<(Align Lhs, uint64_t Rhs) {
265 ALIGN_CHECK_ISPOSITIVE(Rhs);
266 return Lhs.value() < Rhs;
267}
268inline bool operator>(Align Lhs, uint64_t Rhs) {
269 ALIGN_CHECK_ISPOSITIVE(Rhs);
270 return Lhs.value() > Rhs;
271}
272
273/// Comparisons between MaybeAlign and scalars.
// An undefined alignment compares equal only to 0.
274inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
275 return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
276}
277inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
278 return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
279}
280
281/// Comparisons operators between Align.
// Comparing the log2 fields is equivalent to comparing the alignments
// themselves, and avoids materializing the (possibly huge) byte values.
282inline bool operator==(Align Lhs, Align Rhs) {
283 return Lhs.ShiftValue == Rhs.ShiftValue;
284}
285inline bool operator!=(Align Lhs, Align Rhs) {
286 return Lhs.ShiftValue != Rhs.ShiftValue;
287}
288inline bool operator<=(Align Lhs, Align Rhs) {
289 return Lhs.ShiftValue <= Rhs.ShiftValue;
290}
291inline bool operator>=(Align Lhs, Align Rhs) {
292 return Lhs.ShiftValue >= Rhs.ShiftValue;
293}
294inline bool operator<(Align Lhs, Align Rhs) {
295 return Lhs.ShiftValue < Rhs.ShiftValue;
296}
297inline bool operator>(Align Lhs, Align Rhs) {
298 return Lhs.ShiftValue > Rhs.ShiftValue;
299}
300
301// Don't allow relational comparisons with MaybeAlign.
// Deleting these forces callers to decide explicitly how an undefined
// alignment should be ordered instead of silently picking a default.
302bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
303bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
304bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
305bool operator>(Align Lhs, MaybeAlign Rhs) = delete;
306
307bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
308bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
309bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
310bool operator>(MaybeAlign Lhs, Align Rhs) = delete;
311
312bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
313bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
314bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
315bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316
317inline Align operator*(Align Lhs, uint64_t Rhs) {
318 assert(Rhs > 0 && "Rhs must be positive")((void)0);
// The Align constructor re-checks that the product is a power of two, so
// Rhs must itself be a power of two for the result to be valid.
319 return Align(Lhs.value() * Rhs);
320}
321
322inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
323 assert(Rhs > 0 && "Rhs must be positive")((void)0);
324 return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
325}
326
327inline Align operator/(Align Lhs, uint64_t Divisor) {
328 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
329 "Divisor must be positive and a power of 2")((void)0);
330 assert(Lhs != 1 && "Can't halve byte alignment")((void)0);
331 return Align(Lhs.value() / Divisor);
332}
333
334inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
335 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
336 "Divisor must be positive and a power of 2")((void)0);
337 return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
338}
339
// An undefined side contributes no constraint; the defined side wins.
340inline Align max(MaybeAlign Lhs, Align Rhs) {
341 return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
342}
343
344inline Align max(Align Lhs, MaybeAlign Rhs) {
345 return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
346}
347
348#ifndef NDEBUG1
349// For usage in LLVM_DEBUG macros.
// Renders the alignment's byte value (not its log2) as a decimal string.
350inline std::string DebugStr(const Align &A) {
351 return std::to_string(A.value());
352}
353// For usage in LLVM_DEBUG macros.
354inline std::string DebugStr(const MaybeAlign &MA) {
355 if (MA)
356 return std::to_string(MA->value());
357 return "None";
358}
359#endif // NDEBUG
360
361#undef ALIGN_CHECK_ISPOSITIVE
362
363} // namespace llvm
364
365#endif // LLVM_SUPPORT_ALIGNMENT_H_