Bug Summary

File: src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ByteCodeExprGen.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangAST/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangAST/obj/../include/clang/AST -I /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangAST/../include -I /usr/src/gnu/usr.bin/clang/libclangAST/obj -I /usr/src/gnu/usr.bin/clang/libclangAST/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangAST/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp 
-fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/Interp/ByteCodeExprGen.cpp

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/Interp/ByteCodeExprGen.cpp

1//===--- ByteCodeExprGen.cpp - Code generator for expressions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ByteCodeExprGen.h"
10#include "ByteCodeEmitter.h"
11#include "ByteCodeGenError.h"
12#include "Context.h"
13#include "Function.h"
14#include "PrimType.h"
15#include "Program.h"
16#include "State.h"
17
18using namespace clang;
19using namespace clang::interp;
20
21using APSInt = llvm::APSInt;
22template <typename T> using Expected = llvm::Expected<T>;
23template <typename T> using Optional = llvm::Optional<T>;
24
25namespace clang {
26namespace interp {
27
28/// Scope used to handle temporaries in toplevel variable declarations.
29template <class Emitter> class DeclScope final : public LocalScope<Emitter> {
30public:
31 DeclScope(ByteCodeExprGen<Emitter> *Ctx, const VarDecl *VD)
32 : LocalScope<Emitter>(Ctx), Scope(Ctx->P, VD) {}
33
34 void addExtended(const Scope::Local &Local) override {
35 return this->addLocal(Local);
36 }
37
38private:
39 Program::DeclScope Scope;
40};
41
42/// Scope used to handle initialization methods.
43template <class Emitter> class OptionScope {
44public:
45 using InitFnRef = typename ByteCodeExprGen<Emitter>::InitFnRef;
46 using ChainedInitFnRef = std::function<bool(InitFnRef)>;
47
48 /// Root constructor, compiling or discarding primitives.
49 OptionScope(ByteCodeExprGen<Emitter> *Ctx, bool NewDiscardResult)
50 : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
51 OldInitFn(std::move(Ctx->InitFn)) {
52 Ctx->DiscardResult = NewDiscardResult;
53 Ctx->InitFn = llvm::Optional<InitFnRef>{};
54 }
55
56 /// Root constructor, setting up compilation state.
57 OptionScope(ByteCodeExprGen<Emitter> *Ctx, InitFnRef NewInitFn)
58 : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
59 OldInitFn(std::move(Ctx->InitFn)) {
60 Ctx->DiscardResult = true;
61 Ctx->InitFn = NewInitFn;
62 }
63
64 /// Extends the chain of initialisation pointers.
65 OptionScope(ByteCodeExprGen<Emitter> *Ctx, ChainedInitFnRef NewInitFn)
66 : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
67 OldInitFn(std::move(Ctx->InitFn)) {
68 assert(OldInitFn && "missing initializer")((void)0);
69 Ctx->InitFn = [this, NewInitFn] { return NewInitFn(*OldInitFn); };
70 }
71
72 ~OptionScope() {
73 Ctx->DiscardResult = OldDiscardResult;
74 Ctx->InitFn = std::move(OldInitFn);
75 }
76
77private:
78 /// Parent context.
79 ByteCodeExprGen<Emitter> *Ctx;
80 /// Old discard flag to restore.
81 bool OldDiscardResult;
82 /// Old pointer emitter to restore.
83 llvm::Optional<InitFnRef> OldInitFn;
84};
85
86} // namespace interp
87} // namespace clang
88
89template <class Emitter>
90bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
91 auto *SubExpr = CE->getSubExpr();
92 switch (CE->getCastKind()) {
93
94 case CK_LValueToRValue: {
95 return dereference(
96 CE->getSubExpr(), DerefKind::Read,
97 [](PrimType) {
98 // Value loaded - nothing to do here.
99 return true;
100 },
101 [this, CE](PrimType T) {
102 // Pointer on stack - dereference it.
103 if (!this->emitLoadPop(T, CE))
104 return false;
105 return DiscardResult ? this->emitPop(T, CE) : true;
106 });
107 }
108
109 case CK_ArrayToPointerDecay:
110 case CK_AtomicToNonAtomic:
111 case CK_ConstructorConversion:
112 case CK_FunctionToPointerDecay:
113 case CK_NonAtomicToAtomic:
114 case CK_NoOp:
115 case CK_UserDefinedConversion:
116 return this->Visit(SubExpr);
117
118 case CK_ToVoid:
119 return discard(SubExpr);
120
121 default: {
122 // TODO: implement other casts.
123 return this->bail(CE);
124 }
125 }
126}
127
128template <class Emitter>
129bool ByteCodeExprGen<Emitter>::VisitIntegerLiteral(const IntegerLiteral *LE) {
130 if (DiscardResult)
131 return true;
132
133 auto Val = LE->getValue();
134 QualType LitTy = LE->getType();
135 if (Optional<PrimType> T = classify(LitTy))
136 return emitConst(*T, getIntWidth(LitTy), LE->getValue(), LE);
137 return this->bail(LE);
138}
139
140template <class Emitter>
141bool ByteCodeExprGen<Emitter>::VisitParenExpr(const ParenExpr *PE) {
142 return this->Visit(PE->getSubExpr());
143}
144
145template <class Emitter>
146bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
147 const Expr *LHS = BO->getLHS();
148 const Expr *RHS = BO->getRHS();
149
150 // Deal with operations which have composite or void types.
151 switch (BO->getOpcode()) {
152 case BO_Comma:
153 if (!discard(LHS))
154 return false;
155 if (!this->Visit(RHS))
156 return false;
157 return true;
158 default:
159 break;
160 }
161
162 // Typecheck the args.
163 Optional<PrimType> LT = classify(LHS->getType());
164 Optional<PrimType> RT = classify(RHS->getType());
165 if (!LT || !RT) {
166 return this->bail(BO);
167 }
168
169 if (Optional<PrimType> T = classify(BO->getType())) {
170 if (!visit(LHS))
171 return false;
172 if (!visit(RHS))
173 return false;
174
175 auto Discard = [this, T, BO](bool Result) {
176 if (!Result)
177 return false;
178 return DiscardResult ? this->emitPop(*T, BO) : true;
179 };
180
181 switch (BO->getOpcode()) {
182 case BO_EQ:
183 return Discard(this->emitEQ(*LT, BO));
184 case BO_NE:
185 return Discard(this->emitNE(*LT, BO));
186 case BO_LT:
187 return Discard(this->emitLT(*LT, BO));
188 case BO_LE:
189 return Discard(this->emitLE(*LT, BO));
190 case BO_GT:
191 return Discard(this->emitGT(*LT, BO));
192 case BO_GE:
193 return Discard(this->emitGE(*LT, BO));
194 case BO_Sub:
195 return Discard(this->emitSub(*T, BO));
196 case BO_Add:
197 return Discard(this->emitAdd(*T, BO));
198 case BO_Mul:
199 return Discard(this->emitMul(*T, BO));
200 default:
201 return this->bail(BO);
202 }
203 }
204
205 return this->bail(BO);
206}
207
208template <class Emitter>
209bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
210 OptionScope<Emitter> Scope(this, /*discardResult=*/true);
211 return this->Visit(E);
212}
213
214template <class Emitter>
215bool ByteCodeExprGen<Emitter>::visit(const Expr *E) {
216 OptionScope<Emitter> Scope(this, /*discardResult=*/false);
217 return this->Visit(E);
218}
219
220template <class Emitter>
221bool ByteCodeExprGen<Emitter>::visitBool(const Expr *E) {
222 if (Optional<PrimType> T = classify(E->getType())) {
223 return visit(E);
224 } else {
225 return this->bail(E);
226 }
227}
228
229template <class Emitter>
230bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, const Expr *E) {
231 switch (T) {
232 case PT_Bool:
233 return this->emitZeroBool(E);
234 case PT_Sint8:
235 return this->emitZeroSint8(E);
236 case PT_Uint8:
237 return this->emitZeroUint8(E);
238 case PT_Sint16:
239 return this->emitZeroSint16(E);
240 case PT_Uint16:
241 return this->emitZeroUint16(E);
242 case PT_Sint32:
243 return this->emitZeroSint32(E);
244 case PT_Uint32:
245 return this->emitZeroUint32(E);
246 case PT_Sint64:
247 return this->emitZeroSint64(E);
248 case PT_Uint64:
249 return this->emitZeroUint64(E);
250 case PT_Ptr:
251 return this->emitNullPtr(E);
252 }
253 llvm_unreachable("unknown primitive type")__builtin_unreachable();
254}
255
256template <class Emitter>
257bool ByteCodeExprGen<Emitter>::dereference(
258 const Expr *LV, DerefKind AK, llvm::function_ref<bool(PrimType)> Direct,
259 llvm::function_ref<bool(PrimType)> Indirect) {
260 if (Optional<PrimType> T = classify(LV->getType())) {
261 if (!LV->refersToBitField()) {
262 // Only primitive, non bit-field types can be dereferenced directly.
263 if (auto *DE = dyn_cast<DeclRefExpr>(LV)) {
264 if (!DE->getDecl()->getType()->isReferenceType()) {
265 if (auto *PD = dyn_cast<ParmVarDecl>(DE->getDecl()))
266 return dereferenceParam(LV, *T, PD, AK, Direct, Indirect);
267 if (auto *VD = dyn_cast<VarDecl>(DE->getDecl()))
268 return dereferenceVar(LV, *T, VD, AK, Direct, Indirect);
269 }
270 }
271 }
272
273 if (!visit(LV))
274 return false;
275 return Indirect(*T);
276 }
277
278 return false;
279}
280
281template <class Emitter>
282bool ByteCodeExprGen<Emitter>::dereferenceParam(
283 const Expr *LV, PrimType T, const ParmVarDecl *PD, DerefKind AK,
284 llvm::function_ref<bool(PrimType)> Direct,
285 llvm::function_ref<bool(PrimType)> Indirect) {
286 auto It = this->Params.find(PD);
287 if (It != this->Params.end()) {
288 unsigned Idx = It->second;
289 switch (AK) {
290 case DerefKind::Read:
291 return DiscardResult ? true : this->emitGetParam(T, Idx, LV);
292
293 case DerefKind::Write:
294 if (!Direct(T))
295 return false;
296 if (!this->emitSetParam(T, Idx, LV))
297 return false;
298 return DiscardResult ? true : this->emitGetPtrParam(Idx, LV);
299
300 case DerefKind::ReadWrite:
301 if (!this->emitGetParam(T, Idx, LV))
302 return false;
303 if (!Direct(T))
304 return false;
305 if (!this->emitSetParam(T, Idx, LV))
306 return false;
307 return DiscardResult ? true : this->emitGetPtrParam(Idx, LV);
308 }
309 return true;
310 }
311
312 // If the param is a pointer, we can dereference a dummy value.
313 if (!DiscardResult && T == PT_Ptr && AK == DerefKind::Read) {
314 if (auto Idx = P.getOrCreateDummy(PD))
315 return this->emitGetPtrGlobal(*Idx, PD);
316 return false;
317 }
318
319 // Value cannot be produced - try to emit pointer and do stuff with it.
320 return visit(LV) && Indirect(T);
321}
322
323template <class Emitter>
324bool ByteCodeExprGen<Emitter>::dereferenceVar(
325 const Expr *LV, PrimType T, const VarDecl *VD, DerefKind AK,
326 llvm::function_ref<bool(PrimType)> Direct,
327 llvm::function_ref<bool(PrimType)> Indirect) {
328 auto It = Locals.find(VD);
329 if (It != Locals.end()) {
330 const auto &L = It->second;
331 switch (AK) {
332 case DerefKind::Read:
333 if (!this->emitGetLocal(T, L.Offset, LV))
334 return false;
335 return DiscardResult ? this->emitPop(T, LV) : true;
336
337 case DerefKind::Write:
338 if (!Direct(T))
339 return false;
340 if (!this->emitSetLocal(T, L.Offset, LV))
341 return false;
342 return DiscardResult ? true : this->emitGetPtrLocal(L.Offset, LV);
343
344 case DerefKind::ReadWrite:
345 if (!this->emitGetLocal(T, L.Offset, LV))
346 return false;
347 if (!Direct(T))
348 return false;
349 if (!this->emitSetLocal(T, L.Offset, LV))
350 return false;
351 return DiscardResult ? true : this->emitGetPtrLocal(L.Offset, LV);
352 }
353 } else if (auto Idx = getGlobalIdx(VD)) {
354 switch (AK) {
355 case DerefKind::Read:
356 if (!this->emitGetGlobal(T, *Idx, LV))
357 return false;
358 return DiscardResult ? this->emitPop(T, LV) : true;
359
360 case DerefKind::Write:
361 if (!Direct(T))
362 return false;
363 if (!this->emitSetGlobal(T, *Idx, LV))
364 return false;
365 return DiscardResult ? true : this->emitGetPtrGlobal(*Idx, LV);
366
367 case DerefKind::ReadWrite:
368 if (!this->emitGetGlobal(T, *Idx, LV))
369 return false;
370 if (!Direct(T))
371 return false;
372 if (!this->emitSetGlobal(T, *Idx, LV))
373 return false;
374 return DiscardResult ? true : this->emitGetPtrGlobal(*Idx, LV);
375 }
376 }
377
378 // If the declaration is a constant value, emit it here even
379 // though the declaration was not evaluated in the current scope.
380 // The access mode can only be read in this case.
381 if (!DiscardResult && AK == DerefKind::Read) {
382 if (VD->hasLocalStorage() && VD->hasInit() && !VD->isConstexpr()) {
383 QualType VT = VD->getType();
384 if (VT.isConstQualified() && VT->isFundamentalType())
385 return this->Visit(VD->getInit());
386 }
387 }
388
389 // Value cannot be produced - try to emit pointer.
390 return visit(LV) && Indirect(T);
391}
392
393template <class Emitter>
394bool ByteCodeExprGen<Emitter>::emitConst(PrimType T, unsigned NumBits,
395 const APInt &Value, const Expr *E) {
396 switch (T) {
397 case PT_Sint8:
398 return this->emitConstSint8(Value.getSExtValue(), E);
399 case PT_Uint8:
400 return this->emitConstUint8(Value.getZExtValue(), E);
401 case PT_Sint16:
402 return this->emitConstSint16(Value.getSExtValue(), E);
403 case PT_Uint16:
404 return this->emitConstUint16(Value.getZExtValue(), E);
405 case PT_Sint32:
406 return this->emitConstSint32(Value.getSExtValue(), E);
407 case PT_Uint32:
408 return this->emitConstUint32(Value.getZExtValue(), E);
409 case PT_Sint64:
410 return this->emitConstSint64(Value.getSExtValue(), E);
411 case PT_Uint64:
412 return this->emitConstUint64(Value.getZExtValue(), E);
413 case PT_Bool:
414 return this->emitConstBool(Value.getBoolValue(), E);
415 case PT_Ptr:
416 llvm_unreachable("Invalid integral type")__builtin_unreachable();
417 break;
418 }
419 llvm_unreachable("unknown primitive type")__builtin_unreachable();
420}
421
422template <class Emitter>
423unsigned ByteCodeExprGen<Emitter>::allocateLocalPrimitive(DeclTy &&Src,
424 PrimType Ty,
425 bool IsConst,
426 bool IsExtended) {
427 Descriptor *D = P.createDescriptor(Src, Ty, IsConst, Src.is<const Expr *>());
1
Calling 'Program::createDescriptor'
428 Scope::Local Local = this->createLocal(D);
429 if (auto *VD = dyn_cast_or_null<ValueDecl>(Src.dyn_cast<const Decl *>()))
430 Locals.insert({VD, Local});
431 VarScope->add(Local, IsExtended);
432 return Local.Offset;
433}
434
435template <class Emitter>
436llvm::Optional<unsigned>
437ByteCodeExprGen<Emitter>::allocateLocal(DeclTy &&Src, bool IsExtended) {
438 QualType Ty;
439
440 const ValueDecl *Key = nullptr;
441 bool IsTemporary = false;
442 if (auto *VD = dyn_cast_or_null<ValueDecl>(Src.dyn_cast<const Decl *>())) {
443 Key = VD;
444 Ty = VD->getType();
445 }
446 if (auto *E = Src.dyn_cast<const Expr *>()) {
447 IsTemporary = true;
448 Ty = E->getType();
449 }
450
451 Descriptor *D = P.createDescriptor(Src, Ty.getTypePtr(),
452 Ty.isConstQualified(), IsTemporary);
453 if (!D)
454 return {};
455
456 Scope::Local Local = this->createLocal(D);
457 if (Key)
458 Locals.insert({Key, Local});
459 VarScope->add(Local, IsExtended);
460 return Local.Offset;
461}
462
463template <class Emitter>
464bool ByteCodeExprGen<Emitter>::visitInitializer(
465 const Expr *Init, InitFnRef InitFn) {
466 OptionScope<Emitter> Scope(this, InitFn);
467 return this->Visit(Init);
468}
469
470template <class Emitter>
471bool ByteCodeExprGen<Emitter>::getPtrVarDecl(const VarDecl *VD, const Expr *E) {
472 // Generate a pointer to the local, loading refs.
473 if (Optional<unsigned> Idx = getGlobalIdx(VD)) {
474 if (VD->getType()->isReferenceType())
475 return this->emitGetGlobalPtr(*Idx, E);
476 else
477 return this->emitGetPtrGlobal(*Idx, E);
478 }
479 return this->bail(VD);
480}
481
482template <class Emitter>
483llvm::Optional<unsigned>
484ByteCodeExprGen<Emitter>::getGlobalIdx(const VarDecl *VD) {
485 if (VD->isConstexpr()) {
486 // Constexpr decl - it must have already been defined.
487 return P.getGlobal(VD);
488 }
489 if (!VD->hasLocalStorage()) {
490 // Not constexpr, but a global var - can have pointer taken.
491 Program::DeclScope Scope(P, VD);
492 return P.getOrCreateGlobal(VD);
493 }
494 return {};
495}
496
497template <class Emitter>
498const RecordType *ByteCodeExprGen<Emitter>::getRecordTy(QualType Ty) {
499 if (auto *PT = dyn_cast<PointerType>(Ty))
500 return PT->getPointeeType()->getAs<RecordType>();
501 else
502 return Ty->getAs<RecordType>();
503}
504
505template <class Emitter>
506Record *ByteCodeExprGen<Emitter>::getRecord(QualType Ty) {
507 if (auto *RecordTy = getRecordTy(Ty)) {
508 return getRecord(RecordTy->getDecl());
509 }
510 return nullptr;
511}
512
513template <class Emitter>
514Record *ByteCodeExprGen<Emitter>::getRecord(const RecordDecl *RD) {
515 return P.getOrCreateRecord(RD);
516}
517
518template <class Emitter>
519bool ByteCodeExprGen<Emitter>::visitExpr(const Expr *Exp) {
520 ExprScope<Emitter> RootScope(this);
521 if (!visit(Exp))
522 return false;
523
524 if (Optional<PrimType> T = classify(Exp))
525 return this->emitRet(*T, Exp);
526 else
527 return this->emitRetValue(Exp);
528}
529
530template <class Emitter>
531bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
532 const Expr *Init = VD->getInit();
533
534 if (Optional<unsigned> I = P.createGlobal(VD)) {
535 if (Optional<PrimType> T = classify(VD->getType())) {
536 {
537 // Primitive declarations - compute the value and set it.
538 DeclScope<Emitter> LocalScope(this, VD);
539 if (!visit(Init))
540 return false;
541 }
542
543 // If the declaration is global, save the value for later use.
544 if (!this->emitDup(*T, VD))
545 return false;
546 if (!this->emitInitGlobal(*T, *I, VD))
547 return false;
548 return this->emitRet(*T, VD);
549 } else {
550 {
551 // Composite declarations - allocate storage and initialize it.
552 DeclScope<Emitter> LocalScope(this, VD);
553 if (!visitGlobalInitializer(Init, *I))
554 return false;
555 }
556
557 // Return a pointer to the global.
558 if (!this->emitGetPtrGlobal(*I, VD))
559 return false;
560 return this->emitRetValue(VD);
561 }
562 }
563
564 return this->bail(VD);
565}
566
567template <class Emitter>
568void ByteCodeExprGen<Emitter>::emitCleanup() {
569 for (VariableScope<Emitter> *C = VarScope; C; C = C->getParent())
570 C->emitDestruction();
571}
572
573namespace clang {
574namespace interp {
575
576template class ByteCodeExprGen<ByteCodeEmitter>;
577template class ByteCodeExprGen<EvalEmitter>;
578
579} // namespace interp
580} // namespace clang

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/clang/lib/AST/Interp/Program.h

1//===--- Program.h - Bytecode for the constexpr VM --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Defines a program which organises and links multiple bytecode functions.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_AST_INTERP_PROGRAM_H
14#define LLVM_CLANG_AST_INTERP_PROGRAM_H
15
16#include <map>
17#include <vector>
18#include "Function.h"
19#include "Pointer.h"
20#include "PrimType.h"
21#include "Record.h"
22#include "Source.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/PointerUnion.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/Support/Allocator.h"
27
28namespace clang {
29class RecordDecl;
30class Expr;
31class FunctionDecl;
32class Stmt;
33class StringLiteral;
34class VarDecl;
35
36namespace interp {
37class Context;
38class State;
39class Record;
40class Scope;
41
42/// The program contains and links the bytecode for all functions.
43class Program {
44public:
45 Program(Context &Ctx) : Ctx(Ctx) {}
46
47 /// Emits a string literal among global data.
48 unsigned createGlobalString(const StringLiteral *S);
49
50 /// Returns a pointer to a global.
51 Pointer getPtrGlobal(unsigned Idx);
52
53 /// Returns the value of a global.
54 Block *getGlobal(unsigned Idx) {
55 assert(Idx < Globals.size())((void)0);
56 return Globals[Idx]->block();
57 }
58
59 /// Finds a global's index.
60 llvm::Optional<unsigned> getGlobal(const ValueDecl *VD);
61
62 /// Returns or creates a global and creates an index to it.
63 llvm::Optional<unsigned> getOrCreateGlobal(const ValueDecl *VD);
64
65 /// Returns or creates a dummy value for parameters.
66 llvm::Optional<unsigned> getOrCreateDummy(const ParmVarDecl *PD);
67
68 /// Creates a global and returns its index.
69 llvm::Optional<unsigned> createGlobal(const ValueDecl *VD);
70
71 /// Creates a global from a lifetime-extended temporary.
72 llvm::Optional<unsigned> createGlobal(const Expr *E);
73
74 /// Creates a new function from a code range.
75 template <typename... Ts>
76 Function *createFunction(const FunctionDecl *Def, Ts &&... Args) {
77 auto *Func = new Function(*this, Def, std::forward<Ts>(Args)...);
78 Funcs.insert({Def, std::unique_ptr<Function>(Func)});
79 return Func;
80 }
81 /// Creates an anonymous function.
82 template <typename... Ts>
83 Function *createFunction(Ts &&... Args) {
84 auto *Func = new Function(*this, std::forward<Ts>(Args)...);
85 AnonFuncs.emplace_back(Func);
86 return Func;
87 }
88
89 /// Returns a function.
90 Function *getFunction(const FunctionDecl *F);
91
92 /// Returns a pointer to a function if it exists and can be compiled.
93 /// If a function couldn't be compiled, an error is returned.
94 /// If a function was not yet defined, a null pointer is returned.
95 llvm::Expected<Function *> getOrCreateFunction(const FunctionDecl *F);
96
97 /// Returns a record or creates one if it does not exist.
98 Record *getOrCreateRecord(const RecordDecl *RD);
99
100 /// Creates a descriptor for a primitive type.
101 Descriptor *createDescriptor(const DeclTy &D, PrimType Type,
102 bool IsConst = false,
103 bool IsTemporary = false,
104 bool IsMutable = false) {
105 return allocateDescriptor(D, Type, IsConst, IsTemporary, IsMutable);
2
Calling 'Program::allocateDescriptor'
106 }
107
108 /// Creates a descriptor for a composite type.
109 Descriptor *createDescriptor(const DeclTy &D, const Type *Ty,
110 bool IsConst = false, bool IsTemporary = false,
111 bool IsMutable = false);
112
113 /// Context to manage declaration lifetimes.
114 class DeclScope {
115 public:
116 DeclScope(Program &P, const VarDecl *VD) : P(P) { P.startDeclaration(VD); }
117 ~DeclScope() { P.endDeclaration(); }
118
119 private:
120 Program &P;
121 };
122
123 /// Returns the current declaration ID.
124 llvm::Optional<unsigned> getCurrentDecl() const {
125 if (CurrentDeclaration == NoDeclaration)
126 return llvm::Optional<unsigned>{};
127 return LastDeclaration;
128 }
129
130private:
131 friend class DeclScope;
132
133 llvm::Optional<unsigned> createGlobal(const DeclTy &D, QualType Ty,
134 bool IsStatic, bool IsExtern);
135
136 /// Reference to the VM context.
137 Context &Ctx;
138 /// Mapping from decls to cached bytecode functions.
139 llvm::DenseMap<const FunctionDecl *, std::unique_ptr<Function>> Funcs;
140 /// List of anonymous functions.
141 std::vector<std::unique_ptr<Function>> AnonFuncs;
142
143 /// Function relocation locations.
144 llvm::DenseMap<const FunctionDecl *, std::vector<unsigned>> Relocs;
145
146 /// Custom allocator for global storage.
147 using PoolAllocTy = llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator>;
148
149 /// Descriptor + storage for a global object.
150 ///
151 /// Global objects never go out of scope, thus they do not track pointers.
152 class Global {
153 public:
154 /// Create a global descriptor for string literals.
155 template <typename... Tys>
156 Global(Tys... Args) : B(std::forward<Tys>(Args)...) {}
157
158 /// Allocates the global in the pool, reserving storage for data.
159 void *operator new(size_t Meta, PoolAllocTy &Alloc, size_t Data) {
160 return Alloc.Allocate(Meta + Data, alignof(void *));
161 }
162
163 /// Return a pointer to the data.
164 char *data() { return B.data(); }
165 /// Return a pointer to the block.
166 Block *block() { return &B; }
167
168 private:
169 /// Required metadata - does not actually track pointers.
170 Block B;
171 };
172
173 /// Allocator for globals.
174 PoolAllocTy Allocator;
175
176 /// Global objects.
177 std::vector<Global *> Globals;
178 /// Cached global indices.
179 llvm::DenseMap<const void *, unsigned> GlobalIndices;
180
181 /// Mapping from decls to record metadata.
182 llvm::DenseMap<const RecordDecl *, Record *> Records;
183
184 /// Dummy parameter to generate pointers from.
185 llvm::DenseMap<const ParmVarDecl *, unsigned> DummyParams;
186
187 /// Creates a new descriptor.
188 template <typename... Ts>
189 Descriptor *allocateDescriptor(Ts &&... Args) {
190 return new (Allocator) Descriptor(std::forward<Ts>(Args)...);
3
Calling 'operator new<llvm::MallocAllocator, 4096UL, 4096UL, 128UL>'
191 }
192
193 /// No declaration ID.
194 static constexpr unsigned NoDeclaration = (unsigned)-1;
195 /// Last declaration ID.
196 unsigned LastDeclaration = 0;
197 /// Current declaration ID.
198 unsigned CurrentDeclaration = NoDeclaration;
199
200 /// Starts evaluating a declaration.
201 void startDeclaration(const VarDecl *Decl) {
202 LastDeclaration += 1;
203 CurrentDeclaration = LastDeclaration;
204 }
205
206 /// Ends a global declaration.
207 void endDeclaration() {
208 CurrentDeclaration = NoDeclaration;
209 }
210
211public:
212 /// Dumps the disassembled bytecode to \c llvm::errs().
213 void dump() const;
214 void dump(llvm::raw_ostream &OS) const;
215};
216
217} // namespace interp
218} // namespace clang
219
220#endif

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include/llvm/Support/Allocator.h

1//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
11/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
12/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
13/// allocator.
14///
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_SUPPORT_ALLOCATOR_H
18#define LLVM_SUPPORT_ALLOCATOR_H
19
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/Support/Alignment.h"
23#include "llvm/Support/AllocatorBase.h"
24#include "llvm/Support/Compiler.h"
25#include "llvm/Support/ErrorHandling.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/MemAlloc.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstdint>
32#include <cstdlib>
33#include <iterator>
34#include <type_traits>
35#include <utility>
36
37namespace llvm {
38
39namespace detail {
40
41// We call out to an external function to actually print the message as the
42// printing code uses Allocator.h in its implementation.
43void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
44 size_t TotalMemory);
45
46} // end namespace detail
47
48/// Allocate memory in an ever growing pool, as if by bump-pointer.
49///
50/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
51/// memory rather than relying on a boundless contiguous heap. However, it has
52/// bump-pointer semantics in that it is a monotonically growing pool of memory
53/// where every allocation is found by merely allocating the next N bytes in
54/// the slab, or the next N bytes in the next slab.
55///
56/// Note that this also has a threshold for forcing allocations above a certain
57/// size into their own slab.
58///
59/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
60/// object, which wraps malloc, to allocate memory, but it can be changed to
61/// use a custom allocator.
62///
63/// The GrowthDelay specifies after how many allocated slabs the allocator
64/// increases the size of the slabs.
65template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
66 size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
67class BumpPtrAllocatorImpl
68 : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
69 SizeThreshold, GrowthDelay>>,
70 private AllocatorT {
71public:
72 static_assert(SizeThreshold <= SlabSize,
73 "The SizeThreshold must be at most the SlabSize to ensure "
74 "that objects larger than a slab go into their own memory "
75 "allocation.");
76 static_assert(GrowthDelay > 0,
77 "GrowthDelay must be at least 1 which already increases the"
78 "slab size after each allocated slab.");
79
80 BumpPtrAllocatorImpl() = default;
81
82 template <typename T>
83 BumpPtrAllocatorImpl(T &&Allocator)
84 : AllocatorT(std::forward<T &&>(Allocator)) {}
85
86 // Manually implement a move constructor as we must clear the old allocator's
87 // slabs as a matter of correctness.
88 BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
89 : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
90 End(Old.End), Slabs(std::move(Old.Slabs)),
91 CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
92 BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
93 Old.CurPtr = Old.End = nullptr;
94 Old.BytesAllocated = 0;
95 Old.Slabs.clear();
96 Old.CustomSizedSlabs.clear();
97 }
98
99 ~BumpPtrAllocatorImpl() {
100 DeallocateSlabs(Slabs.begin(), Slabs.end());
101 DeallocateCustomSizedSlabs();
102 }
103
104 BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
105 DeallocateSlabs(Slabs.begin(), Slabs.end());
106 DeallocateCustomSizedSlabs();
107
108 CurPtr = RHS.CurPtr;
109 End = RHS.End;
110 BytesAllocated = RHS.BytesAllocated;
111 RedZoneSize = RHS.RedZoneSize;
112 Slabs = std::move(RHS.Slabs);
113 CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
114 AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));
115
116 RHS.CurPtr = RHS.End = nullptr;
117 RHS.BytesAllocated = 0;
118 RHS.Slabs.clear();
119 RHS.CustomSizedSlabs.clear();
120 return *this;
121 }
122
123 /// Deallocate all but the current slab and reset the current pointer
124 /// to the beginning of it, freeing all memory allocated so far.
125 void Reset() {
126 // Deallocate all but the first slab, and deallocate all custom-sized slabs.
127 DeallocateCustomSizedSlabs();
128 CustomSizedSlabs.clear();
129
130 if (Slabs.empty())
131 return;
132
133 // Reset the state.
134 BytesAllocated = 0;
135 CurPtr = (char *)Slabs.front();
136 End = CurPtr + SlabSize;
137
138 __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
139 DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
140 Slabs.erase(std::next(Slabs.begin()), Slabs.end());
141 }
142
143 /// Allocate space at the specified alignment.
144 LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
145 Allocate(size_t Size, Align Alignment) {
146 // Keep track of how many bytes we've allocated.
147 BytesAllocated += Size;
148
149 size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
6
Calling 'offsetToAlignedAddr'
150 assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow")((void)0);
151
152 size_t SizeToAllocate = Size;
153#if LLVM_ADDRESS_SANITIZER_BUILD0
154 // Add trailing bytes as a "red zone" under ASan.
155 SizeToAllocate += RedZoneSize;
156#endif
157
158 // Check if we have enough space.
159 if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
160 char *AlignedPtr = CurPtr + Adjustment;
161 CurPtr = AlignedPtr + SizeToAllocate;
162 // Update the allocation point of this memory block in MemorySanitizer.
163 // Without this, MemorySanitizer messages for values originated from here
164 // will point to the allocation of the entire slab.
165 __msan_allocated_memory(AlignedPtr, Size);
166 // Similarly, tell ASan about this space.
167 __asan_unpoison_memory_region(AlignedPtr, Size);
168 return AlignedPtr;
169 }
170
171 // If Size is really big, allocate a separate slab for it.
172 size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
173 if (PaddedSize > SizeThreshold) {
174 void *NewSlab =
175 AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
176 // We own the new slab and don't want anyone reading anyting other than
177 // pieces returned from this method. So poison the whole slab.
178 __asan_poison_memory_region(NewSlab, PaddedSize);
179 CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
180
181 uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
182 assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize)((void)0);
183 char *AlignedPtr = (char*)AlignedAddr;
184 __msan_allocated_memory(AlignedPtr, Size);
185 __asan_unpoison_memory_region(AlignedPtr, Size);
186 return AlignedPtr;
187 }
188
189 // Otherwise, start a new slab and try again.
190 StartNewSlab();
191 uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
192 assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&((void)0)
193 "Unable to allocate memory!")((void)0);
194 char *AlignedPtr = (char*)AlignedAddr;
195 CurPtr = AlignedPtr + SizeToAllocate;
196 __msan_allocated_memory(AlignedPtr, Size);
197 __asan_unpoison_memory_region(AlignedPtr, Size);
198 return AlignedPtr;
199 }
200
201 inline LLVM_ATTRIBUTE_RETURNS_NONNULL__attribute__((returns_nonnull)) LLVM_ATTRIBUTE_RETURNS_NOALIAS__attribute__((__malloc__)) void *
202 Allocate(size_t Size, size_t Alignment) {
203 assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.")((void)0);
204 return Allocate(Size, Align(Alignment));
5
Calling 'BumpPtrAllocatorImpl::Allocate'
205 }
206
207 // Pull in base class overloads.
208 using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
209
210 // Bump pointer allocators are expected to never free their storage; and
211 // clients expect pointers to remain valid for non-dereferencing uses even
212 // after deallocation.
213 void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
214 __asan_poison_memory_region(Ptr, Size);
215 }
216
217 // Pull in base class overloads.
218 using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
219
220 size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
221
222 /// \return An index uniquely and reproducibly identifying
223 /// an input pointer \p Ptr in the given allocator.
224 /// The returned value is negative iff the object is inside a custom-size
225 /// slab.
226 /// Returns an empty optional if the pointer is not found in the allocator.
227 llvm::Optional<int64_t> identifyObject(const void *Ptr) {
228 const char *P = static_cast<const char *>(Ptr);
229 int64_t InSlabIdx = 0;
230 for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
231 const char *S = static_cast<const char *>(Slabs[Idx]);
232 if (P >= S && P < S + computeSlabSize(Idx))
233 return InSlabIdx + static_cast<int64_t>(P - S);
234 InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
235 }
236
237 // Use negative index to denote custom sized slabs.
238 int64_t InCustomSizedSlabIdx = -1;
239 for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
240 const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
241 size_t Size = CustomSizedSlabs[Idx].second;
242 if (P >= S && P < S + Size)
243 return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
244 InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
245 }
246 return None;
247 }
248
249 /// A wrapper around identifyObject that additionally asserts that
250 /// the object is indeed within the allocator.
251 /// \return An index uniquely and reproducibly identifying
252 /// an input pointer \p Ptr in the given allocator.
253 int64_t identifyKnownObject(const void *Ptr) {
254 Optional<int64_t> Out = identifyObject(Ptr);
255 assert(Out && "Wrong allocator used")((void)0);
256 return *Out;
257 }
258
259 /// A wrapper around identifyKnownObject. Accepts type information
260 /// about the object and produces a smaller identifier by relying on
261 /// the alignment information. Note that sub-classes may have different
262 /// alignment, so the most base class should be passed as template parameter
263 /// in order to obtain correct results. For that reason automatic template
264 /// parameter deduction is disabled.
265 /// \return An index uniquely and reproducibly identifying
266 /// an input pointer \p Ptr in the given allocator. This identifier is
267 /// different from the ones produced by identifyObject and
268 /// identifyAlignedObject.
269 template <typename T>
270 int64_t identifyKnownAlignedObject(const void *Ptr) {
271 int64_t Out = identifyKnownObject(Ptr);
272 assert(Out % alignof(T) == 0 && "Wrong alignment information")((void)0);
273 return Out / alignof(T);
274 }
275
276 size_t getTotalMemory() const {
277 size_t TotalMemory = 0;
278 for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
279 TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
280 for (auto &PtrAndSize : CustomSizedSlabs)
281 TotalMemory += PtrAndSize.second;
282 return TotalMemory;
283 }
284
285 size_t getBytesAllocated() const { return BytesAllocated; }
286
287 void setRedZoneSize(size_t NewSize) {
288 RedZoneSize = NewSize;
289 }
290
291 void PrintStats() const {
292 detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
293 getTotalMemory());
294 }
295
296private:
297 /// The current pointer into the current slab.
298 ///
299 /// This points to the next free byte in the slab.
300 char *CurPtr = nullptr;
301
302 /// The end of the current slab.
303 char *End = nullptr;
304
305 /// The slabs allocated so far.
306 SmallVector<void *, 4> Slabs;
307
308 /// Custom-sized slabs allocated for too-large allocation requests.
309 SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
310
311 /// How many bytes we've allocated.
312 ///
313 /// Used so that we can compute how much space was wasted.
314 size_t BytesAllocated = 0;
315
316 /// The number of bytes to put between allocations when running under
317 /// a sanitizer.
318 size_t RedZoneSize = 1;
319
320 static size_t computeSlabSize(unsigned SlabIdx) {
321 // Scale the actual allocated slab size based on the number of slabs
322 // allocated. Every GrowthDelay slabs allocated, we double
323 // the allocated size to reduce allocation frequency, but saturate at
324 // multiplying the slab size by 2^30.
325 return SlabSize *
326 ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
327 }
328
329 /// Allocate a new slab and move the bump pointers over into the new
330 /// slab, modifying CurPtr and End.
331 void StartNewSlab() {
332 size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
333
334 void *NewSlab =
335 AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
336 // We own the new slab and don't want anyone reading anything other than
337 // pieces returned from this method. So poison the whole slab.
338 __asan_poison_memory_region(NewSlab, AllocatedSlabSize);
339
340 Slabs.push_back(NewSlab);
341 CurPtr = (char *)(NewSlab);
342 End = ((char *)NewSlab) + AllocatedSlabSize;
343 }
344
345 /// Deallocate a sequence of slabs.
346 void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
347 SmallVectorImpl<void *>::iterator E) {
348 for (; I != E; ++I) {
349 size_t AllocatedSlabSize =
350 computeSlabSize(std::distance(Slabs.begin(), I));
351 AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
352 }
353 }
354
355 /// Deallocate all memory for custom sized slabs.
356 void DeallocateCustomSizedSlabs() {
357 for (auto &PtrAndSize : CustomSizedSlabs) {
358 void *Ptr = PtrAndSize.first;
359 size_t Size = PtrAndSize.second;
360 AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
361 }
362 }
363
364 template <typename T> friend class SpecificBumpPtrAllocator;
365};
366
367/// The standard BumpPtrAllocator which just uses the default template
368/// parameters.
369typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
370
371/// A BumpPtrAllocator that allows only elements of a specific type to be
372/// allocated.
373///
374/// This allows calling the destructor in DestroyAll() and when the allocator is
375/// destroyed.
376template <typename T> class SpecificBumpPtrAllocator {
377 BumpPtrAllocator Allocator;
378
379public:
380 SpecificBumpPtrAllocator() {
381 // Because SpecificBumpPtrAllocator walks the memory to call destructors,
382 // it can't have red zones between allocations.
383 Allocator.setRedZoneSize(0);
384 }
385 SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
386 : Allocator(std::move(Old.Allocator)) {}
387 ~SpecificBumpPtrAllocator() { DestroyAll(); }
388
389 SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
390 Allocator = std::move(RHS.Allocator);
391 return *this;
392 }
393
394 /// Call the destructor of each allocated object and deallocate all but the
395 /// current slab and reset the current pointer to the beginning of it, freeing
396 /// all memory allocated so far.
397 void DestroyAll() {
398 auto DestroyElements = [](char *Begin, char *End) {
399 assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()))((void)0);
400 for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
401 reinterpret_cast<T *>(Ptr)->~T();
402 };
403
404 for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
405 ++I) {
406 size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
407 std::distance(Allocator.Slabs.begin(), I));
408 char *Begin = (char *)alignAddr(*I, Align::Of<T>());
409 char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
410 : (char *)*I + AllocatedSlabSize;
411
412 DestroyElements(Begin, End);
413 }
414
415 for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
416 void *Ptr = PtrAndSize.first;
417 size_t Size = PtrAndSize.second;
418 DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
419 (char *)Ptr + Size);
420 }
421
422 Allocator.Reset();
423 }
424
425 /// Allocate space for an array of objects without constructing them.
426 T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
427};
428
429} // end namespace llvm
430
431template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
432 size_t GrowthDelay>
433void *
434operator new(size_t Size,
435 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
436 GrowthDelay> &Allocator) {
437 return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
4
Calling 'BumpPtrAllocatorImpl::Allocate'
438 alignof(std::max_align_t)));
439}
440
441template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
442 size_t GrowthDelay>
443void operator delete(void *,
444 llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
445 SizeThreshold, GrowthDelay> &) {
446}
447
448#endif // LLVM_SUPPORT_ALLOCATOR_H

/usr/src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include/llvm/Support/Alignment.h

1//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains types to represent alignments.
10// They are instrumented to guarantee some invariants are preserved and prevent
11// invalid manipulations.
12//
13// - Align represents an alignment in bytes, it is always set and always a valid
14// power of two, its minimum value is 1 which means no alignment requirements.
15//
16// - MaybeAlign is an optional type, it may be undefined or set. When it's set
17// you can get the underlying Align type by using the getValue() method.
18//
19//===----------------------------------------------------------------------===//
20
21#ifndef LLVM_SUPPORT_ALIGNMENT_H_
22#define LLVM_SUPPORT_ALIGNMENT_H_
23
24#include "llvm/ADT/Optional.h"
25#include "llvm/Support/MathExtras.h"
26#include <cassert>
27#ifndef NDEBUG1
28#include <string>
29#endif // NDEBUG
30
31namespace llvm {
32
33#define ALIGN_CHECK_ISPOSITIVE(decl) \
34 assert(decl > 0 && (#decl " should be defined"))((void)0)
35
36/// This struct is a compact representation of a valid (non-zero power of two)
37/// alignment.
38/// It is suitable for use as static global constants.
39struct Align {
40private:
41 uint8_t ShiftValue = 0; /// The log2 of the required alignment.
42 /// ShiftValue is less than 64 by construction.
43
44 friend struct MaybeAlign;
45 friend unsigned Log2(Align);
46 friend bool operator==(Align Lhs, Align Rhs);
47 friend bool operator!=(Align Lhs, Align Rhs);
48 friend bool operator<=(Align Lhs, Align Rhs);
49 friend bool operator>=(Align Lhs, Align Rhs);
50 friend bool operator<(Align Lhs, Align Rhs);
51 friend bool operator>(Align Lhs, Align Rhs);
52 friend unsigned encode(struct MaybeAlign A);
53 friend struct MaybeAlign decodeMaybeAlign(unsigned Value);
54
55 /// A trivial type to allow construction of constexpr Align.
56 /// This is currently needed to workaround a bug in GCC 5.3 which prevents
57 /// definition of constexpr assign operators.
58 /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
59 /// FIXME: Remove this, make all assign operators constexpr and introduce user
60 /// defined literals when we don't have to support GCC 5.3 anymore.
61 /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
62 struct LogValue {
63 uint8_t Log;
64 };
65
66public:
67 /// Default is byte-aligned.
68 constexpr Align() = default;
69 /// Do not perform checks in case of copy/move construct/assign, because the
70 /// checks have been performed when building `Other`.
71 constexpr Align(const Align &Other) = default;
72 constexpr Align(Align &&Other) = default;
73 Align &operator=(const Align &Other) = default;
74 Align &operator=(Align &&Other) = default;
75
76 explicit Align(uint64_t Value) {
77 assert(Value > 0 && "Value must not be 0")((void)0);
78 assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2")((void)0);
79 ShiftValue = Log2_64(Value);
80 assert(ShiftValue < 64 && "Broken invariant")((void)0);
81 }
82
83 /// This is a hole in the type system and should not be abused.
84 /// Needed to interact with C for instance.
85 uint64_t value() const { return uint64_t(1) << ShiftValue; }
11
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
86
87 /// Allow constructions of constexpr Align.
88 template <size_t kValue> constexpr static LogValue Constant() {
89 return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
90 }
91
92 /// Allow constructions of constexpr Align from types.
93 /// Compile time equivalent to Align(alignof(T)).
94 template <typename T> constexpr static LogValue Of() {
95 return Constant<std::alignment_of<T>::value>();
96 }
97
98 /// Constexpr constructor from LogValue type.
99 constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
100};
101
102/// Treats the value 0 as a 1, so Align is always at least 1.
103inline Align assumeAligned(uint64_t Value) {
104 return Value ? Align(Value) : Align();
105}
106
107/// This struct is a compact representation of a valid (power of two) or
108/// undefined (0) alignment.
109struct MaybeAlign : public llvm::Optional<Align> {
110private:
111 using UP = llvm::Optional<Align>;
112
113public:
114 /// Default is undefined.
115 MaybeAlign() = default;
116 /// Do not perform checks in case of copy/move construct/assign, because the
117 /// checks have been performed when building `Other`.
118 MaybeAlign(const MaybeAlign &Other) = default;
119 MaybeAlign &operator=(const MaybeAlign &Other) = default;
120 MaybeAlign(MaybeAlign &&Other) = default;
121 MaybeAlign &operator=(MaybeAlign &&Other) = default;
122
123 /// Use llvm::Optional<Align> constructor.
124 using UP::UP;
125
126 explicit MaybeAlign(uint64_t Value) {
127 assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&((void)0)
128 "Alignment is neither 0 nor a power of 2")((void)0);
129 if (Value)
130 emplace(Value);
131 }
132
133 /// For convenience, returns a valid alignment or 1 if undefined.
134 Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
135};
136
137/// Checks that SizeInBytes is a multiple of the alignment.
138inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
139 return SizeInBytes % Lhs.value() == 0;
140}
141
142/// Checks that Addr is a multiple of the alignment.
143inline bool isAddrAligned(Align Lhs, const void *Addr) {
144 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
145}
146
147/// Returns a multiple of A needed to store `Size` bytes.
148inline uint64_t alignTo(uint64_t Size, Align A) {
149 const uint64_t Value = A.value();
10
Calling 'Align::value'
150 // The following line is equivalent to `(Size + Value - 1) / Value * Value`.
151
152 // The division followed by a multiplication can be thought of as a right
153 // shift followed by a left shift which zeros out the extra bits produced in
154 // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
155 // are just zero.
156
157 // Most compilers can generate this code but the pattern may be missed when
158 // multiple functions gets inlined.
159 return (Size + Value - 1) & ~(Value - 1U);
160}
161
162/// If non-zero \p Skew is specified, the return value will be a minimal integer
163/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
164/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
165/// Skew mod \p A'.
166///
167/// Examples:
168/// \code
169/// alignTo(5, Align(8), 7) = 7
170/// alignTo(17, Align(8), 1) = 17
171/// alignTo(~0LL, Align(8), 3) = 3
172/// \endcode
173inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
174 const uint64_t Value = A.value();
175 Skew %= Value;
176 return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
177}
178
179/// Returns a multiple of A needed to store `Size` bytes.
180/// Returns `Size` if current alignment is undefined.
181inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
182 return A ? alignTo(Size, A.getValue()) : Size;
183}
184
185/// Aligns `Addr` to `Alignment` bytes, rounding up.
186inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
187 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
188 assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=((void)0)
189 ArithAddr &&((void)0)
190 "Overflow")((void)0);
191 return alignTo(ArithAddr, Alignment);
192}
193
194/// Returns the offset to the next integer (mod 2**64) that is greater than
195/// or equal to \p Value and is a multiple of \p Align.
196inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
197 return alignTo(Value, Alignment) - Value;
8
The value 255 is assigned to 'A.ShiftValue'
9
Calling 'alignTo'
198}
199
200/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
201/// bytes, rounding up.
202inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
203 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
7
Calling 'offsetToAlignment'
204}
205
206/// Returns the log2 of the alignment.
207inline unsigned Log2(Align A) { return A.ShiftValue; }
208
209/// Returns the alignment that satisfies both alignments.
210/// Same semantic as MinAlign.
211inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }
212
213/// Returns the alignment that satisfies both alignments.
214/// Same semantic as MinAlign.
215inline Align commonAlignment(Align A, uint64_t Offset) {
216 return Align(MinAlign(A.value(), Offset));
217}
218
219/// Returns the alignment that satisfies both alignments.
220/// Same semantic as MinAlign.
221inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
222 return A && B ? commonAlignment(*A, *B) : A ? A : B;
223}
224
225/// Returns the alignment that satisfies both alignments.
226/// Same semantic as MinAlign.
227inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
228 return MaybeAlign(MinAlign((*A).value(), Offset));
229}
230
231/// Returns a representation of the alignment that encodes undefined as 0.
232inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }
233
234/// Dual operation of the encode function above.
235inline MaybeAlign decodeMaybeAlign(unsigned Value) {
236 if (Value == 0)
237 return MaybeAlign();
238 Align Out;
239 Out.ShiftValue = Value - 1;
240 return Out;
241}
242
243/// Returns a representation of the alignment, the encoded value is positive by
244/// definition.
245inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
246
247/// Comparisons between Align and scalars. Rhs must be positive.
248inline bool operator==(Align Lhs, uint64_t Rhs) {
249 ALIGN_CHECK_ISPOSITIVE(Rhs);
250 return Lhs.value() == Rhs;
251}
252inline bool operator!=(Align Lhs, uint64_t Rhs) {
253 ALIGN_CHECK_ISPOSITIVE(Rhs);
254 return Lhs.value() != Rhs;
255}
256inline bool operator<=(Align Lhs, uint64_t Rhs) {
257 ALIGN_CHECK_ISPOSITIVE(Rhs);
258 return Lhs.value() <= Rhs;
259}
260inline bool operator>=(Align Lhs, uint64_t Rhs) {
261 ALIGN_CHECK_ISPOSITIVE(Rhs);
262 return Lhs.value() >= Rhs;
263}
264inline bool operator<(Align Lhs, uint64_t Rhs) {
265 ALIGN_CHECK_ISPOSITIVE(Rhs);
266 return Lhs.value() < Rhs;
267}
268inline bool operator>(Align Lhs, uint64_t Rhs) {
269 ALIGN_CHECK_ISPOSITIVE(Rhs);
270 return Lhs.value() > Rhs;
271}
272
273/// Comparisons between MaybeAlign and scalars.
274inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
275 return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
276}
277inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
278 return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
279}
280
281/// Comparisons operators between Align.
282inline bool operator==(Align Lhs, Align Rhs) {
283 return Lhs.ShiftValue == Rhs.ShiftValue;
284}
285inline bool operator!=(Align Lhs, Align Rhs) {
286 return Lhs.ShiftValue != Rhs.ShiftValue;
287}
288inline bool operator<=(Align Lhs, Align Rhs) {
289 return Lhs.ShiftValue <= Rhs.ShiftValue;
290}
291inline bool operator>=(Align Lhs, Align Rhs) {
292 return Lhs.ShiftValue >= Rhs.ShiftValue;
293}
294inline bool operator<(Align Lhs, Align Rhs) {
295 return Lhs.ShiftValue < Rhs.ShiftValue;
296}
297inline bool operator>(Align Lhs, Align Rhs) {
298 return Lhs.ShiftValue > Rhs.ShiftValue;
299}
300
301// Don't allow relational comparisons with MaybeAlign.
302bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
303bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
304bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
305bool operator>(Align Lhs, MaybeAlign Rhs) = delete;
306
307bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
308bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
309bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
310bool operator>(MaybeAlign Lhs, Align Rhs) = delete;
311
312bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
313bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
314bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
315bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316
317inline Align operator*(Align Lhs, uint64_t Rhs) {
318 assert(Rhs > 0 && "Rhs must be positive")((void)0);
319 return Align(Lhs.value() * Rhs);
320}
321
322inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
323 assert(Rhs > 0 && "Rhs must be positive")((void)0);
324 return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
325}
326
327inline Align operator/(Align Lhs, uint64_t Divisor) {
328 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
329 "Divisor must be positive and a power of 2")((void)0);
330 assert(Lhs != 1 && "Can't halve byte alignment")((void)0);
331 return Align(Lhs.value() / Divisor);
332}
333
334inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
335 assert(llvm::isPowerOf2_64(Divisor) &&((void)0)
336 "Divisor must be positive and a power of 2")((void)0);
337 return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
338}
339
340inline Align max(MaybeAlign Lhs, Align Rhs) {
341 return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
342}
343
344inline Align max(Align Lhs, MaybeAlign Rhs) {
345 return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
346}
347
348#ifndef NDEBUG1
349// For usage in LLVM_DEBUG macros.
350inline std::string DebugStr(const Align &A) {
351 return std::to_string(A.value());
352}
353// For usage in LLVM_DEBUG macros.
354inline std::string DebugStr(const MaybeAlign &MA) {
355 if (MA)
356 return std::to_string(MA->value());
357 return "None";
358}
359#endif // NDEBUG
360
361#undef ALIGN_CHECK_ISPOSITIVE
362
363} // namespace llvm
364
365#endif // LLVM_SUPPORT_ALIGNMENT_H_