File: src/gnu/usr.bin/clang/libclangAST/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47: The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
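
The flagged expression is the shift in Align::value() in the Alignment.h listing below: `return uint64_t(1) << ShiftValue;`. In C++, shifting a 64-bit integer by 64 or more bit positions is undefined behavior, and the analyzer reports a path on which ShiftValue holds 255. A minimal, self-contained sketch of the rule the checker enforces (function name and values are illustrative only, not taken from the code below):

    #include <cstdint>
    #include <cassert>

    uint64_t pow2(uint8_t ShiftValue) {
      // Defined only for shift counts in [0, 63] on a 64-bit operand;
      // a count such as 255 makes the shift undefined behavior.
      assert(ShiftValue < 64 && "shift count must be below the operand width");
      return uint64_t(1) << ShiftValue;
    }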
//===--- RawCommentList.cpp - Processing raw comments -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "clang/AST/RawCommentList.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentBriefParser.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentLexer.h"
#include "clang/AST/CommentParser.h"
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Allocator.h"

using namespace clang;

namespace {
/// Get comment kind and bool describing if it is a trailing comment.
std::pair<RawComment::CommentKind, bool> getCommentKind(StringRef Comment,
                                                        bool ParseAllComments) {
  const size_t MinCommentLength = ParseAllComments ? 2 : 3;
  if ((Comment.size() < MinCommentLength) || Comment[0] != '/')
    return std::make_pair(RawComment::RCK_Invalid, false);

  RawComment::CommentKind K;
  if (Comment[1] == '/') {
    if (Comment.size() < 3)
      return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);

    if (Comment[2] == '/')
      K = RawComment::RCK_BCPLSlash;
    else if (Comment[2] == '!')
      K = RawComment::RCK_BCPLExcl;
    else
      return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);
  } else {
    assert(Comment.size() >= 4);

    // Comment lexer does not understand escapes in comment markers, so pretend
    // that this is not a comment.
    if (Comment[1] != '*' ||
        Comment[Comment.size() - 2] != '*' ||
        Comment[Comment.size() - 1] != '/')
      return std::make_pair(RawComment::RCK_Invalid, false);

    if (Comment[2] == '*')
      K = RawComment::RCK_JavaDoc;
    else if (Comment[2] == '!')
      K = RawComment::RCK_Qt;
    else
      return std::make_pair(RawComment::RCK_OrdinaryC, false);
  }
  const bool TrailingComment = (Comment.size() > 3) && (Comment[3] == '<');
  return std::make_pair(K, TrailingComment);
}
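// For illustration, the mapping implemented by the branches above:
//   "// ..."  -> RCK_OrdinaryBCPL    "/* ... */"  -> RCK_OrdinaryC
//   "/// ..." -> RCK_BCPLSlash       "/** ... */" -> RCK_JavaDoc
//   "//! ..." -> RCK_BCPLExcl        "/*! ... */" -> RCK_Qt
// A '<' in the fourth position (e.g. "///<") marks a trailing comment.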

bool mergedCommentIsTrailingComment(StringRef Comment) {
  return (Comment.size() > 3) && (Comment[3] == '<');
}

/// Returns true if R1 and R2 both have valid locations that start on the same
/// column.
bool commentsStartOnSameColumn(const SourceManager &SM, const RawComment &R1,
                               const RawComment &R2) {
  SourceLocation L1 = R1.getBeginLoc();
  SourceLocation L2 = R2.getBeginLoc();
  bool Invalid = false;
  unsigned C1 = SM.getPresumedColumnNumber(L1, &Invalid);
  if (!Invalid) {
    unsigned C2 = SM.getPresumedColumnNumber(L2, &Invalid);
    return !Invalid && (C1 == C2);
  }
  return false;
}
} // unnamed namespace

/// Determines whether there is only whitespace in `Buffer` between `P`
/// and the previous line.
/// \param Buffer The buffer to search in.
/// \param P The offset from the beginning of `Buffer` to start from.
/// \return true if all of the characters in `Buffer` ranging from the closest
/// line-ending character before `P` (or the beginning of `Buffer`) to `P - 1`
/// are whitespace.
static bool onlyWhitespaceOnLineBefore(const char *Buffer, unsigned P) {
  // Search backwards until we see linefeed or carriage return.
  for (unsigned I = P; I != 0; --I) {
    char C = Buffer[I - 1];
    if (isVerticalWhitespace(C))
      return true;
    if (!isHorizontalWhitespace(C))
      return false;
  }
  // We hit the beginning of the buffer.
  return true;
}

/// Returns whether `K` is an ordinary comment kind.
static bool isOrdinaryKind(RawComment::CommentKind K) {
  return (K == RawComment::RCK_OrdinaryBCPL) ||
         (K == RawComment::RCK_OrdinaryC);
}

RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
                       const CommentOptions &CommentOpts, bool Merged) :
    Range(SR), RawTextValid(false), BriefTextValid(false),
    IsAttached(false), IsTrailingComment(false),
    IsAlmostTrailingComment(false) {
  // Extract raw comment text, if possible.
  if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) {
    Kind = RCK_Invalid;
    return;
  }

  // Guess comment kind.
  std::pair<CommentKind, bool> K =
      getCommentKind(RawText, CommentOpts.ParseAllComments);

  // Guess whether an ordinary comment is trailing.
  if (CommentOpts.ParseAllComments && isOrdinaryKind(K.first)) {
    FileID BeginFileID;
    unsigned BeginOffset;
    std::tie(BeginFileID, BeginOffset) =
        SourceMgr.getDecomposedLoc(Range.getBegin());
    if (BeginOffset != 0) {
      bool Invalid = false;
      const char *Buffer =
          SourceMgr.getBufferData(BeginFileID, &Invalid).data();
      IsTrailingComment |=
          (!Invalid && !onlyWhitespaceOnLineBefore(Buffer, BeginOffset));
    }
  }

  if (!Merged) {
    Kind = K.first;
    IsTrailingComment |= K.second;

    IsAlmostTrailingComment = RawText.startswith("//<") ||
                              RawText.startswith("/*<");
  } else {
    Kind = RCK_Merged;
    IsTrailingComment =
        IsTrailingComment || mergedCommentIsTrailingComment(RawText);
  }
}

StringRef RawComment::getRawTextSlow(const SourceManager &SourceMgr) const {
  FileID BeginFileID;
  FileID EndFileID;
  unsigned BeginOffset;
  unsigned EndOffset;

  std::tie(BeginFileID, BeginOffset) =
      SourceMgr.getDecomposedLoc(Range.getBegin());
  std::tie(EndFileID, EndOffset) = SourceMgr.getDecomposedLoc(Range.getEnd());

  const unsigned Length = EndOffset - BeginOffset;
  if (Length < 2)
    return StringRef();

  // The comment can't begin in one file and end in another.
  assert(BeginFileID == EndFileID);

  bool Invalid = false;
  const char *BufferStart = SourceMgr.getBufferData(BeginFileID,
                                                    &Invalid).data();
  if (Invalid)
    return StringRef();

  return StringRef(BufferStart + BeginOffset, Length);
}

const char *RawComment::extractBriefText(const ASTContext &Context) const {
  // Lazily initialize RawText using the accessor before using it.
  (void)getRawText(Context.getSourceManager());

  // Since we will be copying the resulting text, all allocations made during
  // parsing are garbage after resulting string is formed. Thus we can use
  // a separate allocator for all temporary stuff.
  llvm::BumpPtrAllocator Allocator;

  comments::Lexer L(Allocator, Context.getDiagnostics(),
                    Context.getCommentCommandTraits(),
                    Range.getBegin(),
                    RawText.begin(), RawText.end());
  comments::BriefParser P(L, Context.getCommentCommandTraits());

  const std::string Result = P.Parse();
  const unsigned BriefTextLength = Result.size();
  char *BriefTextPtr = new (Context) char[BriefTextLength + 1];
  memcpy(BriefTextPtr, Result.c_str(), BriefTextLength + 1);
  BriefText = BriefTextPtr;
  BriefTextValid = true;

  return BriefTextPtr;
}

comments::FullComment *RawComment::parse(const ASTContext &Context,
                                         const Preprocessor *PP,
                                         const Decl *D) const {
  // Lazily initialize RawText using the accessor before using it.
  (void)getRawText(Context.getSourceManager());

  comments::Lexer L(Context.getAllocator(), Context.getDiagnostics(),
                    Context.getCommentCommandTraits(),
                    getSourceRange().getBegin(),
                    RawText.begin(), RawText.end());
  comments::Sema S(Context.getAllocator(), Context.getSourceManager(),
                   Context.getDiagnostics(),
                   Context.getCommentCommandTraits(),
                   PP);
  S.setDecl(D);
  comments::Parser P(L, S, Context.getAllocator(), Context.getSourceManager(),
                     Context.getDiagnostics(),
                     Context.getCommentCommandTraits());

  return P.parseFullComment();
}

static bool onlyWhitespaceBetween(SourceManager &SM,
                                  SourceLocation Loc1, SourceLocation Loc2,
                                  unsigned MaxNewlinesAllowed) {
  std::pair<FileID, unsigned> Loc1Info = SM.getDecomposedLoc(Loc1);
  std::pair<FileID, unsigned> Loc2Info = SM.getDecomposedLoc(Loc2);

  // Question does not make sense if locations are in different files.
  if (Loc1Info.first != Loc2Info.first)
    return false;

  bool Invalid = false;
  const char *Buffer = SM.getBufferData(Loc1Info.first, &Invalid).data();
  if (Invalid)
    return false;

  unsigned NumNewlines = 0;
  assert(Loc1Info.second <= Loc2Info.second && "Loc1 after Loc2!");
  // Look for non-whitespace characters and remember any newlines seen.
  for (unsigned I = Loc1Info.second; I != Loc2Info.second; ++I) {
    switch (Buffer[I]) {
    default:
      return false;
    case ' ':
    case '\t':
    case '\f':
    case '\v':
      break;
    case '\r':
    case '\n':
      ++NumNewlines;

      // Check if we have found more than the maximum allowed number of
      // newlines.
      if (NumNewlines > MaxNewlinesAllowed)
        return false;

      // Collapse \r\n and \n\r into a single newline.
      if (I + 1 != Loc2Info.second &&
          (Buffer[I + 1] == '\n' || Buffer[I + 1] == '\r') &&
          Buffer[I] != Buffer[I + 1])
        ++I;
      break;
    }
  }

  return true;
}

void RawCommentList::addComment(const RawComment &RC,
                                const CommentOptions &CommentOpts,
                                llvm::BumpPtrAllocator &Allocator) {
  if (RC.isInvalid())
    return;

  // Ordinary comments are not interesting for us.
  if (RC.isOrdinary() && !CommentOpts.ParseAllComments)
    return;

  std::pair<FileID, unsigned> Loc =
      SourceMgr.getDecomposedLoc(RC.getBeginLoc());

  const FileID CommentFile = Loc.first;
  const unsigned CommentOffset = Loc.second;

  // If this is the first Doxygen comment, save it (because there isn't
  // anything to merge it with).
  if (OrderedComments[CommentFile].empty()) {
    OrderedComments[CommentFile][CommentOffset] =
        new (Allocator) RawComment(RC);
    return;
  }

  const RawComment &C1 = *OrderedComments[CommentFile].rbegin()->second;
  const RawComment &C2 = RC;

  // Merge comments only if there is only whitespace between them.
  // Can't merge trailing and non-trailing comments unless the second is
  // non-trailing ordinary in the same column, as in the case:
  //   int x; // documents x
  //          // more text
  // versus:
  //   int x; // documents x
  //   int y; // documents y
  // or:
  //   int x; // documents x
  //   // documents y
  //   int y;
  // Merge comments if they are on same or consecutive lines.
  if ((C1.isTrailingComment() == C2.isTrailingComment() ||
       (C1.isTrailingComment() && !C2.isTrailingComment() &&
        isOrdinaryKind(C2.getKind()) &&
        commentsStartOnSameColumn(SourceMgr, C1, C2))) &&
      onlyWhitespaceBetween(SourceMgr, C1.getEndLoc(), C2.getBeginLoc(),
                            /*MaxNewlinesAllowed=*/1)) {
    SourceRange MergedRange(C1.getBeginLoc(), C2.getEndLoc());
    *OrderedComments[CommentFile].rbegin()->second =
        RawComment(SourceMgr, MergedRange, CommentOpts, true);
  } else {
    OrderedComments[CommentFile][CommentOffset] =
        new (Allocator) RawComment(RC);
  }
}

const std::map<unsigned, RawComment *> *
RawCommentList::getCommentsInFile(FileID File) const {
  auto CommentsInFile = OrderedComments.find(File);
  if (CommentsInFile == OrderedComments.end())
    return nullptr;

  return &CommentsInFile->second;
}

bool RawCommentList::empty() const { return OrderedComments.empty(); }

unsigned RawCommentList::getCommentBeginLine(RawComment *C, FileID File,
                                             unsigned Offset) const {
  auto Cached = CommentBeginLine.find(C);
  if (Cached != CommentBeginLine.end())
    return Cached->second;
  const unsigned Line = SourceMgr.getLineNumber(File, Offset);
  CommentBeginLine[C] = Line;
  return Line;
}

unsigned RawCommentList::getCommentEndOffset(RawComment *C) const {
  auto Cached = CommentEndOffset.find(C);
  if (Cached != CommentEndOffset.end())
    return Cached->second;
  const unsigned Offset =
      SourceMgr.getDecomposedLoc(C->getSourceRange().getEnd()).second;
  CommentEndOffset[C] = Offset;
  return Offset;
}

std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
                                         DiagnosticsEngine &Diags) const {
  llvm::StringRef CommentText = getRawText(SourceMgr);
  if (CommentText.empty())
    return "";

  llvm::BumpPtrAllocator Allocator;
  // We do not parse any commands, so CommentOptions are ignored by
  // comments::Lexer. Therefore, we just use default-constructed options.
  CommentOptions DefOpts;
  comments::CommandTraits EmptyTraits(Allocator, DefOpts);
  comments::Lexer L(Allocator, Diags, EmptyTraits, getSourceRange().getBegin(),
                    CommentText.begin(), CommentText.end(),
                    /*ParseCommands=*/false);

  std::string Result;
  // A column number of the first non-whitespace token in the comment text.
  // We skip whitespace up to this column, but keep the whitespace after this
  // column. IndentColumn is calculated when lexing the first line and reused
  // for the rest of lines.
  unsigned IndentColumn = 0;

  // Processes one line of the comment and adds it to the result.
  // Handles skipping the indent at the start of the line.
  // Returns false when eof is reached and true otherwise.
  auto LexLine = [&](bool IsFirstLine) -> bool {
    comments::Token Tok;
    // Lex the first token on the line. We handle it separately, because we
    // need to fix up its indentation.
    L.lex(Tok);
    if (Tok.is(comments::tok::eof))
      return false;
    if (Tok.is(comments::tok::newline)) {
      Result += "\n";
      return true;
    }
    llvm::StringRef TokText = L.getSpelling(Tok, SourceMgr);
    bool LocInvalid = false;
    unsigned TokColumn =
        SourceMgr.getSpellingColumnNumber(Tok.getLocation(), &LocInvalid);
    assert(!LocInvalid && "getFormattedText for invalid location");

    // Amount of leading whitespace in TokText.
    size_t WhitespaceLen = TokText.find_first_not_of(" \t");
    if (WhitespaceLen == StringRef::npos)
      WhitespaceLen = TokText.size();
    // Remember the amount of whitespace we skipped in the first line to remove
    // indent up to that column in the following lines.
    if (IsFirstLine)
      IndentColumn = TokColumn + WhitespaceLen;

    // Amount of leading whitespace we actually want to skip.
    // For the first line we skip all the whitespace.
    // For the rest of the lines, we skip whitespace up to IndentColumn.
    unsigned SkipLen =
        IsFirstLine
            ? WhitespaceLen
            : std::min<size_t>(
                  WhitespaceLen,
                  std::max<int>(static_cast<int>(IndentColumn) - TokColumn, 0));
    llvm::StringRef Trimmed = TokText.drop_front(SkipLen);
    Result += Trimmed;
    // Lex all tokens in the rest of the line.
    for (L.lex(Tok); Tok.isNot(comments::tok::eof); L.lex(Tok)) {
      if (Tok.is(comments::tok::newline)) {
        Result += "\n";
        return true;
      }
      Result += L.getSpelling(Tok, SourceMgr);
    }
    // We've reached the end of file token.
    return false;
  };

  auto DropTrailingNewLines = [](std::string &Str) {
    while (!Str.empty() && Str.back() == '\n')
      Str.pop_back();
  };

  // Process first line separately to remember indent for the following lines.
  if (!LexLine(/*IsFirstLine=*/true)) {
    DropTrailingNewLines(Result);
    return Result;
  }
  // Process the rest of the lines.
  while (LexLine(/*IsFirstLine=*/false))
    ;
  DropTrailingNewLines(Result);
  return Result;
}
//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
/// allocator.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
///
/// The GrowthDelay specifies after how many allocated slabs the allocator
/// increases the size of the slabs.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private AllocatorT {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
77 | "GrowthDelay must be at least 1 which already increases the" |
78 | "slab size after each allocated slab."); |

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocatorT(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized
    // slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char*)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  inline LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-size
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
  llvm::Optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return None;
  }

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    Optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// about the object and produces a smaller identifier by relying on
  /// the alignment information. Note that sub-classes may have different
  /// alignment, so the most base class should be passed as template parameter
  /// in order to obtain correct results. For that reason automatic template
  /// parameter deduction is disabled.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator. This identifier is
  /// different from the ones produced by identifyObject and
  /// identifyAlignedObject.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }
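  // Worked example with the default parameters (SlabSize = 4096,
  // GrowthDelay = 128): slabs #0..#127 are 4096 bytes each, #128..#255 are
  // 8192 bytes, #256..#383 are 16384 bytes, and so on; the multiplier
  // saturates at 2^30.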

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab =
        AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
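// A minimal usage sketch (illustrative only; `Widget` is a hypothetical
// stand-in type):
//
//   llvm::BumpPtrAllocator Alloc;
//   int *Ints = Alloc.Allocate<int>(16);  // uninitialized array of 16 ints
//   auto *W = new (Alloc) Widget();       // via the operator new defined below
//   // Individual objects are never freed; everything is reclaimed when
//   // Alloc is destroyed (or explicitly via Alloc.Reset()).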

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H
//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains types to represent alignments.
// They are instrumented to guarantee some invariants are preserved and prevent
// invalid manipulations.
//
// - Align represents an alignment in bytes. It is always set and always a
//   valid power of two; its minimum value is 1, which means no alignment
//   requirements.
//
// - MaybeAlign is an optional type; it may be undefined or set. When it's set
//   you can get the underlying Align type by using the getValue() method.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALIGNMENT_H_
#define LLVM_SUPPORT_ALIGNMENT_H_

#include "llvm/ADT/Optional.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#ifndef NDEBUG
#include <string>
#endif // NDEBUG

namespace llvm {

#define ALIGN_CHECK_ISPOSITIVE(decl)                                           \
  assert(decl > 0 && (#decl " should be defined"))

/// This struct is a compact representation of a valid (non-zero power of two)
/// alignment.
/// It is suitable for use as static global constants.
struct Align {
private:
  uint8_t ShiftValue = 0; /// The log2 of the required alignment.
                          /// ShiftValue is less than 64 by construction.

  friend struct MaybeAlign;
  friend unsigned Log2(Align);
  friend bool operator==(Align Lhs, Align Rhs);
  friend bool operator!=(Align Lhs, Align Rhs);
  friend bool operator<=(Align Lhs, Align Rhs);
  friend bool operator>=(Align Lhs, Align Rhs);
  friend bool operator<(Align Lhs, Align Rhs);
  friend bool operator>(Align Lhs, Align Rhs);
  friend unsigned encode(struct MaybeAlign A);
  friend struct MaybeAlign decodeMaybeAlign(unsigned Value);

  /// A trivial type to allow construction of constexpr Align.
  /// This is currently needed to work around a bug in GCC 5.3 which prevents
  /// definition of constexpr assign operators.
  /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
  /// FIXME: Remove this, make all assign operators constexpr and introduce
  /// user-defined literals when we don't have to support GCC 5.3 anymore.
  /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
  struct LogValue {
    uint8_t Log;
  };

public:
  /// Default is byte-aligned.
  constexpr Align() = default;
  /// Do not perform checks in case of copy/move construct/assign, because the
  /// checks have been performed when building `Other`.
  constexpr Align(const Align &Other) = default;
  constexpr Align(Align &&Other) = default;
  Align &operator=(const Align &Other) = default;
  Align &operator=(Align &&Other) = default;

  explicit Align(uint64_t Value) {
    assert(Value > 0 && "Value must not be 0");
    assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2");
    ShiftValue = Log2_64(Value);
    assert(ShiftValue < 64 && "Broken invariant");
  }

  /// This is a hole in the type system and should not be abused.
  /// Needed to interact with C for instance.
  uint64_t value() const { return uint64_t(1) << ShiftValue; }
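  // Note: the invariant ShiftValue < 64 (asserted in the explicit constructor
  // above) is what keeps this shift defined; the analyzer warning at the top
  // of this report describes a path on which ShiftValue is 255, i.e. one
  // where that invariant is assumed broken.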
| ||||

  /// Allow constructions of constexpr Align.
  template <size_t kValue> constexpr static LogValue Constant() {
    return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
  }

  /// Allow constructions of constexpr Align from types.
  /// Compile time equivalent to Align(alignof(T)).
  template <typename T> constexpr static LogValue Of() {
    return Constant<std::alignment_of<T>::value>();
  }

  /// Constexpr constructor from LogValue type.
  constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
};

/// Treats the value 0 as a 1, so Align is always at least 1.
inline Align assumeAligned(uint64_t Value) {
  return Value ? Align(Value) : Align();
}

/// This struct is a compact representation of a valid (power of two) or
/// undefined (0) alignment.
struct MaybeAlign : public llvm::Optional<Align> {
private:
  using UP = llvm::Optional<Align>;

public:
  /// Default is undefined.
  MaybeAlign() = default;
  /// Do not perform checks in case of copy/move construct/assign, because the
  /// checks have been performed when building `Other`.
  MaybeAlign(const MaybeAlign &Other) = default;
  MaybeAlign &operator=(const MaybeAlign &Other) = default;
  MaybeAlign(MaybeAlign &&Other) = default;
  MaybeAlign &operator=(MaybeAlign &&Other) = default;

  /// Use llvm::Optional<Align> constructor.
  using UP::UP;

  explicit MaybeAlign(uint64_t Value) {
    assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&
           "Alignment is neither 0 nor a power of 2");
    if (Value)
      emplace(Value);
  }

  /// For convenience, returns a valid alignment or 1 if undefined.
  Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
};

/// Checks that SizeInBytes is a multiple of the alignment.
inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
  return SizeInBytes % Lhs.value() == 0;
}

/// Checks that Addr is a multiple of the alignment.
inline bool isAddrAligned(Align Lhs, const void *Addr) {
  return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
}

/// Returns a multiple of A needed to store `Size` bytes.
inline uint64_t alignTo(uint64_t Size, Align A) {
  const uint64_t Value = A.value();
  // The following line is equivalent to `(Size + Value - 1) / Value * Value`.

  // The division followed by a multiplication can be thought of as a right
  // shift followed by a left shift which zeros out the extra bits produced in
  // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
  // are just zero.

  // Most compilers can generate this code but the pattern may be missed when
  // multiple functions get inlined.
  return (Size + Value - 1) & ~(Value - 1U);
}
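// Worked example: alignTo(5, Align(8)) == (5 + 7) & ~7 == 8, while
// alignTo(16, Align(8)) == 16 since 16 is already 8-byte aligned.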

/// If non-zero \p Skew is specified, the return value will be a minimal integer
/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
/// Skew mod \p A'.
///
/// Examples:
/// \code
///   alignTo(5, Align(8), 7) = 7
///   alignTo(17, Align(8), 1) = 17
///   alignTo(~0LL, Align(8), 3) = 3
/// \endcode
inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
  const uint64_t Value = A.value();
  Skew %= Value;
  return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
}

/// Returns a multiple of A needed to store `Size` bytes.
/// Returns `Size` if current alignment is undefined.
inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
  return A ? alignTo(Size, A.getValue()) : Size;
}

/// Aligns `Addr` to `Alignment` bytes, rounding up.
inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
  uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
  assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=
             ArithAddr &&
         "Overflow");
  return alignTo(ArithAddr, Alignment);
}

/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align.
inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
  return alignTo(Value, Alignment) - Value;
}

/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
/// bytes, rounding up.
inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
  return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
}

/// Returns the log2 of the alignment.
inline unsigned Log2(Align A) { return A.ShiftValue; }

/// Returns the alignment that satisfies both alignments.
/// Same semantic as MinAlign.
inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }

/// Returns the alignment that satisfies both alignments.
/// Same semantic as MinAlign.
inline Align commonAlignment(Align A, uint64_t Offset) {
  return Align(MinAlign(A.value(), Offset));
}

/// Returns the alignment that satisfies both alignments.
/// Same semantic as MinAlign.
inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
  return A && B ? commonAlignment(*A, *B) : A ? A : B;
}

/// Returns the alignment that satisfies both alignments.
/// Same semantic as MinAlign.
inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
  return MaybeAlign(MinAlign((*A).value(), Offset));
}

/// Returns a representation of the alignment that encodes undefined as 0.
inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }

/// Dual operation of the encode function above.
inline MaybeAlign decodeMaybeAlign(unsigned Value) {
  if (Value == 0)
    return MaybeAlign();
  Align Out;
  Out.ShiftValue = Value - 1;
  return Out;
}

/// Returns a representation of the alignment; the encoded value is positive
/// by definition.
inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
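// Encoding examples: encode(MaybeAlign()) == 0, encode(Align(1)) == 1,
// encode(Align(8)) == 4 (ShiftValue 3, plus 1); decodeMaybeAlign reverses
// the mapping.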

/// Comparisons between Align and scalars. Rhs must be positive.
inline bool operator==(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() == Rhs;
}
inline bool operator!=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() != Rhs;
}
inline bool operator<=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() <= Rhs;
}
inline bool operator>=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() >= Rhs;
}
inline bool operator<(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() < Rhs;
}
inline bool operator>(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() > Rhs;
}

/// Comparisons between MaybeAlign and scalars.
inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
  return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
}
inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
  return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
}

/// Comparison operators between Align.
inline bool operator==(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue == Rhs.ShiftValue;
}
inline bool operator!=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue != Rhs.ShiftValue;
}
inline bool operator<=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue <= Rhs.ShiftValue;
}
inline bool operator>=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue >= Rhs.ShiftValue;
}
inline bool operator<(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue < Rhs.ShiftValue;
}
inline bool operator>(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue > Rhs.ShiftValue;
}

// Don't allow relational comparisons with MaybeAlign.
bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
bool operator>(Align Lhs, MaybeAlign Rhs) = delete;

bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
bool operator>(MaybeAlign Lhs, Align Rhs) = delete;

bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;

inline Align operator*(Align Lhs, uint64_t Rhs) {
  assert(Rhs > 0 && "Rhs must be positive");
  return Align(Lhs.value() * Rhs);
}

inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
  assert(Rhs > 0 && "Rhs must be positive");
  return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
}

inline Align operator/(Align Lhs, uint64_t Divisor) {
  assert(llvm::isPowerOf2_64(Divisor) &&
         "Divisor must be positive and a power of 2");
  assert(Lhs != 1 && "Can't halve byte alignment");
  return Align(Lhs.value() / Divisor);
}

inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
  assert(llvm::isPowerOf2_64(Divisor) &&
         "Divisor must be positive and a power of 2");
  return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
}

inline Align max(MaybeAlign Lhs, Align Rhs) {
  return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
}

inline Align max(Align Lhs, MaybeAlign Rhs) {
  return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
}

#ifndef NDEBUG
// For usage in LLVM_DEBUG macros.
inline std::string DebugStr(const Align &A) {
  return std::to_string(A.value());
}
// For usage in LLVM_DEBUG macros.
inline std::string DebugStr(const MaybeAlign &MA) {
  if (MA)
    return std::to_string(MA->value());
  return "None";
}
#endif // NDEBUG

#undef ALIGN_CHECK_ISPOSITIVE

} // namespace llvm

#endif // LLVM_SUPPORT_ALIGNMENT_H_