Bug Summary

File: src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/CGOpenMPRuntime.cpp
Warning: line 11506, column 58
Division by zero
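
The check behind this finding is core.DivideZero (enabled via -analyzer-checker=core in the invocation below), which fires when the analyzer finds a path on which a divisor can be zero. The flagged expression at line 11506 lies beyond this excerpt; as a minimal, hypothetical sketch of the reported pattern and its usual guard (illustrative names, not the code at the flagged site):

    // Hypothetical reduction of the defect pattern, not the code at line 11506.
    unsigned iterationsPerChunk(unsigned Total, unsigned Chunk) {
      // core.DivideZero reports "Division by zero" when some path can reach
      // the division with Chunk == 0.
      if (Chunk == 0)   // guarding the divisor silences the warning
        return 0;
      return Total / Chunk;
    }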

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGOpenMPRuntime.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/CGOpenMPRuntime.cpp

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/CGOpenMPRuntime.cpp

1//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides a class for OpenMP runtime code generation.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGOpenMPRuntime.h"
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "clang/AST/APValue.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Decl.h"
21#include "clang/AST/OpenMPClause.h"
22#include "clang/AST/StmtOpenMP.h"
23#include "clang/AST/StmtVisitor.h"
24#include "clang/Basic/BitmaskEnum.h"
25#include "clang/Basic/FileManager.h"
26#include "clang/Basic/OpenMPKinds.h"
27#include "clang/Basic/SourceManager.h"
28#include "clang/CodeGen/ConstantInitBuilder.h"
29#include "llvm/ADT/ArrayRef.h"
30#include "llvm/ADT/SetOperations.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/Bitcode/BitcodeReader.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DerivedTypes.h"
35#include "llvm/IR/GlobalValue.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/AtomicOrdering.h"
38#include "llvm/Support/Format.h"
39#include "llvm/Support/raw_ostream.h"
40#include <cassert>
41#include <numeric>
42
43using namespace clang;
44using namespace CodeGen;
45using namespace llvm::omp;
46
47namespace {
48/// Base class for handling code generation inside OpenMP regions.
49class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
50public:
51 /// Kinds of OpenMP regions used in codegen.
52 enum CGOpenMPRegionKind {
53 /// Region with outlined function for standalone 'parallel'
54 /// directive.
55 ParallelOutlinedRegion,
56 /// Region with outlined function for standalone 'task' directive.
57 TaskOutlinedRegion,
58 /// Region for constructs that do not require function outlining,
59 /// like 'for', 'sections', 'atomic' etc. directives.
60 InlinedRegion,
61 /// Region with outlined function for standalone 'target' directive.
62 TargetRegion,
63 };
64
65 CGOpenMPRegionInfo(const CapturedStmt &CS,
66 const CGOpenMPRegionKind RegionKind,
67 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
68 bool HasCancel)
69 : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
70 CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
71
72 CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
73 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
74 bool HasCancel)
75 : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
76 Kind(Kind), HasCancel(HasCancel) {}
77
78 /// Get a variable or parameter for storing global thread id
79 /// inside OpenMP construct.
80 virtual const VarDecl *getThreadIDVariable() const = 0;
81
82 /// Emit the captured statement body.
83 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
84
85 /// Get an LValue for the current ThreadID variable.
86 /// \return LValue for thread id variable. This LValue always has type int32*.
87 virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
88
89 virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
90
91 CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
92
93 OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
94
95 bool hasCancel() const { return HasCancel; }
96
97 static bool classof(const CGCapturedStmtInfo *Info) {
98 return Info->getKind() == CR_OpenMP;
99 }
100
101 ~CGOpenMPRegionInfo() override = default;
102
103protected:
104 CGOpenMPRegionKind RegionKind;
105 RegionCodeGenTy CodeGen;
106 OpenMPDirectiveKind Kind;
107 bool HasCancel;
108};
109
110/// API for captured statement code generation in OpenMP constructs.
111class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
112public:
113 CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
114 const RegionCodeGenTy &CodeGen,
115 OpenMPDirectiveKind Kind, bool HasCancel,
116 StringRef HelperName)
117 : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
118 HasCancel),
119 ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
120 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
121 }
122
123 /// Get a variable or parameter for storing global thread id
124 /// inside OpenMP construct.
125 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
126
127 /// Get the name of the capture helper.
128 StringRef getHelperName() const override { return HelperName; }
129
130 static bool classof(const CGCapturedStmtInfo *Info) {
131 return CGOpenMPRegionInfo::classof(Info) &&
132 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
133 ParallelOutlinedRegion;
134 }
135
136private:
137 /// A variable or parameter storing global thread id for OpenMP
138 /// constructs.
139 const VarDecl *ThreadIDVar;
140 StringRef HelperName;
141};
142
143/// API for captured statement code generation in OpenMP constructs.
144class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
145public:
146 class UntiedTaskActionTy final : public PrePostActionTy {
147 bool Untied;
148 const VarDecl *PartIDVar;
149 const RegionCodeGenTy UntiedCodeGen;
150 llvm::SwitchInst *UntiedSwitch = nullptr;
151
152 public:
153 UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
154 const RegionCodeGenTy &UntiedCodeGen)
155 : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
156 void Enter(CodeGenFunction &CGF) override {
157 if (Untied) {
158 // Emit task switching point.
159 LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
160 CGF.GetAddrOfLocalVar(PartIDVar),
161 PartIDVar->getType()->castAs<PointerType>());
162 llvm::Value *Res =
163 CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
164 llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
165 UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
166 CGF.EmitBlock(DoneBB);
167 CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
168 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
169 UntiedSwitch->addCase(CGF.Builder.getInt32(0),
170 CGF.Builder.GetInsertBlock());
171 emitUntiedSwitch(CGF);
172 }
173 }
174 void emitUntiedSwitch(CodeGenFunction &CGF) const {
175 if (Untied) {
176 LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
177 CGF.GetAddrOfLocalVar(PartIDVar),
178 PartIDVar->getType()->castAs<PointerType>());
179 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
180 PartIdLVal);
181 UntiedCodeGen(CGF);
182 CodeGenFunction::JumpDest CurPoint =
183 CGF.getJumpDestInCurrentScope(".untied.next.");
184 CGF.EmitBranch(CGF.ReturnBlock.getBlock());
185 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
186 UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
187 CGF.Builder.GetInsertBlock());
188 CGF.EmitBranchThroughCleanup(CurPoint);
189 CGF.EmitBlock(CurPoint.getBlock());
190 }
191 }
192 unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
193 };
194 CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
195 const VarDecl *ThreadIDVar,
196 const RegionCodeGenTy &CodeGen,
197 OpenMPDirectiveKind Kind, bool HasCancel,
198 const UntiedTaskActionTy &Action)
199 : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
200 ThreadIDVar(ThreadIDVar), Action(Action) {
201 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
202 }
203
204 /// Get a variable or parameter for storing global thread id
205 /// inside OpenMP construct.
206 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
207
208 /// Get an LValue for the current ThreadID variable.
209 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
210
211 /// Get the name of the capture helper.
212 StringRef getHelperName() const override { return ".omp_outlined."; }
213
214 void emitUntiedSwitch(CodeGenFunction &CGF) override {
215 Action.emitUntiedSwitch(CGF);
216 }
217
218 static bool classof(const CGCapturedStmtInfo *Info) {
219 return CGOpenMPRegionInfo::classof(Info) &&
220 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
221 TaskOutlinedRegion;
222 }
223
224private:
225 /// A variable or parameter storing global thread id for OpenMP
226 /// constructs.
227 const VarDecl *ThreadIDVar;
228 /// Action for emitting code for untied tasks.
229 const UntiedTaskActionTy &Action;
230};
231
232/// API for inlined captured statement code generation in OpenMP
233/// constructs.
234class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
235public:
236 CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
237 const RegionCodeGenTy &CodeGen,
238 OpenMPDirectiveKind Kind, bool HasCancel)
239 : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
240 OldCSI(OldCSI),
241 OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
242
243 // Retrieve the value of the context parameter.
244 llvm::Value *getContextValue() const override {
245 if (OuterRegionInfo)
246 return OuterRegionInfo->getContextValue();
247 llvm_unreachable("No context value for inlined OpenMP region");
248 }
249
250 void setContextValue(llvm::Value *V) override {
251 if (OuterRegionInfo) {
252 OuterRegionInfo->setContextValue(V);
253 return;
254 }
255 llvm_unreachable("No context value for inlined OpenMP region");
256 }
257
258 /// Lookup the captured field decl for a variable.
259 const FieldDecl *lookup(const VarDecl *VD) const override {
260 if (OuterRegionInfo)
261 return OuterRegionInfo->lookup(VD);
262 // If there is no outer outlined region, there is no need to look it up in a
263 // list of captured variables; we can use the original one.
264 return nullptr;
265 }
266
267 FieldDecl *getThisFieldDecl() const override {
268 if (OuterRegionInfo)
269 return OuterRegionInfo->getThisFieldDecl();
270 return nullptr;
271 }
272
273 /// Get a variable or parameter for storing global thread id
274 /// inside OpenMP construct.
275 const VarDecl *getThreadIDVariable() const override {
276 if (OuterRegionInfo)
277 return OuterRegionInfo->getThreadIDVariable();
278 return nullptr;
279 }
280
281 /// Get an LValue for the current ThreadID variable.
282 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
283 if (OuterRegionInfo)
284 return OuterRegionInfo->getThreadIDVariableLValue(CGF);
285 llvm_unreachable("No LValue for inlined OpenMP construct");
286 }
287
288 /// Get the name of the capture helper.
289 StringRef getHelperName() const override {
290 if (auto *OuterRegionInfo = getOldCSI())
291 return OuterRegionInfo->getHelperName();
292 llvm_unreachable("No helper name for inlined OpenMP construct");
293 }
294
295 void emitUntiedSwitch(CodeGenFunction &CGF) override {
296 if (OuterRegionInfo)
297 OuterRegionInfo->emitUntiedSwitch(CGF);
298 }
299
300 CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
301
302 static bool classof(const CGCapturedStmtInfo *Info) {
303 return CGOpenMPRegionInfo::classof(Info) &&
304 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
305 }
306
307 ~CGOpenMPInlinedRegionInfo() override = default;
308
309private:
310 /// CodeGen info about outer OpenMP region.
311 CodeGenFunction::CGCapturedStmtInfo *OldCSI;
312 CGOpenMPRegionInfo *OuterRegionInfo;
313};
314
315/// API for captured statement code generation in OpenMP target
316/// constructs. For these captures, implicit parameters are used instead of the
317/// captured fields. The name of the target region has to be unique in a given
318/// application, so it is provided by the client, because only the client has
319/// the information needed to generate it.
320class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
321public:
322 CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
323 const RegionCodeGenTy &CodeGen, StringRef HelperName)
324 : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
325 /*HasCancel=*/false),
326 HelperName(HelperName) {}
327
328 /// This is unused for target regions because each starts executing
329 /// with a single thread.
330 const VarDecl *getThreadIDVariable() const override { return nullptr; }
331
332 /// Get the name of the capture helper.
333 StringRef getHelperName() const override { return HelperName; }
334
335 static bool classof(const CGCapturedStmtInfo *Info) {
336 return CGOpenMPRegionInfo::classof(Info) &&
337 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
338 }
339
340private:
341 StringRef HelperName;
342};
343
344static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
345 llvm_unreachable("No codegen for expressions");
346}
347/// API for generation of expressions captured in an innermost OpenMP
348/// region.
349class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
350public:
351 CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
352 : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
353 OMPD_unknown,
354 /*HasCancel=*/false),
355 PrivScope(CGF) {
356 // Make sure the globals captured in the provided statement are local by
357 // using the privatization logic. We assume the same variable is not
358 // captured more than once.
359 for (const auto &C : CS.captures()) {
360 if (!C.capturesVariable() && !C.capturesVariableByCopy())
361 continue;
362
363 const VarDecl *VD = C.getCapturedVar();
364 if (VD->isLocalVarDeclOrParm())
365 continue;
366
367 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
368 /*RefersToEnclosingVariableOrCapture=*/false,
369 VD->getType().getNonReferenceType(), VK_LValue,
370 C.getLocation());
371 PrivScope.addPrivate(
372 VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); });
373 }
374 (void)PrivScope.Privatize();
375 }
376
377 /// Lookup the captured field decl for a variable.
378 const FieldDecl *lookup(const VarDecl *VD) const override {
379 if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
380 return FD;
381 return nullptr;
382 }
383
384 /// Emit the captured statement body.
385 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
386 llvm_unreachable("No body for expressions");
387 }
388
389 /// Get a variable or parameter for storing global thread id
390 /// inside OpenMP construct.
391 const VarDecl *getThreadIDVariable() const override {
392 llvm_unreachable("No thread id for expressions");
393 }
394
395 /// Get the name of the capture helper.
396 StringRef getHelperName() const override {
397 llvm_unreachable("No helper name for expressions");
398 }
399
400 static bool classof(const CGCapturedStmtInfo *Info) { return false; }
401
402private:
403 /// Private scope to capture global variables.
404 CodeGenFunction::OMPPrivateScope PrivScope;
405};
406
407/// RAII for emitting code of OpenMP constructs.
408class InlinedOpenMPRegionRAII {
409 CodeGenFunction &CGF;
410 llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
411 FieldDecl *LambdaThisCaptureField = nullptr;
412 const CodeGen::CGBlockInfo *BlockInfo = nullptr;
413 bool NoInheritance = false;
414
415public:
416 /// Constructs region for combined constructs.
417 /// \param CodeGen Code generation sequence for combined directives. Includes
418 /// a list of functions used for code generation of implicitly inlined
419 /// regions.
420 InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
421 OpenMPDirectiveKind Kind, bool HasCancel,
422 bool NoInheritance = true)
423 : CGF(CGF), NoInheritance(NoInheritance) {
424 // Start emission for the construct.
425 CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
426 CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
427 if (NoInheritance) {
428 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
429 LambdaThisCaptureField = CGF.LambdaThisCaptureField;
430 CGF.LambdaThisCaptureField = nullptr;
431 BlockInfo = CGF.BlockInfo;
432 CGF.BlockInfo = nullptr;
433 }
434 }
435
436 ~InlinedOpenMPRegionRAII() {
437 // Restore original CapturedStmtInfo only if we're done with code emission.
438 auto *OldCSI =
439 cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
440 delete CGF.CapturedStmtInfo;
441 CGF.CapturedStmtInfo = OldCSI;
442 if (NoInheritance) {
443 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
444 CGF.LambdaThisCaptureField = LambdaThisCaptureField;
445 CGF.BlockInfo = BlockInfo;
446 }
447 }
448};
449
450/// Values for bit flags used in the ident_t to describe the fields.
451/// All enumeration elements are named and described in accordance with the code
452/// from https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
453enum OpenMPLocationFlags : unsigned {
454 /// Use trampoline for internal microtask.
455 OMP_IDENT_IMD = 0x01,
456 /// Use c-style ident structure.
457 OMP_IDENT_KMPC = 0x02,
458 /// Atomic reduction option for kmpc_reduce.
459 OMP_ATOMIC_REDUCE = 0x10,
460 /// Explicit 'barrier' directive.
461 OMP_IDENT_BARRIER_EXPL = 0x20,
462 /// Implicit barrier in code.
463 OMP_IDENT_BARRIER_IMPL = 0x40,
464 /// Implicit barrier in 'for' directive.
465 OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
466 /// Implicit barrier in 'sections' directive.
467 OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
468 /// Implicit barrier in 'single' directive.
469 OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
470 /// Call of __kmp_for_static_init for static loop.
471 OMP_IDENT_WORK_LOOP = 0x200,
472 /// Call of __kmp_for_static_init for sections.
473 OMP_IDENT_WORK_SECTIONS = 0x400,
474 /// Call of __kmp_for_static_init for distribute.
475 OMP_IDENT_WORK_DISTRIBUTE = 0x800,
476 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
477};
478
479namespace {
480LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()
481/// Values for bit flags for marking which requires clauses have been used.
482enum OpenMPOffloadingRequiresDirFlags : int64_t {
483 /// flag undefined.
484 OMP_REQ_UNDEFINED = 0x000,
485 /// no requires clause present.
486 OMP_REQ_NONE = 0x001,
487 /// reverse_offload clause.
488 OMP_REQ_REVERSE_OFFLOAD = 0x002,
489 /// unified_address clause.
490 OMP_REQ_UNIFIED_ADDRESS = 0x004,
491 /// unified_shared_memory clause.
492 OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
493 /// dynamic_allocators clause.
494 OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
495 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
496};
497
498enum OpenMPOffloadingReservedDeviceIDs {
499 /// Device ID if the device was not defined; the runtime should get it
500 /// from environment variables, as described in the spec.
501 OMP_DEVICEID_UNDEF = -1,
502};
503} // anonymous namespace
504
505/// Describes ident structure that describes a source location.
506/// All descriptions are taken from
507/// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
508/// Original structure:
509/// typedef struct ident {
510/// kmp_int32 reserved_1; /**< might be used in Fortran;
511/// see above */
512/// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
513/// KMP_IDENT_KMPC identifies this union
514/// member */
515/// kmp_int32 reserved_2; /**< not really used in Fortran any more;
516/// see above */
517///#if USE_ITT_BUILD
518/// /* but currently used for storing
519/// region-specific ITT */
520/// /* contextual information. */
521///#endif /* USE_ITT_BUILD */
522/// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
523/// C++ */
524/// char const *psource; /**< String describing the source location.
525/// The string is composed of semi-colon separated
526/// fields which describe the source file,
527/// the function and a pair of line numbers that
528/// delimit the construct.
529/// */
530/// } ident_t;
531enum IdentFieldIndex {
532 /// might be used in Fortran
533 IdentField_Reserved_1,
534 /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
535 IdentField_Flags,
536 /// Not really used in Fortran any more
537 IdentField_Reserved_2,
538 /// Source[4] in Fortran, do not use for C++
539 IdentField_Reserved_3,
540 /// String describing the source location. The string is composed of
541 /// semi-colon separated fields which describe the source file, the function
542 /// and a pair of line numbers that delimit the construct.
543 IdentField_PSource
544};
545
546/// Schedule types for 'omp for' loops (these enumerators are taken from
547/// the enum sched_type in kmp.h).
548enum OpenMPSchedType {
549 /// Lower bound for default (unordered) versions.
550 OMP_sch_lower = 32,
551 OMP_sch_static_chunked = 33,
552 OMP_sch_static = 34,
553 OMP_sch_dynamic_chunked = 35,
554 OMP_sch_guided_chunked = 36,
555 OMP_sch_runtime = 37,
556 OMP_sch_auto = 38,
557 /// static with chunk adjustment (e.g., simd)
558 OMP_sch_static_balanced_chunked = 45,
559 /// Lower bound for 'ordered' versions.
560 OMP_ord_lower = 64,
561 OMP_ord_static_chunked = 65,
562 OMP_ord_static = 66,
563 OMP_ord_dynamic_chunked = 67,
564 OMP_ord_guided_chunked = 68,
565 OMP_ord_runtime = 69,
566 OMP_ord_auto = 70,
567 OMP_sch_default = OMP_sch_static,
568 /// dist_schedule types
569 OMP_dist_sch_static_chunked = 91,
570 OMP_dist_sch_static = 92,
571 /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
572 /// Set if the monotonic schedule modifier was present.
573 OMP_sch_modifier_monotonic = (1 << 29),
574 /// Set if the nonmonotonic schedule modifier was present.
575 OMP_sch_modifier_nonmonotonic = (1 << 30),
576};
577
578/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
579/// region.
580class CleanupTy final : public EHScopeStack::Cleanup {
581 PrePostActionTy *Action;
582
583public:
584 explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
585 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
586 if (!CGF.HaveInsertPoint())
587 return;
588 Action->Exit(CGF);
589 }
590};
591
592} // anonymous namespace
593
594void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
595 CodeGenFunction::RunCleanupsScope Scope(CGF);
596 if (PrePostAction) {
597 CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
598 Callback(CodeGen, CGF, *PrePostAction);
599 } else {
600 PrePostActionTy Action;
601 Callback(CodeGen, CGF, Action);
602 }
603}
604
605/// Check if the combiner is a call to a UDR combiner and, if so, return the
606/// UDR decl used for reduction.
607static const OMPDeclareReductionDecl *
608getReductionInit(const Expr *ReductionOp) {
609 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
610 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
611 if (const auto *DRE =
612 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
613 if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
614 return DRD;
615 return nullptr;
616}
617
618static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
619 const OMPDeclareReductionDecl *DRD,
620 const Expr *InitOp,
621 Address Private, Address Original,
622 QualType Ty) {
623 if (DRD->getInitializer()) {
624 std::pair<llvm::Function *, llvm::Function *> Reduction =
625 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
626 const auto *CE = cast<CallExpr>(InitOp);
627 const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
628 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
629 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
630 const auto *LHSDRE =
631 cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
632 const auto *RHSDRE =
633 cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
634 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
635 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
636 [=]() { return Private; });
637 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
638 [=]() { return Original; });
639 (void)PrivateScope.Privatize();
640 RValue Func = RValue::get(Reduction.second);
641 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
642 CGF.EmitIgnoredExpr(InitOp);
643 } else {
644 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
645 std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
646 auto *GV = new llvm::GlobalVariable(
647 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
648 llvm::GlobalValue::PrivateLinkage, Init, Name);
649 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
650 RValue InitRVal;
651 switch (CGF.getEvaluationKind(Ty)) {
652 case TEK_Scalar:
653 InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
654 break;
655 case TEK_Complex:
656 InitRVal =
657 RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
658 break;
659 case TEK_Aggregate: {
660 OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
661 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
662 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
663 /*IsInitializer=*/false);
664 return;
665 }
666 }
667 OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue);
668 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
669 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
670 /*IsInitializer=*/false);
671 }
672}
673
674/// Emit initialization of arrays of complex types.
675/// \param DestAddr Address of the array.
676/// \param Type Type of array.
677/// \param Init Initial expression of array.
678/// \param SrcAddr Address of the original array.
679static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
680 QualType Type, bool EmitDeclareReductionInit,
681 const Expr *Init,
682 const OMPDeclareReductionDecl *DRD,
683 Address SrcAddr = Address::invalid()) {
684 // Perform element-by-element initialization.
685 QualType ElementTy;
686
687 // Drill down to the base element type on both arrays.
688 const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
689 llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
690 DestAddr =
691 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
692 if (DRD)
693 SrcAddr =
694 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
695
696 llvm::Value *SrcBegin = nullptr;
697 if (DRD)
698 SrcBegin = SrcAddr.getPointer();
699 llvm::Value *DestBegin = DestAddr.getPointer();
700 // Cast from pointer to array type to pointer to single element.
701 llvm::Value *DestEnd =
702 CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
703 // The basic structure here is a while-do loop.
704 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
705 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
706 llvm::Value *IsEmpty =
707 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
708 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
709
710 // Enter the loop body, making that address the current address.
711 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
712 CGF.EmitBlock(BodyBB);
713
714 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
715
716 llvm::PHINode *SrcElementPHI = nullptr;
717 Address SrcElementCurrent = Address::invalid();
718 if (DRD) {
719 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
720 "omp.arraycpy.srcElementPast");
721 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
722 SrcElementCurrent =
723 Address(SrcElementPHI,
724 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
725 }
726 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
727 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
728 DestElementPHI->addIncoming(DestBegin, EntryBB);
729 Address DestElementCurrent =
730 Address(DestElementPHI,
731 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
732
733 // Emit copy.
734 {
735 CodeGenFunction::RunCleanupsScope InitScope(CGF);
736 if (EmitDeclareReductionInit) {
737 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
738 SrcElementCurrent, ElementTy);
739 } else
740 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
741 /*IsInitializer=*/false);
742 }
743
744 if (DRD) {
745 // Shift the address forward by one element.
746 llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
747 SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1,
748 "omp.arraycpy.dest.element");
749 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
750 }
751
752 // Shift the address forward by one element.
753 llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
754 DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1,
755 "omp.arraycpy.dest.element");
756 // Check whether we've reached the end.
757 llvm::Value *Done =
758 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
759 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
760 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
761
762 // Done.
763 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
764}
765
766LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
767 return CGF.EmitOMPSharedLValue(E);
768}
769
770LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
771 const Expr *E) {
772 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
773 return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
774 return LValue();
775}
776
777void ReductionCodeGen::emitAggregateInitialization(
778 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
779 const OMPDeclareReductionDecl *DRD) {
780 // Emit VarDecl with copy init for arrays.
781 // Get the address of the original variable captured in current
782 // captured region.
783 const auto *PrivateVD =
784 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
785 bool EmitDeclareReductionInit =
786 DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
787 EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
788 EmitDeclareReductionInit,
789 EmitDeclareReductionInit ? ClausesData[N].ReductionOp
790 : PrivateVD->getInit(),
791 DRD, SharedLVal.getAddress(CGF));
792}
793
794ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
795 ArrayRef<const Expr *> Origs,
796 ArrayRef<const Expr *> Privates,
797 ArrayRef<const Expr *> ReductionOps) {
798 ClausesData.reserve(Shareds.size());
799 SharedAddresses.reserve(Shareds.size());
800 Sizes.reserve(Shareds.size());
801 BaseDecls.reserve(Shareds.size());
802 const auto *IOrig = Origs.begin();
803 const auto *IPriv = Privates.begin();
804 const auto *IRed = ReductionOps.begin();
805 for (const Expr *Ref : Shareds) {
806 ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
807 std::advance(IOrig, 1);
808 std::advance(IPriv, 1);
809 std::advance(IRed, 1);
810 }
811}
812
813void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
814 assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
815 "Number of generated lvalues must be exactly N.");
816 LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
817 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
818 SharedAddresses.emplace_back(First, Second);
819 if (ClausesData[N].Shared == ClausesData[N].Ref) {
820 OrigAddresses.emplace_back(First, Second);
821 } else {
822 LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
823 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
824 OrigAddresses.emplace_back(First, Second);
825 }
826}
827
828void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
829 const auto *PrivateVD =
830 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
831 QualType PrivateType = PrivateVD->getType();
832 bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
833 if (!PrivateType->isVariablyModifiedType()) {
834 Sizes.emplace_back(
835 CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
836 nullptr);
837 return;
838 }
839 llvm::Value *Size;
840 llvm::Value *SizeInChars;
841 auto *ElemType =
842 cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
843 ->getElementType();
844 auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
845 if (AsArraySection) {
846 Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
847 OrigAddresses[N].first.getPointer(CGF));
848 Size = CGF.Builder.CreateNUWAdd(
849 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
850 SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
851 } else {
852 SizeInChars =
853 CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
854 Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
855 }
856 Sizes.emplace_back(SizeInChars, Size);
857 CodeGenFunction::OpaqueValueMapping OpaqueMap(
858 CGF,
859 cast<OpaqueValueExpr>(
860 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
861 RValue::get(Size));
862 CGF.EmitVariablyModifiedType(PrivateType);
863}
864
865void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
866 llvm::Value *Size) {
867 const auto *PrivateVD =
868 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
869 QualType PrivateType = PrivateVD->getType();
870 if (!PrivateType->isVariablyModifiedType()) {
871 assert(!Size && !Sizes[N].second &&
872 "Size should be nullptr for non-variably modified reduction "
873 "items.");
874 return;
875 }
876 CodeGenFunction::OpaqueValueMapping OpaqueMap(
877 CGF,
878 cast<OpaqueValueExpr>(
879 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
880 RValue::get(Size));
881 CGF.EmitVariablyModifiedType(PrivateType);
882}
883
884void ReductionCodeGen::emitInitialization(
885 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
886 llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
887 assert(SharedAddresses.size() > N && "No variable was generated");
888 const auto *PrivateVD =
889 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
890 const OMPDeclareReductionDecl *DRD =
891 getReductionInit(ClausesData[N].ReductionOp);
892 QualType PrivateType = PrivateVD->getType();
893 PrivateAddr = CGF.Builder.CreateElementBitCast(
894 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
895 QualType SharedType = SharedAddresses[N].first.getType();
896 SharedLVal = CGF.MakeAddrLValue(
897 CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF),
898 CGF.ConvertTypeForMem(SharedType)),
899 SharedType, SharedAddresses[N].first.getBaseInfo(),
900 CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
901 if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
902 if (DRD && DRD->getInitializer())
903 (void)DefaultInit(CGF);
904 emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
905 } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
906 (void)DefaultInit(CGF);
907 emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
908 PrivateAddr, SharedLVal.getAddress(CGF),
909 SharedLVal.getType());
910 } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
911 !CGF.isTrivialInitializer(PrivateVD->getInit())) {
912 CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
913 PrivateVD->getType().getQualifiers(),
914 /*IsInitializer=*/false);
915 }
916}
917
918bool ReductionCodeGen::needCleanups(unsigned N) {
919 const auto *PrivateVD =
920 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
921 QualType PrivateType = PrivateVD->getType();
922 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
923 return DTorKind != QualType::DK_none;
924}
925
926void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
927 Address PrivateAddr) {
928 const auto *PrivateVD =
929 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
930 QualType PrivateType = PrivateVD->getType();
931 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
932 if (needCleanups(N)) {
933 PrivateAddr = CGF.Builder.CreateElementBitCast(
934 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
935 CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
936 }
937}
938
939static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
940 LValue BaseLV) {
941 BaseTy = BaseTy.getNonReferenceType();
942 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
943 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
944 if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
945 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
946 } else {
947 LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
948 BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
949 }
950 BaseTy = BaseTy->getPointeeType();
951 }
952 return CGF.MakeAddrLValue(
953 CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
954 CGF.ConvertTypeForMem(ElTy)),
955 BaseLV.getType(), BaseLV.getBaseInfo(),
956 CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
957}
958
959static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
960 llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
961 llvm::Value *Addr) {
962 Address Tmp = Address::invalid();
963 Address TopTmp = Address::invalid();
964 Address MostTopTmp = Address::invalid();
965 BaseTy = BaseTy.getNonReferenceType();
966 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
967 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
968 Tmp = CGF.CreateMemTemp(BaseTy);
969 if (TopTmp.isValid())
970 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
971 else
972 MostTopTmp = Tmp;
973 TopTmp = Tmp;
974 BaseTy = BaseTy->getPointeeType();
975 }
976 llvm::Type *Ty = BaseLVType;
977 if (Tmp.isValid())
978 Ty = Tmp.getElementType();
979 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
980 if (Tmp.isValid()) {
981 CGF.Builder.CreateStore(Addr, Tmp);
982 return MostTopTmp;
983 }
984 return Address(Addr, BaseLVAlignment);
985}
986
987static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
988 const VarDecl *OrigVD = nullptr;
989 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
990 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
991 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
992 Base = TempOASE->getBase()->IgnoreParenImpCasts();
993 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
994 Base = TempASE->getBase()->IgnoreParenImpCasts();
995 DE = cast<DeclRefExpr>(Base);
996 OrigVD = cast<VarDecl>(DE->getDecl());
997 } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
998 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
999 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1000 Base = TempASE->getBase()->IgnoreParenImpCasts();
1001 DE = cast<DeclRefExpr>(Base);
1002 OrigVD = cast<VarDecl>(DE->getDecl());
1003 }
1004 return OrigVD;
1005}
1006
1007Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1008 Address PrivateAddr) {
1009 const DeclRefExpr *DE;
1010 if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
1011 BaseDecls.emplace_back(OrigVD);
1012 LValue OriginalBaseLValue = CGF.EmitLValue(DE);
1013 LValue BaseLValue =
1014 loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1015 OriginalBaseLValue);
1016 Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
1017 llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1018 BaseLValue.getPointer(CGF), SharedAddr.getPointer());
1019 llvm::Value *PrivatePointer =
1020 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1021 PrivateAddr.getPointer(), SharedAddr.getType());
1022 llvm::Value *Ptr = CGF.Builder.CreateGEP(
1023 SharedAddr.getElementType(), PrivatePointer, Adjustment);
1024 return castToBase(CGF, OrigVD->getType(),
1025 SharedAddresses[N].first.getType(),
1026 OriginalBaseLValue.getAddress(CGF).getType(),
1027 OriginalBaseLValue.getAlignment(), Ptr);
1028 }
1029 BaseDecls.emplace_back(
1030 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1031 return PrivateAddr;
1032}
1033
1034bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1035 const OMPDeclareReductionDecl *DRD =
1036 getReductionInit(ClausesData[N].ReductionOp);
1037 return DRD && DRD->getInitializer();
1038}
1039
1040LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1041 return CGF.EmitLoadOfPointerLValue(
1042 CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1043 getThreadIDVariable()->getType()->castAs<PointerType>());
1044}
1045
1046void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) {
1047 if (!CGF.HaveInsertPoint())
1048 return;
1049 // 1.2.2 OpenMP Language Terminology
1050 // Structured block - An executable statement with a single entry at the
1051 // top and a single exit at the bottom.
1052 // The point of exit cannot be a branch out of the structured block.
1053 // longjmp() and throw() must not violate the entry/exit criteria.
1054 CGF.EHStack.pushTerminate();
1055 if (S)
1056 CGF.incrementProfileCounter(S);
1057 CodeGen(CGF);
1058 CGF.EHStack.popTerminate();
1059}
1060
1061LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1062 CodeGenFunction &CGF) {
1063 return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1064 getThreadIDVariable()->getType(),
1065 AlignmentSource::Decl);
1066}
1067
1068static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1069 QualType FieldTy) {
1070 auto *Field = FieldDecl::Create(
1071 C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1072 C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1073 /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1074 Field->setAccess(AS_public);
1075 DC->addDecl(Field);
1076 return Field;
1077}
1078
1079CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
1080 StringRef Separator)
1081 : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
1082 OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
1083 KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1084
1085 // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
1086 OMPBuilder.initialize();
1087 loadOffloadInfoMetadata();
1088}
1089
1090void CGOpenMPRuntime::clear() {
1091 InternalVars.clear();
1092 // Clean non-target variable declarations possibly used only in debug info.
1093 for (const auto &Data : EmittedNonTargetVariables) {
1094 if (!Data.getValue().pointsToAliveValue())
1095 continue;
1096 auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
1097 if (!GV)
1098 continue;
1099 if (!GV->isDeclaration() || GV->getNumUses() > 0)
1100 continue;
1101 GV->eraseFromParent();
1102 }
1103}
1104
1105std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1106 SmallString<128> Buffer;
1107 llvm::raw_svector_ostream OS(Buffer);
1108 StringRef Sep = FirstSeparator;
1109 for (StringRef Part : Parts) {
1110 OS << Sep << Part;
1111 Sep = Separator;
1112 }
1113 return std::string(OS.str());
1114}
1115
1116static llvm::Function *
1117emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1118 const Expr *CombinerInitializer, const VarDecl *In,
1119 const VarDecl *Out, bool IsCombiner) {
1120 // void .omp_combiner.(Ty *in, Ty *out);
1121 ASTContext &C = CGM.getContext();
1122 QualType PtrTy = C.getPointerType(Ty).withRestrict();
1123 FunctionArgList Args;
1124 ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1125 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1126 ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1127 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1128 Args.push_back(&OmpOutParm);
1129 Args.push_back(&OmpInParm);
1130 const CGFunctionInfo &FnInfo =
1131 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1132 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1133 std::string Name = CGM.getOpenMPRuntime().getName(
1134 {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
1135 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
1136 Name, &CGM.getModule());
1137 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
1138 if (CGM.getLangOpts().Optimize) {
1139 Fn->removeFnAttr(llvm::Attribute::NoInline);
1140 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1141 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1142 }
1143 CodeGenFunction CGF(CGM);
1144 // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1145 // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1146 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1147 Out->getLocation());
1148 CodeGenFunction::OMPPrivateScope Scope(CGF);
1149 Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1150 Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
1151 return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1152 .getAddress(CGF);
1153 });
1154 Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1155 Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
1156 return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1157 .getAddress(CGF);
1158 });
1159 (void)Scope.Privatize();
1160 if (!IsCombiner && Out->hasInit() &&
1161 !CGF.isTrivialInitializer(Out->getInit())) {
1162 CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1163 Out->getType().getQualifiers(),
1164 /*IsInitializer=*/true);
1165 }
1166 if (CombinerInitializer)
1167 CGF.EmitIgnoredExpr(CombinerInitializer);
1168 Scope.ForceCleanup();
1169 CGF.FinishFunction();
1170 return Fn;
1171}
1172
1173void CGOpenMPRuntime::emitUserDefinedReduction(
1174 CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1175 if (UDRMap.count(D) > 0)
1176 return;
1177 llvm::Function *Combiner = emitCombinerOrInitializer(
1178 CGM, D->getType(), D->getCombiner(),
1179 cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
1180 cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
1181 /*IsCombiner=*/true);
1182 llvm::Function *Initializer = nullptr;
1183 if (const Expr *Init = D->getInitializer()) {
1184 Initializer = emitCombinerOrInitializer(
1185 CGM, D->getType(),
1186 D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1187 : nullptr,
1188 cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
1189 cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
1190 /*IsCombiner=*/false);
1191 }
1192 UDRMap.try_emplace(D, Combiner, Initializer);
1193 if (CGF) {
1194 auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1195 Decls.second.push_back(D);
1196 }
1197}
1198
1199std::pair<llvm::Function *, llvm::Function *>
1200CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1201 auto I = UDRMap.find(D);
1202 if (I != UDRMap.end())
1203 return I->second;
1204 emitUserDefinedReduction(/*CGF=*/nullptr, D);
1205 return UDRMap.lookup(D);
1206}
1207
1208namespace {
1209// Temporary RAII solution to perform a push/pop stack event on the OpenMP IR
1210// Builder if one is present.
1211struct PushAndPopStackRAII {
1212 PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
1213 bool HasCancel, llvm::omp::Directive Kind)
1214 : OMPBuilder(OMPBuilder) {
1215 if (!OMPBuilder)
1216 return;
1217
1218 // The following callback is the crucial part of clang's cleanup process.
1219 //
1220 // NOTE:
1221 // Once the OpenMPIRBuilder is used to create parallel regions (and
1222 // similar), the cancellation destination (Dest below) is determined via
1223 // IP. That means if we have variables to finalize we split the block at IP,
1224 // use the new block (=BB) as destination to build a JumpDest (via
1225 // getJumpDestInCurrentScope(BB)) which then is fed to
1226 // EmitBranchThroughCleanup. Furthermore, there will be no need
1227 // to push & pop a FinalizationInfo object.
1228 // The FiniCB will still be needed but at the point where the
1229 // OpenMPIRBuilder is asked to construct a parallel (or similar) construct.
1230 auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
1231 assert(IP.getBlock()->end() == IP.getPoint() &&
1232 "Clang CG should cause non-terminated block!");
1233 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1234 CGF.Builder.restoreIP(IP);
1235 CodeGenFunction::JumpDest Dest =
1236 CGF.getOMPCancelDestination(OMPD_parallel);
1237 CGF.EmitBranchThroughCleanup(Dest);
1238 };
1239
1240 // TODO: Remove this once we emit parallel regions through the
1241 // OpenMPIRBuilder as it can do this setup internally.
1242 llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
1243 OMPBuilder->pushFinalizationCB(std::move(FI));
1244 }
1245 ~PushAndPopStackRAII() {
1246 if (OMPBuilder)
1247 OMPBuilder->popFinalizationCB();
1248 }
1249 llvm::OpenMPIRBuilder *OMPBuilder;
1250};
1251} // namespace
1252
1253static llvm::Function *emitParallelOrTeamsOutlinedFunction(
1254 CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1255 const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1256 const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1257 assert(ThreadIDVar->getType()->isPointerType() &&
1258 "thread id variable must be of type kmp_int32 *");
1259 CodeGenFunction CGF(CGM, true);
1260 bool HasCancel = false;
1261 if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1262 HasCancel = OPD->hasCancel();
1263 else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
1264 HasCancel = OPD->hasCancel();
1265 else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1266 HasCancel = OPSD->hasCancel();
1267 else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1268 HasCancel = OPFD->hasCancel();
1269 else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1270 HasCancel = OPFD->hasCancel();
1271 else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1272 HasCancel = OPFD->hasCancel();
1273 else if (const auto *OPFD =
1274 dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1275 HasCancel = OPFD->hasCancel();
1276 else if (const auto *OPFD =
1277 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1278 HasCancel = OPFD->hasCancel();
1279
1280 // TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
1281 // parallel region to make cancellation barriers work properly.
1282 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1283 PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
1284 CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1285 HasCancel, OutlinedHelperName);
1286 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1287 return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
1288}
1289
1290llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
1291 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1292 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1293 const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1294 return emitParallelOrTeamsOutlinedFunction(
1295 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1296}
1297
1298llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1299 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1300 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1301 const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1302 return emitParallelOrTeamsOutlinedFunction(
1303 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1304}
1305
1306llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
1307 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1308 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1309 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1310 bool Tied, unsigned &NumberOfParts) {
1311 auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1312 PrePostActionTy &) {
1313 llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
1314 llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
1315 llvm::Value *TaskArgs[] = {
1316 UpLoc, ThreadID,
1317 CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1318 TaskTVar->getType()->castAs<PointerType>())
1319 .getPointer(CGF)};
1320 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1321 CGM.getModule(), OMPRTL___kmpc_omp_task),
1322 TaskArgs);
1323 };
1324 CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1325 UntiedCodeGen);
1326 CodeGen.setAction(Action);
1327 assert(!ThreadIDVar->getType()->isPointerType() &&
1328 "thread id variable must be of type kmp_int32 for tasks");
1329 const OpenMPDirectiveKind Region =
1330 isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1331 : OMPD_task;
1332 const CapturedStmt *CS = D.getCapturedStmt(Region);
1333 bool HasCancel = false;
1334 if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
1335 HasCancel = TD->hasCancel();
1336 else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
1337 HasCancel = TD->hasCancel();
1338 else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
1339 HasCancel = TD->hasCancel();
1340 else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
1341 HasCancel = TD->hasCancel();
1342
1343 CodeGenFunction CGF(CGM, true);
1344 CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1345 InnermostKind, HasCancel, Action);
1346 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1347 llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
1348 if (!Tied)
1349 NumberOfParts = Action.getNumberOfParts();
1350 return Res;
1351}
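// For untied tasks, the UntiedCodeGen lambda above re-enqueues the task at
// each untied scheduling point. A rough sketch of the call it emits, with
// illustrative argument names:
//
//   __kmpc_omp_task(loc, thread_id, task_data);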
1352
1353static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1354 const RecordDecl *RD, const CGRecordLayout &RL,
1355 ArrayRef<llvm::Constant *> Data) {
1356 llvm::StructType *StructTy = RL.getLLVMType();
1357 unsigned PrevIdx = 0;
1358 ConstantInitBuilder CIBuilder(CGM);
1359 auto DI = Data.begin();
1360 for (const FieldDecl *FD : RD->fields()) {
1361 unsigned Idx = RL.getLLVMFieldNo(FD);
1362 // Fill any gaps between LLVM fields (alignment padding) with null values.
1363 for (unsigned I = PrevIdx; I < Idx; ++I)
1364 Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1365 PrevIdx = Idx + 1;
1366 Fields.add(*DI);
1367 ++DI;
1368 }
1369}
1370
1371template <class... As>
1372static llvm::GlobalVariable *
1373createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
1374 ArrayRef<llvm::Constant *> Data, const Twine &Name,
1375 As &&... Args) {
1376 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1377 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1378 ConstantInitBuilder CIBuilder(CGM);
1379 ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1380 buildStructValue(Fields, CGM, RD, RL, Data);
1381 return Fields.finishAndCreateGlobal(
1382 Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
1383 std::forward<As>(Args)...);
1384}
1385
1386template <typename T>
1387static void
1388createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1389 ArrayRef<llvm::Constant *> Data,
1390 T &Parent) {
1391 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1392 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1393 ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1394 buildStructValue(Fields, CGM, RD, RL, Data);
1395 Fields.finishAndAddTo(Parent);
1396}
1397
1398void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
1399 bool AtCurrentPoint) {
1400 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1401 assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
1402
1403 llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
1404 if (AtCurrentPoint) {
1405 Elem.second.ServiceInsertPt = new llvm::BitCastInst(
1406 Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
1407 } else {
1408 Elem.second.ServiceInsertPt =
1409 new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
1410 Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
1411 }
1412}
1413
1414void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
1415 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1416 if (Elem.second.ServiceInsertPt) {
1417 llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
1418 Elem.second.ServiceInsertPt = nullptr;
1419 Ptr->eraseFromParent();
1420 }
1421}
1422
1423static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
1424 SourceLocation Loc,
1425 SmallString<128> &Buffer) {
1426 llvm::raw_svector_ostream OS(Buffer);
1427 // Build debug location
1428 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1429 OS << ";" << PLoc.getFilename() << ";";
1430 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1431 OS << FD->getQualifiedNameAsString();
1432 OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1433 return OS.str();
1434}
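// A sample of the ident string produced above, assuming the call site sits
// at line 10, column 3 of foo.c inside function bar (all values
// illustrative):
//
//   ";foo.c;bar;10;3;;"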
1435
1436llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1437 SourceLocation Loc,
1438 unsigned Flags) {
1439 llvm::Constant *SrcLocStr;
1440 if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1441 Loc.isInvalid()) {
1442 SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
1443 } else {
1444 std::string FunctionName = "";
1445 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1446 FunctionName = FD->getQualifiedNameAsString();
1447 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1448 const char *FileName = PLoc.getFilename();
1449 unsigned Line = PLoc.getLine();
1450 unsigned Column = PLoc.getColumn();
1451 SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName.c_str(), FileName,
1452 Line, Column);
1453 }
1454 unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
1455 return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags),
1456 Reserved2Flags);
1457}
1458
1459llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1460 SourceLocation Loc) {
1461 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1462 // If the OpenMPIRBuilder is used we need to use it for all thread id calls as
1463 // the clang invariants used below might be broken.
1464 if (CGM.getLangOpts().OpenMPIRBuilder) {
1465 SmallString<128> Buffer;
1466 OMPBuilder.updateToLocation(CGF.Builder.saveIP());
1467 auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
1468 getIdentStringFromSourceLocation(CGF, Loc, Buffer));
1469 return OMPBuilder.getOrCreateThreadID(
1470 OMPBuilder.getOrCreateIdent(SrcLocStr));
1471 }
1472
1473 llvm::Value *ThreadID = nullptr;
1474 // Check whether we've already cached a load of the thread id in this
1475 // function.
1476 auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1477 if (I != OpenMPLocThreadIDMap.end()) {
1478 ThreadID = I->second.ThreadID;
1479 if (ThreadID != nullptr)
1480 return ThreadID;
1481 }
1482 // If exceptions are enabled, do not use the parameter to avoid a possible crash.
1483 if (auto *OMPRegionInfo =
1484 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1485 if (OMPRegionInfo->getThreadIDVariable()) {
1486 // Check if this is an outlined function with the thread id passed as argument.
1487 LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1488 llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
1489 if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1490 !CGF.getLangOpts().CXXExceptions ||
1491 CGF.Builder.GetInsertBlock() == TopBlock ||
1492 !isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
1493 cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
1494 TopBlock ||
1495 cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
1496 CGF.Builder.GetInsertBlock()) {
1497 ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1498 // If the value was loaded in the entry block, cache it and use it
1499 // everywhere in the function.
1500 if (CGF.Builder.GetInsertBlock() == TopBlock) {
1501 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1502 Elem.second.ThreadID = ThreadID;
1503 }
1504 return ThreadID;
1505 }
1506 }
1507 }
1508
1509 // This is not an outlined function region - need to call kmp_int32
1510 // __kmpc_global_thread_num(ident_t *loc).
1511 // Generate thread id value and cache this value for use across the
1512 // function.
1513 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1514 if (!Elem.second.ServiceInsertPt)
1515 setLocThreadIdInsertPt(CGF);
1516 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1517 CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1518 llvm::CallInst *Call = CGF.Builder.CreateCall(
1519 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
1520 OMPRTL___kmpc_global_thread_num),
1521 emitUpdateLocation(CGF, Loc));
1522 Call->setCallingConv(CGF.getRuntimeCC());
1523 Elem.second.ThreadID = Call;
1524 return Call;
1525}
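// On the slow path above, the thread id is materialized once at the service
// insertion point and cached for the rest of the function. A rough sketch of
// the emitted call, with illustrative names:
//
//   kmp_int32 gtid = __kmpc_global_thread_num(&loc);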
1526
1527void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1528 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1529 if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
1530 clearLocThreadIdInsertPt(CGF);
1531 OpenMPLocThreadIDMap.erase(CGF.CurFn);
1532 }
1533 if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1534 for(const auto *D : FunctionUDRMap[CGF.CurFn])
1535 UDRMap.erase(D);
1536 FunctionUDRMap.erase(CGF.CurFn);
1537 }
1538 auto I = FunctionUDMMap.find(CGF.CurFn);
1539 if (I != FunctionUDMMap.end()) {
1540 for(const auto *D : I->second)
1541 UDMMap.erase(D);
1542 FunctionUDMMap.erase(I);
1543 }
1544 LastprivateConditionalToTypes.erase(CGF.CurFn);
1545 FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
1546}
1547
1548llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1549 return OMPBuilder.IdentPtr;
1550}
1551
1552llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1553 if (!Kmpc_MicroTy) {
1554 // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1555 llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1556 llvm::PointerType::getUnqual(CGM.Int32Ty)};
1557 Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1558 }
1559 return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1560}
1561
1562llvm::FunctionCallee
1563CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
1564 assert((IVSize == 32 || IVSize == 64) &&
1565 "IV size is not compatible with the omp runtime");
1566 StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
1567 : "__kmpc_for_static_init_4u")
1568 : (IVSigned ? "__kmpc_for_static_init_8"
1569 : "__kmpc_for_static_init_8u");
1570 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1571 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
1572 llvm::Type *TypeParams[] = {
1573 getIdentTyPointerTy(), // loc
1574 CGM.Int32Ty, // tid
1575 CGM.Int32Ty, // schedtype
1576 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
1577 PtrTy, // p_lower
1578 PtrTy, // p_upper
1579 PtrTy, // p_stride
1580 ITy, // incr
1581 ITy // chunk
1582 };
1583 auto *FnTy =
1584 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1585 return CGM.CreateRuntimeFunction(FnTy, Name);
1586}
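// A rough sketch of the entry point this resolves to for IVSize == 32 and
// IVSigned == true, reconstructed from the parameter comments above (the
// kmp typedefs live in the OpenMP runtime, not in this file):
//
//   void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 tid,
//                                 kmp_int32 schedtype, kmp_int32 *p_lastiter,
//                                 kmp_int32 *p_lower, kmp_int32 *p_upper,
//                                 kmp_int32 *p_stride, kmp_int32 incr,
//                                 kmp_int32 chunk);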
1587
1588llvm::FunctionCallee
1589CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
1590 assert((IVSize == 32 || IVSize == 64) &&
1591 "IV size is not compatible with the omp runtime");
1592 StringRef Name =
1593 IVSize == 32
1594 ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
1595 : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
1596 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1597 llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
1598 CGM.Int32Ty, // tid
1599 CGM.Int32Ty, // schedtype
1600 ITy, // lower
1601 ITy, // upper
1602 ITy, // stride
1603 ITy // chunk
1604 };
1605 auto *FnTy =
1606 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1607 return CGM.CreateRuntimeFunction(FnTy, Name);
1608}
1609
1610llvm::FunctionCallee
1611CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
1612 assert((IVSize == 32 || IVSize == 64) &&
1613 "IV size is not compatible with the omp runtime");
1614 StringRef Name =
1615 IVSize == 32
1616 ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
1617 : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
1618 llvm::Type *TypeParams[] = {
1619 getIdentTyPointerTy(), // loc
1620 CGM.Int32Ty, // tid
1621 };
1622 auto *FnTy =
1623 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1624 return CGM.CreateRuntimeFunction(FnTy, Name);
1625}
1626
1627llvm::FunctionCallee
1628CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
1629 assert((IVSize == 32 || IVSize == 64) &&
1630 "IV size is not compatible with the omp runtime");
1631 StringRef Name =
1632 IVSize == 32
1633 ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
1634 : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
1635 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1636 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
1637 llvm::Type *TypeParams[] = {
1638 getIdentTyPointerTy(), // loc
1639 CGM.Int32Ty, // tid
1640 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
1641 PtrTy, // p_lower
1642 PtrTy, // p_upper
1643 PtrTy // p_stride
1644 };
1645 auto *FnTy =
1646 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1647 return CGM.CreateRuntimeFunction(FnTy, Name);
1648}
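// A rough sketch of the dispatch-next entry point for IVSize == 32 and
// IVSigned == true, reconstructed from the parameter comments above; the
// kmp_int32 result signals whether another chunk was assigned:
//
//   kmp_int32 __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 tid,
//                                    kmp_int32 *p_lastiter,
//                                    kmp_int32 *p_lower, kmp_int32 *p_upper,
//                                    kmp_int32 *p_stride);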
1649
1650/// Obtain information that uniquely identifies a target entry. This
1651/// consists of the file and device IDs as well as the line number associated
1652/// with the relevant entry source location.
1653static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
1654 unsigned &DeviceID, unsigned &FileID,
1655 unsigned &LineNum) {
1656 SourceManager &SM = C.getSourceManager();
1657
1658 // The loc should always be valid and have a file ID (the user cannot use
1659 // #pragma directives in macros).
1660
1661 assert(Loc.isValid() && "Source location is expected to be always valid.");
1662
1663 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
1664 assert(PLoc.isValid() && "Source location is expected to be always valid.");
1665
1666 llvm::sys::fs::UniqueID ID;
1667 if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
1668 PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false);
1669 assert(PLoc.isValid() && "Source location is expected to be always valid.");
1670 if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
1671 SM.getDiagnostics().Report(diag::err_cannot_open_file)
1672 << PLoc.getFilename() << EC.message();
1673 }
1674
1675 DeviceID = ID.getDevice();
1676 FileID = ID.getFile();
1677 LineNum = PLoc.getLine();
1678}
1679
1680Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
1681 if (CGM.getLangOpts().OpenMPSimd)
1682 return Address::invalid();
1683 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
1684 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
1685 if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
1686 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
1687 HasRequiresUnifiedSharedMemory))) {
1688 SmallString<64> PtrName;
1689 {
1690 llvm::raw_svector_ostream OS(PtrName);
1691 OS << CGM.getMangledName(GlobalDecl(VD));
1692 if (!VD->isExternallyVisible()) {
1693 unsigned DeviceID, FileID, Line;
1694 getTargetEntryUniqueInfo(CGM.getContext(),
1695 VD->getCanonicalDecl()->getBeginLoc(),
1696 DeviceID, FileID, Line);
1697 OS << llvm::format("_%x", FileID);
1698 }
1699 OS << "_decl_tgt_ref_ptr";
1700 }
1701 llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
1702 if (!Ptr) {
1703 QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
1704 Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
1705 PtrName);
1706
1707 auto *GV = cast<llvm::GlobalVariable>(Ptr);
1708 GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
1709
1710 if (!CGM.getLangOpts().OpenMPIsDevice)
1711 GV->setInitializer(CGM.GetAddrOfGlobal(VD));
1712 registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
1713 }
1714 return Address(Ptr, CGM.getContext().getDeclAlign(VD));
1715 }
1716 return Address::invalid();
1717}
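// The reference pointer created above is named from the mangled variable
// name; for a non-externally-visible variable it also embeds the file ID,
// so the result looks roughly like (illustrative):
//
//   <mangled name>[_<file-id-hex>]_decl_tgt_ref_ptr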
1718
1719llvm::Constant *
1720CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
1721 assert(!CGM.getLangOpts().OpenMPUseTLS ||
1722 !CGM.getContext().getTargetInfo().isTLSSupported());
1723 // Look up the entry, lazily creating it if necessary.
1724 std::string Suffix = getName({"cache", ""});
1725 return getOrCreateInternalVariable(
1726 CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
1727}
1728
1729Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
1730 const VarDecl *VD,
1731 Address VDAddr,
1732 SourceLocation Loc) {
1733 if (CGM.getLangOpts().OpenMPUseTLS &&
1734 CGM.getContext().getTargetInfo().isTLSSupported())
1735 return VDAddr;
1736
1737 llvm::Type *VarTy = VDAddr.getElementType();
1738 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
1739 CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
1740 CGM.Int8PtrTy),
1741 CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
1742 getOrCreateThreadPrivateCache(VD)};
1743 return Address(CGF.EmitRuntimeCall(
1744 OMPBuilder.getOrCreateRuntimeFunction(
1745 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
1746 Args),
1747 VDAddr.getAlignment());
1748}
1749
1750void CGOpenMPRuntime::emitThreadPrivateVarInit(
1751 CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
1752 llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
1753 // Call kmp_int32 __kmpc_global_thread_num(&loc) to initialize the OpenMP
1754 // runtime library.
1755 llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
1756 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1757 CGM.getModule(), OMPRTL___kmpc_global_thread_num),
1758 OMPLoc);
1759 // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
1760 // to register constructor/destructor for variable.
1761 llvm::Value *Args[] = {
1762 OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
1763 Ctor, CopyCtor, Dtor};
1764 CGF.EmitRuntimeCall(
1765 OMPBuilder.getOrCreateRuntimeFunction(
1766 CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
1767 Args);
1768}
1769
1770llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
1771 const VarDecl *VD, Address VDAddr, SourceLocation Loc,
1772 bool PerformInit, CodeGenFunction *CGF) {
1773 if (CGM.getLangOpts().OpenMPUseTLS &&
1774 CGM.getContext().getTargetInfo().isTLSSupported())
1775 return nullptr;
1776
1777 VD = VD->getDefinition(CGM.getContext());
1778 if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
1779 QualType ASTTy = VD->getType();
1780
1781 llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
1782 const Expr *Init = VD->getAnyInitializer();
1783 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
1784 // Generate function that re-emits the declaration's initializer into the
1785 // threadprivate copy of the variable VD
1786 CodeGenFunction CtorCGF(CGM);
1787 FunctionArgList Args;
1788 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
1789 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
1790 ImplicitParamDecl::Other);
1791 Args.push_back(&Dst);
1792
1793 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1794 CGM.getContext().VoidPtrTy, Args);
1795 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1796 std::string Name = getName({"__kmpc_global_ctor_", ""});
1797 llvm::Function *Fn =
1798 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1799 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
1800 Args, Loc, Loc);
1801 llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
1802 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
1803 CGM.getContext().VoidPtrTy, Dst.getLocation());
1804 Address Arg = Address(ArgVal, VDAddr.getAlignment());
1805 Arg = CtorCGF.Builder.CreateElementBitCast(
1806 Arg, CtorCGF.ConvertTypeForMem(ASTTy));
1807 CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
1808 /*IsInitializer=*/true);
1809 ArgVal = CtorCGF.EmitLoadOfScalar(
1810 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
1811 CGM.getContext().VoidPtrTy, Dst.getLocation());
1812 CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
1813 CtorCGF.FinishFunction();
1814 Ctor = Fn;
1815 }
1816 if (VD->getType().isDestructedType() != QualType::DK_none) {
1817 // Generate function that emits destructor call for the threadprivate copy
1818 // of the variable VD
1819 CodeGenFunction DtorCGF(CGM);
1820 FunctionArgList Args;
1821 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
1822 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
1823 ImplicitParamDecl::Other);
1824 Args.push_back(&Dst);
1825
1826 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1827 CGM.getContext().VoidTy, Args);
1828 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1829 std::string Name = getName({"__kmpc_global_dtor_", ""});
1830 llvm::Function *Fn =
1831 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1832 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
1833 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
1834 Loc, Loc);
1835 // Create a scope with an artificial location for the body of this function.
1836 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
1837 llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
1838 DtorCGF.GetAddrOfLocalVar(&Dst),
1839 /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
1840 DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
1841 DtorCGF.getDestroyer(ASTTy.isDestructedType()),
1842 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
1843 DtorCGF.FinishFunction();
1844 Dtor = Fn;
1845 }
1846 // Do not emit init function if it is not required.
1847 if (!Ctor && !Dtor)
1848 return nullptr;
1849
1850 llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1851 auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
1852 /*isVarArg=*/false)
1853 ->getPointerTo();
1854 // Copying constructor for the threadprivate variable.
1856 // Must be NULL - reserved by the runtime, which currently requires that
1857 // this parameter always be NULL. Otherwise it fires an assertion.
1857 CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
1858 if (Ctor == nullptr) {
1859 auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1860 /*isVarArg=*/false)
1861 ->getPointerTo();
1862 Ctor = llvm::Constant::getNullValue(CtorTy);
1863 }
1864 if (Dtor == nullptr) {
1865 auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
1866 /*isVarArg=*/false)
1867 ->getPointerTo();
1868 Dtor = llvm::Constant::getNullValue(DtorTy);
1869 }
1870 if (!CGF) {
1871 auto *InitFunctionTy =
1872 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
1873 std::string Name = getName({"__omp_threadprivate_init_", ""});
1874 llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
1875 InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
1876 CodeGenFunction InitCGF(CGM);
1877 FunctionArgList ArgList;
1878 InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
1879 CGM.getTypes().arrangeNullaryFunction(), ArgList,
1880 Loc, Loc);
1881 emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
1882 InitCGF.FinishFunction();
1883 return InitFunction;
1884 }
1885 emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
1886 }
1887 return nullptr;
1888}
1889
1890bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
1891 llvm::GlobalVariable *Addr,
1892 bool PerformInit) {
1893 if (CGM.getLangOpts().OMPTargetTriples.empty() &&
1894 !CGM.getLangOpts().OpenMPIsDevice)
1895 return false;
1896 Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
1897 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
1898 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
1899 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
1900 HasRequiresUnifiedSharedMemory))
1901 return CGM.getLangOpts().OpenMPIsDevice;
1902 VD = VD->getDefinition(CGM.getContext());
1903 assert(VD && "Unknown VarDecl");
1904
1905 if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
1906 return CGM.getLangOpts().OpenMPIsDevice;
1907
1908 QualType ASTTy = VD->getType();
1909 SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
1910
1911 // Produce the unique prefix to identify the new target regions. We use
1912 // the source location of the variable declaration, which we know does not
1913 // conflict with any target region.
1914 unsigned DeviceID;
1915 unsigned FileID;
1916 unsigned Line;
1917 getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
1918 SmallString<128> Buffer, Out;
1919 {
1920 llvm::raw_svector_ostream OS(Buffer);
1921 OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
1922 << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
1923 }
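// The prefix assembled above therefore has the shape
// "__omp_offloading_<device-id-hex>_<file-id-hex>_<name>_l<line>", e.g.
// "__omp_offloading_800_1abc_g_l42" for a variable g declared at line 42
// (all values illustrative); "_ctor"/"_dtor" suffixes are appended below.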
1924
1925 const Expr *Init = VD->getAnyInitializer();
1926 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
1927 llvm::Constant *Ctor;
1928 llvm::Constant *ID;
1929 if (CGM.getLangOpts().OpenMPIsDevice) {
1930 // Generate function that re-emits the declaration's initializer into
1931 // the threadprivate copy of the variable VD
1932 CodeGenFunction CtorCGF(CGM);
1933
1934 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
1935 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1936 llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
1937 FTy, Twine(Buffer, "_ctor"), FI, Loc);
1938 auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
1939 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
1940 FunctionArgList(), Loc, Loc);
1941 auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
1942 CtorCGF.EmitAnyExprToMem(Init,
1943 Address(Addr, CGM.getContext().getDeclAlign(VD)),
1944 Init->getType().getQualifiers(),
1945 /*IsInitializer=*/true);
1946 CtorCGF.FinishFunction();
1947 Ctor = Fn;
1948 ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
1949 CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
1950 } else {
1951 Ctor = new llvm::GlobalVariable(
1952 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1953 llvm::GlobalValue::PrivateLinkage,
1954 llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
1955 ID = Ctor;
1956 }
1957
1958 // Register the information for the entry associated with the constructor.
1959 Out.clear();
1960 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
1961 DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
1962 ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
1963 }
1964 if (VD->getType().isDestructedType() != QualType::DK_none) {
1965 llvm::Constant *Dtor;
1966 llvm::Constant *ID;
1967 if (CGM.getLangOpts().OpenMPIsDevice) {
1968 // Generate function that emits destructor call for the threadprivate
1969 // copy of the variable VD
1970 CodeGenFunction DtorCGF(CGM);
1971
1972 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
1973 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1974 llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
1975 FTy, Twine(Buffer, "_dtor"), FI, Loc);
1976 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
1977 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
1978 FunctionArgList(), Loc, Loc);
1979 // Create a scope with an artificial location for the body of this
1980 // function.
1981 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
1982 DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
1983 ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
1984 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
1985 DtorCGF.FinishFunction();
1986 Dtor = Fn;
1987 ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
1988 CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
1989 } else {
1990 Dtor = new llvm::GlobalVariable(
1991 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1992 llvm::GlobalValue::PrivateLinkage,
1993 llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
1994 ID = Dtor;
1995 }
1996 // Register the information for the entry associated with the destructor.
1997 Out.clear();
1998 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
1999 DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2000 ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
2001 }
2002 return CGM.getLangOpts().OpenMPIsDevice;
2003}
2004
2005Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2006 QualType VarType,
2007 StringRef Name) {
2008 std::string Suffix = getName({"artificial", ""});
2009 llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2010 llvm::Value *GAddr =
2011 getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
2012 if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
2013 CGM.getTarget().isTLSSupported()) {
2014 cast<llvm::GlobalVariable>(GAddr)->setThreadLocal(/*Val=*/true);
2015 return Address(GAddr, CGM.getContext().getTypeAlignInChars(VarType));
2016 }
2017 std::string CacheSuffix = getName({"cache", ""});
2018 llvm::Value *Args[] = {
2019 emitUpdateLocation(CGF, SourceLocation()),
2020 getThreadID(CGF, SourceLocation()),
2021 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
2022 CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2023 /*isSigned=*/false),
2024 getOrCreateInternalVariable(
2025 CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
2026 return Address(
2027 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2028 CGF.EmitRuntimeCall(
2029 OMPBuilder.getOrCreateRuntimeFunction(
2030 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
2031 Args),
2032 VarLVType->getPointerTo(/*AddrSpace=*/0)),
2033 CGM.getContext().getTypeAlignInChars(VarType));
2034}
2035
2036void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
2037 const RegionCodeGenTy &ThenGen,
2038 const RegionCodeGenTy &ElseGen) {
2039 CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2040
2041 // If the condition constant folds and can be elided, try to avoid emitting
2042 // the condition and the dead arm of the if/else.
2043 bool CondConstant;
2044 if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2045 if (CondConstant)
2046 ThenGen(CGF);
2047 else
2048 ElseGen(CGF);
2049 return;
2050 }
2051
2052 // Otherwise, the condition did not fold, or we couldn't elide it. Just
2053 // emit the conditional branch.
2054 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2055 llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2056 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2057 CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2058
2059 // Emit the 'then' code.
2060 CGF.EmitBlock(ThenBlock);
2061 ThenGen(CGF);
2062 CGF.EmitBranch(ContBlock);
2063 // Emit the 'else' code if present.
2064 // There is no need to emit line number for unconditional branch.
2065 (void)ApplyDebugLocation::CreateEmpty(CGF);
2066 CGF.EmitBlock(ElseBlock);
2067 ElseGen(CGF);
2068 // There is no need to emit line number for unconditional branch.
2069 (void)ApplyDebugLocation::CreateEmpty(CGF);
2070 CGF.EmitBranch(ContBlock);
2071 // Emit the continuation block for code after the if.
2072 CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2073}
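// When the condition does not fold, the code above lowers to the usual
// diamond, sketched here in C-like pseudocode with the block names used
// above:
//
//   if (cond) { ThenGen(); /* omp_if.then */ }
//   else      { ElseGen(); /* omp_if.else */ }
//   /* omp_if.end */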
2074
2075void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2076 llvm::Function *OutlinedFn,
2077 ArrayRef<llvm::Value *> CapturedVars,
2078 const Expr *IfCond) {
2079 if (!CGF.HaveInsertPoint())
2080 return;
2081 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2082 auto &M = CGM.getModule();
2083 auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
2084 this](CodeGenFunction &CGF, PrePostActionTy &) {
2085 // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2086 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2087 llvm::Value *Args[] = {
2088 RTLoc,
2089 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2090 CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2091 llvm::SmallVector<llvm::Value *, 16> RealArgs;
2092 RealArgs.append(std::begin(Args), std::end(Args));
2093 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2094
2095 llvm::FunctionCallee RTLFn =
2096 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
2097 CGF.EmitRuntimeCall(RTLFn, RealArgs);
2098 };
2099 auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
2100 this](CodeGenFunction &CGF, PrePostActionTy &) {
2101 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2102 llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
2103 // Build calls:
2104 // __kmpc_serialized_parallel(&Loc, GTid);
2105 llvm::Value *Args[] = {RTLoc, ThreadID};
2106 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2107 M, OMPRTL___kmpc_serialized_parallel),
2108 Args);
2109
2110 // OutlinedFn(&GTid, &zero_bound, CapturedStruct);
2111 Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
2112 Address ZeroAddrBound =
2113 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2114 /*Name=*/".bound.zero.addr");
2115 CGF.InitTempAlloca(ZeroAddrBound, CGF.Builder.getInt32(/*C*/ 0));
2116 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2117 // ThreadId for serialized parallels is 0.
2118 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2119 OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
2120 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2121
2122 // Ensure we do not inline the function. This is trivially true for the ones
2123 // passed to __kmpc_fork_call but the ones called in serialized regions
2124 // could be inlined. This is not perfect, but it is closer to the invariant
2125 // we want, namely, every data environment starts with a new function.
2126 // TODO: We should pass the if condition to the runtime function and do the
2127 // handling there. Much cleaner code.
2128 OutlinedFn->removeFnAttr(llvm::Attribute::AlwaysInline);
2129 OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
2130 RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2131
2132 // __kmpc_end_serialized_parallel(&Loc, GTid);
2133 llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2134 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2135 M, OMPRTL___kmpc_end_serialized_parallel),
2136 EndArgs);
2137 };
2138 if (IfCond) {
2139 emitIfClause(CGF, IfCond, ThenGen, ElseGen);
2140 } else {
2141 RegionCodeGenTy ThenRCG(ThenGen);
2142 ThenRCG(CGF);
2143 }
2144}
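// Net effect of the two arms above, sketched with abbreviated argument
// lists: with a true (or absent) if-clause,
//
//   __kmpc_fork_call(loc, n_captured, (kmpc_micro)outlined, var1, ..., varn);
//
// and with a false if-clause,
//
//   __kmpc_serialized_parallel(loc, gtid);
//   outlined(&gtid, &zero_bound, var1, ..., varn);
//   __kmpc_end_serialized_parallel(loc, gtid);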
2145
2146// If we're inside an (outlined) parallel region, use the region info's
2147// thread-ID variable (it is passed as the first argument of the outlined
2148// function, as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
2149// region but in a regular serial code region, get the thread ID by calling
2150// kmp_int32 __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a
2151// temporary, and return the address of that temporary.
2152Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2153 SourceLocation Loc) {
2154 if (auto *OMPRegionInfo =
2155 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2156 if (OMPRegionInfo->getThreadIDVariable())
2157 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
2158
2159 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2160 QualType Int32Ty =
2161 CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2162 Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2163 CGF.EmitStoreOfScalar(ThreadID,
2164 CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2165
2166 return ThreadIDTemp;
2167}
2168
2169llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
2170 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
2171 SmallString<256> Buffer;
2172 llvm::raw_svector_ostream Out(Buffer);
2173 Out << Name;
2174 StringRef RuntimeName = Out.str();
2175 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2176 if (Elem.second) {
2177 assert(Elem.second->getType()->getPointerElementType() == Ty &&
2178 "OMP internal variable has different type than requested");
2179 return &*Elem.second;
2180 }
2181
2182 return Elem.second = new llvm::GlobalVariable(
2183 CGM.getModule(), Ty, /*IsConstant*/ false,
2184 llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2185 Elem.first(), /*InsertBefore=*/nullptr,
2186 llvm::GlobalValue::NotThreadLocal, AddressSpace);
2187}
2188
2189llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2190 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2191 std::string Name = getName({Prefix, "var"});
2192 return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
2193}
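// For '#pragma omp critical(foo)' this yields an internal kmp_critical_name
// lock variable whose name embeds "gomp_critical_user_foo" and "var", joined
// by the target-dependent separators chosen by getName (exact spelling
// varies by target).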
2194
2195namespace {
2196/// Common pre(post)-action for different OpenMP constructs.
2197class CommonActionTy final : public PrePostActionTy {
2198 llvm::FunctionCallee EnterCallee;
2199 ArrayRef<llvm::Value *> EnterArgs;
2200 llvm::FunctionCallee ExitCallee;
2201 ArrayRef<llvm::Value *> ExitArgs;
2202 bool Conditional;
2203 llvm::BasicBlock *ContBlock = nullptr;
2204
2205public:
2206 CommonActionTy(llvm::FunctionCallee EnterCallee,
2207 ArrayRef<llvm::Value *> EnterArgs,
2208 llvm::FunctionCallee ExitCallee,
2209 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
2210 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2211 ExitArgs(ExitArgs), Conditional(Conditional) {}
2212 void Enter(CodeGenFunction &CGF) override {
2213 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2214 if (Conditional) {
2215 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2216 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2217 ContBlock = CGF.createBasicBlock("omp_if.end");
2218 // Generate the branch (If-stmt)
2219 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2220 CGF.EmitBlock(ThenBlock);
2221 }
2222 }
2223 void Done(CodeGenFunction &CGF) {
2224 // Emit the rest of the blocks/branches.
2225 CGF.EmitBranch(ContBlock);
2226 CGF.EmitBlock(ContBlock, true);
2227 }
2228 void Exit(CodeGenFunction &CGF) override {
2229 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2230 }
2231};
2232} // anonymous namespace
2233
2234void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2235 StringRef CriticalName,
2236 const RegionCodeGenTy &CriticalOpGen,
2237 SourceLocation Loc, const Expr *Hint) {
2238 // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2239 // CriticalOpGen();
2240 // __kmpc_end_critical(ident_t *, gtid, Lock);
2241 // Prepare arguments and build a call to __kmpc_critical
2242 if (!CGF.HaveInsertPoint())
2243 return;
2244 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2245 getCriticalRegionLock(CriticalName)};
2246 llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2247 std::end(Args));
2248 if (Hint) {
2249 EnterArgs.push_back(CGF.Builder.CreateIntCast(
2250 CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
2251 }
2252 CommonActionTy Action(
2253 OMPBuilder.getOrCreateRuntimeFunction(
2254 CGM.getModule(),
2255 Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
2256 EnterArgs,
2257 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2258 OMPRTL___kmpc_end_critical),
2259 Args);
2260 CriticalOpGen.setAction(Action);
2261 emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2262}
2263
2264void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2265 const RegionCodeGenTy &MasterOpGen,
2266 SourceLocation Loc) {
2267 if (!CGF.HaveInsertPoint())
2268 return;
2269 // if(__kmpc_master(ident_t *, gtid)) {
2270 // MasterOpGen();
2271 // __kmpc_end_master(ident_t *, gtid);
2272 // }
2273 // Prepare arguments and build a call to __kmpc_master
2274 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2275 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2276 CGM.getModule(), OMPRTL___kmpc_master),
2277 Args,
2278 OMPBuilder.getOrCreateRuntimeFunction(
2279 CGM.getModule(), OMPRTL___kmpc_end_master),
2280 Args,
2281 /*Conditional=*/true);
2282 MasterOpGen.setAction(Action);
2283 emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2284 Action.Done(CGF);
2285}
2286
2287void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF,
2288 const RegionCodeGenTy &MaskedOpGen,
2289 SourceLocation Loc, const Expr *Filter) {
2290 if (!CGF.HaveInsertPoint())
2291 return;
2292 // if(__kmpc_masked(ident_t *, gtid, filter)) {
2293 // MaskedOpGen();
2294 // __kmpc_end_masked(ident_t *, gtid);
2295 // }
2296 // Prepare arguments and build a call to __kmpc_masked
2297 llvm::Value *FilterVal = Filter
2298 ? CGF.EmitScalarExpr(Filter, CGF.Int32Ty)
2299 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
2300 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2301 FilterVal};
2302 llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc),
2303 getThreadID(CGF, Loc)};
2304 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2305 CGM.getModule(), OMPRTL___kmpc_masked),
2306 Args,
2307 OMPBuilder.getOrCreateRuntimeFunction(
2308 CGM.getModule(), OMPRTL___kmpc_end_masked),
2309 ArgsEnd,
2310 /*Conditional=*/true);
2311 MaskedOpGen.setAction(Action);
2312 emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen);
2313 Action.Done(CGF);
2314}
2315
2316void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2317 SourceLocation Loc) {
2318 if (!CGF.HaveInsertPoint())
2319 return;
2320 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2321 OMPBuilder.createTaskyield(CGF.Builder);
2322 } else {
2323 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2324 llvm::Value *Args[] = {
2325 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2326 llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2327 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2328 CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
2329 Args);
2330 }
2331
2332 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2333 Region->emitUntiedSwitch(CGF);
2334}
2335
2336void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2337 const RegionCodeGenTy &TaskgroupOpGen,
2338 SourceLocation Loc) {
2339 if (!CGF.HaveInsertPoint())
2340 return;
2341 // __kmpc_taskgroup(ident_t *, gtid);
2342 // TaskgroupOpGen();
2343 // __kmpc_end_taskgroup(ident_t *, gtid);
2344 // Prepare arguments and build a call to __kmpc_taskgroup
2345 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2346 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2347 CGM.getModule(), OMPRTL___kmpc_taskgroup),
2348 Args,
2349 OMPBuilder.getOrCreateRuntimeFunction(
2350 CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
2351 Args);
2352 TaskgroupOpGen.setAction(Action);
2353 emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
2354}
2355
2356/// Given an array of pointers to variables, project the address of a
2357/// given variable.
2358static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
2359 unsigned Index, const VarDecl *Var) {
2360 // Pull out the pointer to the variable.
2361 Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
2362 llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
2363
2364 Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
2365 Addr = CGF.Builder.CreateElementBitCast(
2366 Addr, CGF.ConvertTypeForMem(Var->getType()));
2367 return Addr;
2368}
2369
2370static llvm::Value *emitCopyprivateCopyFunction(
2371 CodeGenModule &CGM, llvm::Type *ArgsType,
2372 ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
2373 ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
2374 SourceLocation Loc) {
2375 ASTContext &C = CGM.getContext();
2376 // void copy_func(void *LHSArg, void *RHSArg);
2377 FunctionArgList Args;
2378 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2379 ImplicitParamDecl::Other);
2380 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2381 ImplicitParamDecl::Other);
2382 Args.push_back(&LHSArg);
2383 Args.push_back(&RHSArg);
2384 const auto &CGFI =
2385 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2386 std::string Name =
2387 CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
2388 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2389 llvm::GlobalValue::InternalLinkage, Name,
2390 &CGM.getModule());
2391 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2392 Fn->setDoesNotRecurse();
2393 CodeGenFunction CGF(CGM);
2394 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2395 // Dest = (void*[n])(LHSArg);
2396 // Src = (void*[n])(RHSArg);
2397 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2398 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
2399 ArgsType), CGF.getPointerAlign());
2400 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2401 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
2402 ArgsType), CGF.getPointerAlign());
2403 // *(Type0*)Dst[0] = *(Type0*)Src[0];
2404 // *(Type1*)Dst[1] = *(Type1*)Src[1];
2405 // ...
2406 // *(Typen*)Dst[n] = *(Typen*)Src[n];
2407 for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2408 const auto *DestVar =
2409 cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
2410 Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
2411
2412 const auto *SrcVar =
2413 cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
2414 Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
2415
2416 const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
2417 QualType Type = VD->getType();
2418 CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2419 }
2420 CGF.FinishFunction();
2421 return Fn;
2422}
2423
2424void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
2425 const RegionCodeGenTy &SingleOpGen,
2426 SourceLocation Loc,
2427 ArrayRef<const Expr *> CopyprivateVars,
2428 ArrayRef<const Expr *> SrcExprs,
2429 ArrayRef<const Expr *> DstExprs,
2430 ArrayRef<const Expr *> AssignmentOps) {
2431 if (!CGF.HaveInsertPoint())
2432 return;
2433 assert(CopyprivateVars.size() == SrcExprs.size() &&
2434 CopyprivateVars.size() == DstExprs.size() &&
2435 CopyprivateVars.size() == AssignmentOps.size());
2436 ASTContext &C = CGM.getContext();
2437 // int32 did_it = 0;
2438 // if(__kmpc_single(ident_t *, gtid)) {
2439 // SingleOpGen();
2440 // __kmpc_end_single(ident_t *, gtid);
2441 // did_it = 1;
2442 // }
2443 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2444 // <copy_func>, did_it);
2445
2446 Address DidIt = Address::invalid();
2447 if (!CopyprivateVars.empty()) {
2448 // int32 did_it = 0;
2449 QualType KmpInt32Ty =
2450 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2451 DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
2452 CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
2453 }
2454 // Prepare arguments and build a call to __kmpc_single
2455 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2456 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2457 CGM.getModule(), OMPRTL___kmpc_single),
2458 Args,
2459 OMPBuilder.getOrCreateRuntimeFunction(
2460 CGM.getModule(), OMPRTL___kmpc_end_single),
2461 Args,
2462 /*Conditional=*/true);
2463 SingleOpGen.setAction(Action);
2464 emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
2465 if (DidIt.isValid()) {
2466 // did_it = 1;
2467 CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
2468 }
2469 Action.Done(CGF);
2470 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2471 // <copy_func>, did_it);
2472 if (DidIt.isValid()) {
2473 llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
2474 QualType CopyprivateArrayTy = C.getConstantArrayType(
2475 C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
2476 /*IndexTypeQuals=*/0);
2477 // Create a list of all private variables for copyprivate.
2478 Address CopyprivateList =
2479 CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
2480 for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2481 Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
2482 CGF.Builder.CreateStore(
2483 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2484 CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
2485 CGF.VoidPtrTy),
2486 Elem);
2487 }
2488 // Build a function that copies private values from the single region to
2489 // all other threads in the corresponding parallel region.
2490 llvm::Value *CpyFn = emitCopyprivateCopyFunction(
2491 CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
2492 CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
2493 llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
2494 Address CL =
2495 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
2496 CGF.VoidPtrTy);
2497 llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
2498 llvm::Value *Args[] = {
2499 emitUpdateLocation(CGF, Loc), // ident_t *<loc>
2500 getThreadID(CGF, Loc), // i32 <gtid>
2501 BufSize, // size_t <buf_size>
2502 CL.getPointer(), // void *<copyprivate list>
2503 CpyFn, // void (*) (void *, void *) <copy_func>
2504 DidItVal // i32 did_it
2505 };
2506 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2507 CGM.getModule(), OMPRTL___kmpc_copyprivate),
2508 Args);
2509 }
2510}
2511
2512void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
2513 const RegionCodeGenTy &OrderedOpGen,
2514 SourceLocation Loc, bool IsThreads) {
2515 if (!CGF.HaveInsertPoint())
2516 return;
2517 // __kmpc_ordered(ident_t *, gtid);
2518 // OrderedOpGen();
2519 // __kmpc_end_ordered(ident_t *, gtid);
2520 // Prepare arguments and build a call to __kmpc_ordered
2521 if (IsThreads) {
2522 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2523 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2524 CGM.getModule(), OMPRTL___kmpc_ordered),
2525 Args,
2526 OMPBuilder.getOrCreateRuntimeFunction(
2527 CGM.getModule(), OMPRTL___kmpc_end_ordered),
2528 Args);
2529 OrderedOpGen.setAction(Action);
2530 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2531 return;
2532 }
2533 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2534}
2535
2536unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
2537 unsigned Flags;
2538 if (Kind == OMPD_for)
2539 Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2540 else if (Kind == OMPD_sections)
2541 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2542 else if (Kind == OMPD_single)
2543 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2544 else if (Kind == OMPD_barrier)
2545 Flags = OMP_IDENT_BARRIER_EXPL;
2546 else
2547 Flags = OMP_IDENT_BARRIER_IMPL;
2548 return Flags;
2549}
2550
2551void CGOpenMPRuntime::getDefaultScheduleAndChunk(
2552 CodeGenFunction &CGF, const OMPLoopDirective &S,
2553 OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
2554 // Check if the loop directive is actually a doacross loop directive. In
2555 // this case choose the static schedule with chunk size 1.
2556 if (llvm::any_of(
2557 S.getClausesOfKind<OMPOrderedClause>(),
2558 [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
2559 ScheduleKind = OMPC_SCHEDULE_static;
2560 // Chunk size is 1 in this case.
2561 llvm::APInt ChunkSize(32, 1);
2562 ChunkExpr = IntegerLiteral::Create(
2563 CGF.getContext(), ChunkSize,
2564 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
2565 SourceLocation());
2566 }
2567}
2568
2569void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2570 OpenMPDirectiveKind Kind, bool EmitChecks,
2571 bool ForceSimpleCall) {
2572 // Check if we should use the OMPBuilder
2573 auto *OMPRegionInfo =
2574 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
2575 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2576 CGF.Builder.restoreIP(OMPBuilder.createBarrier(
2577 CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
2578 return;
2579 }
2580
2581 if (!CGF.HaveInsertPoint())
2582 return;
2583 // Build call __kmpc_cancel_barrier(loc, thread_id);
2584 // Build call __kmpc_barrier(loc, thread_id);
2585 unsigned Flags = getDefaultFlagsForBarriers(Kind);
2586 // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
2587 // thread_id);
2588 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2589 getThreadID(CGF, Loc)};
2590 if (OMPRegionInfo) {
2591 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2592 llvm::Value *Result = CGF.EmitRuntimeCall(
2593 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2594 OMPRTL___kmpc_cancel_barrier),
2595 Args);
2596 if (EmitChecks) {
2597 // if (__kmpc_cancel_barrier()) {
2598 // exit from construct;
2599 // }
2600 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
2601 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
2602 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
2603 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2604 CGF.EmitBlock(ExitBB);
2605 // exit from construct;
2606 CodeGenFunction::JumpDest CancelDestination =
2607 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
2608 CGF.EmitBranchThroughCleanup(CancelDestination);
2609 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
2610 }
2611 return;
2612 }
2613 }
2614 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2615 CGM.getModule(), OMPRTL___kmpc_barrier),
2616 Args);
2617}
2618
2619/// Map the OpenMP loop schedule to the runtime enumeration.
2620static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
2621 bool Chunked, bool Ordered) {
2622 switch (ScheduleKind) {
2623 case OMPC_SCHEDULE_static:
2624 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2625 : (Ordered ? OMP_ord_static : OMP_sch_static);
2626 case OMPC_SCHEDULE_dynamic:
2627 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2628 case OMPC_SCHEDULE_guided:
2629 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2630 case OMPC_SCHEDULE_runtime:
2631 return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
2632 case OMPC_SCHEDULE_auto:
2633 return Ordered ? OMP_ord_auto : OMP_sch_auto;
2634 case OMPC_SCHEDULE_unknown:
2635 assert(!Chunked && "chunk was specified but schedule kind not known");
2636 return Ordered ? OMP_ord_static : OMP_sch_static;
2637 }
2638 llvm_unreachable("Unexpected runtime schedule");
2639}
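// A few illustrative mappings implied by the switch above:
//   schedule(static)              -> OMP_sch_static
//   schedule(static, N)           -> OMP_sch_static_chunked
//   schedule(dynamic) + ordered   -> OMP_ord_dynamic_chunked
//   schedule(guided, N)           -> OMP_sch_guided_chunked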
2640
2641/// Map the OpenMP distribute schedule to the runtime enumeration.
2642static OpenMPSchedType
2643getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
2644 // Only static is allowed for dist_schedule.
2645 return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
2646}
2647
2648bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
2649 bool Chunked) const {
2650 OpenMPSchedType Schedule =
2651 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2652 return Schedule == OMP_sch_static;
2653}
2654
2655bool CGOpenMPRuntime::isStaticNonchunked(
2656 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2657 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2658 return Schedule == OMP_dist_sch_static;
2659}
2660
2661bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
2662 bool Chunked) const {
2663 OpenMPSchedType Schedule =
2664 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2665 return Schedule == OMP_sch_static_chunked;
2666}
2667
2668bool CGOpenMPRuntime::isStaticChunked(
2669 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2670 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2671 return Schedule == OMP_dist_sch_static_chunked;
2672}
2673
2674bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
2675 OpenMPSchedType Schedule =
2676 getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
2677 assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
2678 return Schedule != OMP_sch_static;
2679}
2680
2681static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule,
2682 OpenMPScheduleClauseModifier M1,
2683 OpenMPScheduleClauseModifier M2) {
2684 int Modifier = 0;
2685 switch (M1) {
2686 case OMPC_SCHEDULE_MODIFIER_monotonic:
2687 Modifier = OMP_sch_modifier_monotonic;
2688 break;
2689 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2690 Modifier = OMP_sch_modifier_nonmonotonic;
2691 break;
2692 case OMPC_SCHEDULE_MODIFIER_simd:
2693 if (Schedule == OMP_sch_static_chunked)
2694 Schedule = OMP_sch_static_balanced_chunked;
2695 break;
2696 case OMPC_SCHEDULE_MODIFIER_last:
2697 case OMPC_SCHEDULE_MODIFIER_unknown:
2698 break;
2699 }
2700 switch (M2) {
2701 case OMPC_SCHEDULE_MODIFIER_monotonic:
2702 Modifier = OMP_sch_modifier_monotonic;
2703 break;
2704 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2705 Modifier = OMP_sch_modifier_nonmonotonic;
2706 break;
2707 case OMPC_SCHEDULE_MODIFIER_simd:
2708 if (Schedule == OMP_sch_static_chunked)
2709 Schedule = OMP_sch_static_balanced_chunked;
2710 break;
2711 case OMPC_SCHEDULE_MODIFIER_last:
2712 case OMPC_SCHEDULE_MODIFIER_unknown:
2713 break;
2714 }
2715 // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Description.
2716 // If the static schedule kind is specified or if the ordered clause is
2717 // specified, and if the nonmonotonic modifier is not specified, the effect is
2718 // as if the monotonic modifier is specified. Otherwise, unless the monotonic
2719 // modifier is specified, the effect is as if the nonmonotonic modifier is
2720 // specified.
2721 if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
2722 if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
2723 Schedule == OMP_sch_static_balanced_chunked ||
2724 Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
2725 Schedule == OMP_dist_sch_static_chunked ||
2726 Schedule == OMP_dist_sch_static))
2727 Modifier = OMP_sch_modifier_nonmonotonic;
2728 }
2729 return Schedule | Modifier;
2730}
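// [Editor's note, illustrative only] The schedule kind and the modifier are
// packed into a single kmp_int32 by bitwise OR. Assuming the usual kmp.h
// encoding (OMP_sch_dynamic_chunked == 35, OMP_sch_modifier_nonmonotonic ==
// 1 << 30; treat the exact values as an assumption of this note), a
// -fopenmp-version=50 loop with schedule(dynamic) and no modifier yields
//   addMonoNonMonoModifier(CGM, OMP_sch_dynamic_chunked,
//                          OMPC_SCHEDULE_MODIFIER_unknown,
//                          OMPC_SCHEDULE_MODIFIER_unknown)
//     == 35 | (1 << 30)
// because dynamic is not one of the static/ordered kinds listed above, so
// the OpenMP 5.0 rule makes nonmonotonic the default.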
2731
2732void CGOpenMPRuntime::emitForDispatchInit(
2733 CodeGenFunction &CGF, SourceLocation Loc,
2734 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
2735 bool Ordered, const DispatchRTInput &DispatchValues) {
2736 if (!CGF.HaveInsertPoint())
2737 return;
2738 OpenMPSchedType Schedule = getRuntimeSchedule(
2739 ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
2740 assert(Ordered ||
2741 (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
2742 Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
2743 Schedule != OMP_sch_static_balanced_chunked));
2744 // Call __kmpc_dispatch_init(
2745 // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
2746 // kmp_int[32|64] lower, kmp_int[32|64] upper,
2747 // kmp_int[32|64] stride, kmp_int[32|64] chunk);
2748
2749 // If the Chunk was not specified in the clause, use the default value 1.
2750 llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
2751 : CGF.Builder.getIntN(IVSize, 1);
2752 llvm::Value *Args[] = {
2753 emitUpdateLocation(CGF, Loc),
2754 getThreadID(CGF, Loc),
2755 CGF.Builder.getInt32(addMonoNonMonoModifier(
2756 CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
2757 DispatchValues.LB, // Lower
2758 DispatchValues.UB, // Upper
2759 CGF.Builder.getIntN(IVSize, 1), // Stride
2760 Chunk // Chunk
2761 };
2762 CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
2763}
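// [Editor's note, illustrative only] For a 32-bit signed induction variable
// and
//   #pragma omp for schedule(dynamic, 4)
// over iterations [0, 99], the call emitted above is equivalent to
//   __kmpc_dispatch_init_4(&loc, tid,
//                          OMP_sch_dynamic_chunked | <mono/nonmono modifier>,
//                          /*lower=*/0, /*upper=*/99, /*stride=*/1,
//                          /*chunk=*/4);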
2764
2765static void emitForStaticInitCall(
2766 CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
2767 llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
2768 OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
2769 const CGOpenMPRuntime::StaticRTInput &Values) {
2770 if (!CGF.HaveInsertPoint())
2771 return;
2772
2773 assert(!Values.Ordered);
2774 assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
2775 Schedule == OMP_sch_static_balanced_chunked ||
2776 Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
2777 Schedule == OMP_dist_sch_static ||
2778 Schedule == OMP_dist_sch_static_chunked);
2779
2780 // Call __kmpc_for_static_init(
2781 // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
2782 // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
2783 // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
2784 // kmp_int[32|64] incr, kmp_int[32|64] chunk);
2785 llvm::Value *Chunk = Values.Chunk;
2786 if (Chunk == nullptr) {
2787 assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
2788 Schedule == OMP_dist_sch_static) &&
2789 "expected static non-chunked schedule");
2790 // If the Chunk was not specified in the clause, use the default value 1.
2791 Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
2792 } else {
2793 assert((Schedule == OMP_sch_static_chunked ||
2794 Schedule == OMP_sch_static_balanced_chunked ||
2795 Schedule == OMP_ord_static_chunked ||
2796 Schedule == OMP_dist_sch_static_chunked) &&
2797 "expected static chunked schedule");
2798 }
2799 llvm::Value *Args[] = {
2800 UpdateLocation,
2801 ThreadId,
2802 CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
2803 M2)), // Schedule type
2804 Values.IL.getPointer(), // &isLastIter
2805 Values.LB.getPointer(), // &LB
2806 Values.UB.getPointer(), // &UB
2807 Values.ST.getPointer(), // &Stride
2808 CGF.Builder.getIntN(Values.IVSize, 1), // Incr
2809 Chunk // Chunk
2810 };
2811 CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
2812}
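// [Editor's note, illustrative only] Unlike the dispatch_init path, the
// static init runtime call fills the bounds in place: each thread passes the
// global [LB, UB] through the pointer arguments, and __kmpc_for_static_init_*
// rewrites them to that thread's sub-range (and sets *isLastIter), so the
// compiler-generated loop then runs over [*LB, *UB] with no further runtime
// calls until __kmpc_for_static_fini.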
2813
2814void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
2815 SourceLocation Loc,
2816 OpenMPDirectiveKind DKind,
2817 const OpenMPScheduleTy &ScheduleKind,
2818 const StaticRTInput &Values) {
2819 OpenMPSchedType ScheduleNum = getRuntimeSchedule(
2820 ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
2821 assert(isOpenMPWorksharingDirective(DKind) &&
2822 "Expected loop-based or sections-based directive.");
2823 llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
2824 isOpenMPLoopDirective(DKind)
2825 ? OMP_IDENT_WORK_LOOP
2826 : OMP_IDENT_WORK_SECTIONS);
2827 llvm::Value *ThreadId = getThreadID(CGF, Loc);
2828 llvm::FunctionCallee StaticInitFunction =
2829 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
2830 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
2831 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
2832 ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
2833}
2834
2835void CGOpenMPRuntime::emitDistributeStaticInit(
2836 CodeGenFunction &CGF, SourceLocation Loc,
2837 OpenMPDistScheduleClauseKind SchedKind,
2838 const CGOpenMPRuntime::StaticRTInput &Values) {
2839 OpenMPSchedType ScheduleNum =
2840 getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
2841 llvm::Value *UpdatedLocation =
2842 emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
2843 llvm::Value *ThreadId = getThreadID(CGF, Loc);
2844 llvm::FunctionCallee StaticInitFunction =
2845 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
2846 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
2847 ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
2848 OMPC_SCHEDULE_MODIFIER_unknown, Values);
2849}
2850
2851void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
2852 SourceLocation Loc,
2853 OpenMPDirectiveKind DKind) {
2854 if (!CGF.HaveInsertPoint())
2855 return;
2856 // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
2857 llvm::Value *Args[] = {
2858 emitUpdateLocation(CGF, Loc,
2859 isOpenMPDistributeDirective(DKind)
2860 ? OMP_IDENT_WORK_DISTRIBUTE
2861 : isOpenMPLoopDirective(DKind)
2862 ? OMP_IDENT_WORK_LOOP
2863 : OMP_IDENT_WORK_SECTIONS),
2864 getThreadID(CGF, Loc)};
2865 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
2866 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2867 CGM.getModule(), OMPRTL___kmpc_for_static_fini),
2868 Args);
2869}
2870
2871void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
2872 SourceLocation Loc,
2873 unsigned IVSize,
2874 bool IVSigned) {
2875 if (!CGF.HaveInsertPoint())
2876 return;
2877 // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
2878 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2879 CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
2880}
2881
2882llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
2883 SourceLocation Loc, unsigned IVSize,
2884 bool IVSigned, Address IL,
2885 Address LB, Address UB,
2886 Address ST) {
2887 // Call __kmpc_dispatch_next(
2888 // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
2889 // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
2890 // kmp_int[32|64] *p_stride);
2891 llvm::Value *Args[] = {
2892 emitUpdateLocation(CGF, Loc),
2893 getThreadID(CGF, Loc),
2894 IL.getPointer(), // &isLastIter
2895 LB.getPointer(), // &Lower
2896 UB.getPointer(), // &Upper
2897 ST.getPointer() // &Stride
2898 };
2899 llvm::Value *Call =
2900 CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
2901 return CGF.EmitScalarConversion(
2902 Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
2903 CGF.getContext().BoolTy, Loc);
2904}
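// [Editor's note, illustrative sketch] The generated code uses the returned
// boolean as a loop condition, producing a dispatch loop of roughly this
// shape (names are placeholders, not emitted identifiers):
//   while (__kmpc_dispatch_next_4(&loc, tid, &last, &lb, &ub, &st)) {
//     for (i = lb; i <= ub; i += st)
//       <loop body>;
//   }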
2905
2906void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
2907 llvm::Value *NumThreads,
2908 SourceLocation Loc) {
2909 if (!CGF.HaveInsertPoint())
2910 return;
2911 // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
2912 llvm::Value *Args[] = {
2913 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2914 CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
2915 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2916 CGM.getModule(), OMPRTL___kmpc_push_num_threads),
2917 Args);
2918}
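// [Editor's note, illustrative only] For
//   #pragma omp parallel num_threads(8)
// this emits __kmpc_push_num_threads(&loc, tid, 8) before the parallel
// region is started (e.g., via __kmpc_fork_call), so the requested thread
// count applies only to the next parallel region.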
2919
2920void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
2921 ProcBindKind ProcBind,
2922 SourceLocation Loc) {
2923 if (!CGF.HaveInsertPoint())
2924 return;
2925 assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value.");
2926 // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
2927 llvm::Value *Args[] = {
2928 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2929 llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
2930 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2931 CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
2932 Args);
2933}
2934
2935void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
2936 SourceLocation Loc, llvm::AtomicOrdering AO) {
2937 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2938 OMPBuilder.createFlush(CGF.Builder);
2939 } else {
2940 if (!CGF.HaveInsertPoint())
2941 return;
2942 // Build call void __kmpc_flush(ident_t *loc)
2943 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2944 CGM.getModule(), OMPRTL___kmpc_flush),
2945 emitUpdateLocation(CGF, Loc));
2946 }
2947}
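// [Editor's note, illustrative only] A bare
//   #pragma omp flush
// therefore lowers to a single call void __kmpc_flush(&loc). The list items
// of the clause are ignored here (note the unnamed ArrayRef parameter), as
// the runtime call flushes the whole thread-visible state.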
2948
2949namespace {
2950/// Indexes of fields for type kmp_task_t.
2951enum KmpTaskTFields {
2952 /// List of shared variables.
2953 KmpTaskTShareds,
2954 /// Task routine.
2955 KmpTaskTRoutine,
2956 /// Partition id for the untied tasks.
2957 KmpTaskTPartId,
2958 /// Function with call of destructors for private variables.
2959 Data1,
2960 /// Task priority.
2961 Data2,
2962 /// (Taskloops only) Lower bound.
2963 KmpTaskTLowerBound,
2964 /// (Taskloops only) Upper bound.
2965 KmpTaskTUpperBound,
2966 /// (Taskloops only) Stride.
2967 KmpTaskTStride,
2968 /// (Taskloops only) Is last iteration flag.
2969 KmpTaskTLastIter,
2970 /// (Taskloops only) Reduction data.
2971 KmpTaskTReductions,
2972};
2973} // anonymous namespace
2974
2975bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
2976 return OffloadEntriesTargetRegion.empty() &&
2977 OffloadEntriesDeviceGlobalVar.empty();
2978}
2979
2980/// Initialize target region entry.
2981void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
2982 initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
2983 StringRef ParentName, unsigned LineNum,
2984 unsigned Order) {
2985 assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
2986 "only required for the device "
2987 "code generation.");
2988 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
2989 OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
2990 OMPTargetRegionEntryTargetRegion);
2991 ++OffloadingEntriesNum;
2992}
2993
2994void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
2995 registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
2996 StringRef ParentName, unsigned LineNum,
2997 llvm::Constant *Addr, llvm::Constant *ID,
2998 OMPTargetRegionEntryKind Flags) {
2999 // If we are emitting code for a target, the entry is already initialized;
3000 // it only has to be registered.
3001 if (CGM.getLangOpts().OpenMPIsDevice) {
3002 // This could happen if the device compilation is invoked standalone.
3003 if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum))
3004 return;
3005 auto &Entry =
3006 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3007 Entry.setAddress(Addr);
3008 Entry.setID(ID);
3009 Entry.setFlags(Flags);
3010 } else {
3011 if (Flags ==
3012 OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion &&
3013 hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
3014 /*IgnoreAddressId*/ true))
3015 return;
3016 assert(!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
3017 "Target region entry already registered!");
3018 OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
3019 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3020 ++OffloadingEntriesNum;
3021 }
3022}
3023
3024bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3025 unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
3026 bool IgnoreAddressId) const {
3027 auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3028 if (PerDevice == OffloadEntriesTargetRegion.end())
3029 return false;
3030 auto PerFile = PerDevice->second.find(FileID);
3031 if (PerFile == PerDevice->second.end())
3032 return false;
3033 auto PerParentName = PerFile->second.find(ParentName);
3034 if (PerParentName == PerFile->second.end())
3035 return false;
3036 auto PerLine = PerParentName->second.find(LineNum);
3037 if (PerLine == PerParentName->second.end())
3038 return false;
3039 // Fail if this entry is already registered.
3040 if (!IgnoreAddressId &&
3041 (PerLine->second.getAddress() || PerLine->second.getID()))
3042 return false;
3043 return true;
3044}
3045
3046void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3047 const OffloadTargetRegionEntryInfoActTy &Action) {
3048 // Scan all target region entries and perform the provided action.
3049 for (const auto &D : OffloadEntriesTargetRegion)
3050 for (const auto &F : D.second)
3051 for (const auto &P : F.second)
3052 for (const auto &L : P.second)
3053 Action(D.first, F.first, P.first(), L.first, L.second);
3054}
3055
3056void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3057 initializeDeviceGlobalVarEntryInfo(StringRef Name,
3058 OMPTargetGlobalVarEntryKind Flags,
3059 unsigned Order) {
3060 assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3061 "only required for the device "
3062 "code generation.");
3063 OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3064 ++OffloadingEntriesNum;
3065}
3066
3067void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3068 registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
3069 CharUnits VarSize,
3070 OMPTargetGlobalVarEntryKind Flags,
3071 llvm::GlobalValue::LinkageTypes Linkage) {
3072 if (CGM.getLangOpts().OpenMPIsDevice) {
3073 // This could happen if the device compilation is invoked standalone.
3074 if (!hasDeviceGlobalVarEntryInfo(VarName))
3075 return;
3076 auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3077 if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
3078 if (Entry.getVarSize().isZero()) {
3079 Entry.setVarSize(VarSize);
3080 Entry.setLinkage(Linkage);
3081 }
3082 return;
3083 }
3084 Entry.setVarSize(VarSize);
3085 Entry.setLinkage(Linkage);
3086 Entry.setAddress(Addr);
3087 } else {
3088 if (hasDeviceGlobalVarEntryInfo(VarName)) {
3089 auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3090 assert(Entry.isValid() && Entry.getFlags() == Flags &&
3091 "Entry not initialized!");
3092 if (Entry.getVarSize().isZero()) {
3093 Entry.setVarSize(VarSize);
3094 Entry.setLinkage(Linkage);
3095 }
3096 return;
3097 }
3098 OffloadEntriesDeviceGlobalVar.try_emplace(
3099 VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
3100 ++OffloadingEntriesNum;
3101 }
3102}
3103
3104void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3105 actOnDeviceGlobalVarEntriesInfo(
3106 const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3107 // Scan all device global variable entries and perform the provided action.
3108 for (const auto &E : OffloadEntriesDeviceGlobalVar)
3109 Action(E.getKey(), E.getValue());
3110}
3111
3112void CGOpenMPRuntime::createOffloadEntry(
3113 llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
3114 llvm::GlobalValue::LinkageTypes Linkage) {
3115 StringRef Name = Addr->getName();
3116 llvm::Module &M = CGM.getModule();
3117 llvm::LLVMContext &C = M.getContext();
3118
3119 // Create constant string with the name.
3120 llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3121
3122 std::string StringName = getName({"omp_offloading", "entry_name"});
3123 auto *Str = new llvm::GlobalVariable(
3124 M, StrPtrInit->getType(), /*isConstant=*/true,
3125 llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
3126 Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3127
3128 llvm::Constant *Data[] = {
3129 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(ID, CGM.VoidPtrTy),
3130 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, CGM.Int8PtrTy),
3131 llvm::ConstantInt::get(CGM.SizeTy, Size),
3132 llvm::ConstantInt::get(CGM.Int32Ty, Flags),
3133 llvm::ConstantInt::get(CGM.Int32Ty, 0)};
3134 std::string EntryName = getName({"omp_offloading", "entry", ""});
3135 llvm::GlobalVariable *Entry = createGlobalStruct(
3136 CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
3137 Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
3138
3139 // The entry has to be created in the section the linker expects it to be.
3140 Entry->setSection("omp_offloading_entries");
3141}
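// [Editor's note, illustrative only] In C-like terms the global built above
// is (field layout from getTgtOffloadEntryQTy() below; the exact name
// separators produced by getName() are target-dependent):
//   struct __tgt_offload_entry .omp_offloading.entry.<name> = {
//     (void *)ID,                    // addr
//     .omp_offloading.entry_name,    // name (internal string global)
//     size, flags, /*reserved=*/0
//   };
// placed in the "omp_offloading_entries" section so the linker can assemble
// the host-side entry table.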
3142
3143void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3144 // Emit the offloading entries and metadata so that the device codegen side
3145 // can easily figure out what to emit. The produced metadata looks like
3146 // this:
3147 //
3148 // !omp_offload.info = !{!1, ...}
3149 //
3150 // Right now we only generate metadata for functions that contain target
3151 // regions.
3152
3153 // If we are in simd mode or there are no entries, we don't need to do
3154 // anything.
3155 if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
3156 return;
3157
3158 llvm::Module &M = CGM.getModule();
3159 llvm::LLVMContext &C = M.getContext();
3160 SmallVector<std::tuple<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *,
3161 SourceLocation, StringRef>,
3162 16>
3163 OrderedEntries(OffloadEntriesInfoManager.size());
3164 llvm::SmallVector<StringRef, 16> ParentFunctions(
3165 OffloadEntriesInfoManager.size());
3166
3167 // Auxiliary methods to create metadata values and strings.
3168 auto &&GetMDInt = [this](unsigned V) {
3169 return llvm::ConstantAsMetadata::get(
3170 llvm::ConstantInt::get(CGM.Int32Ty, V));
3171 };
3172
3173 auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
3174
3175 // Create the offloading info metadata node.
3176 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3177
3178 // Create a function that emits metadata for each target region entry.
3179 auto &&TargetRegionMetadataEmitter =
3180 [this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt,
3181 &GetMDString](
3182 unsigned DeviceID, unsigned FileID, StringRef ParentName,
3183 unsigned Line,
3184 const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3185 // Generate metadata for target regions. Each entry of this metadata
3186 // contains:
3187 // - Entry 0 -> Kind of this type of metadata (0).
3188 // - Entry 1 -> Device ID of the file where the entry was identified.
3189 // - Entry 2 -> File ID of the file where the entry was identified.
3190 // - Entry 3 -> Mangled name of the function where the entry was
3191 // identified.
3192 // - Entry 4 -> Line in the file where the entry was identified.
3193 // - Entry 5 -> Order the entry was created.
3194 // The first element of the metadata node is the kind.
3195 llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
3196 GetMDInt(FileID), GetMDString(ParentName),
3197 GetMDInt(Line), GetMDInt(E.getOrder())};
3198
3199 SourceLocation Loc;
3200 for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
3201 E = CGM.getContext().getSourceManager().fileinfo_end();
3202 I != E; ++I) {
3203 if (I->getFirst()->getUniqueID().getDevice() == DeviceID &&
3204 I->getFirst()->getUniqueID().getFile() == FileID) {
3205 Loc = CGM.getContext().getSourceManager().translateFileLineCol(
3206 I->getFirst(), Line, 1);
3207 break;
3208 }
3209 }
3210 // Save this entry in the right position of the ordered entries array.
3211 OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName);
3212 ParentFunctions[E.getOrder()] = ParentName;
3213
3214 // Add metadata to the named metadata node.
3215 MD->addOperand(llvm::MDNode::get(C, Ops));
3216 };
3217
3218 OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3219 TargetRegionMetadataEmitter);
3220
3221 // Create a function that emits metadata for each device global variable entry.
3222 auto &&DeviceGlobalVarMetadataEmitter =
3223 [&C, &OrderedEntries, &GetMDInt, &GetMDString,
3224 MD](StringRef MangledName,
3225 const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
3226 &E) {
3227 // Generate metadata for global variables. Each entry of this metadata
3228 // contains:
3229 // - Entry 0 -> Kind of this type of metadata (1).
3230 // - Entry 1 -> Mangled name of the variable.
3231 // - Entry 2 -> Declare target kind.
3232 // - Entry 3 -> Order the entry was created.
3233 // The first element of the metadata node is the kind.
3234 llvm::Metadata *Ops[] = {
3235 GetMDInt(E.getKind()), GetMDString(MangledName),
3236 GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
3237
3238 // Save this entry in the right position of the ordered entries array.
3239 OrderedEntries[E.getOrder()] =
3240 std::make_tuple(&E, SourceLocation(), MangledName);
3241
3242 // Add metadata to the named metadata node.
3243 MD->addOperand(llvm::MDNode::get(C, Ops));
3244 };
3245
3246 OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
3247 DeviceGlobalVarMetadataEmitter);
3248
3249 for (const auto &E : OrderedEntries) {
3250 assert(std::get<0>(E) && "All ordered entries must exist!");
3251 if (const auto *CE =
3252 dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3253 std::get<0>(E))) {
3254 if (!CE->getID() || !CE->getAddress()) {
3255 // Do not blame the entry if the parent function is not emitted.
3256 StringRef FnName = ParentFunctions[CE->getOrder()];
3257 if (!CGM.GetGlobalValue(FnName))
3258 continue;
3259 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3260 DiagnosticsEngine::Error,
3261 "Offloading entry for target region in %0 is incorrect: either the "
3262 "address or the ID is invalid.");
3263 CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName;
3264 continue;
3265 }
3266 createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
3267 CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
3268 } else if (const auto *CE = dyn_cast<OffloadEntriesInfoManagerTy::
3269 OffloadEntryInfoDeviceGlobalVar>(
3270 std::get<0>(E))) {
3271 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
3272 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3273 CE->getFlags());
3274 switch (Flags) {
3275 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
3276 if (CGM.getLangOpts().OpenMPIsDevice &&
3277 CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
3278 continue;
3279 if (!CE->getAddress()) {
3280 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3281 DiagnosticsEngine::Error, "Offloading entry for declare target "
3282 "variable %0 is incorrect: the "
3283 "address is invalid.");
3284 CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E);
3285 continue;
3286 }
3287 // The variable has no definition - no need to add the entry.
3288 if (CE->getVarSize().isZero())
3289 continue;
3290 break;
3291 }
3292 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
3293 assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
3294 (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
3295 "Declare target link address is set.");
3296 if (CGM.getLangOpts().OpenMPIsDevice)
3297 continue;
3298 if (!CE->getAddress()) {
3299 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3300 DiagnosticsEngine::Error,
3301 "Offloading entry for declare target variable is incorrect: the "
3302 "address is invalid.");
3303 CGM.getDiags().Report(DiagID);
3304 continue;
3305 }
3306 break;
3307 }
3308 createOffloadEntry(CE->getAddress(), CE->getAddress(),
3309 CE->getVarSize().getQuantity(), Flags,
3310 CE->getLinkage());
3311 } else {
3312 llvm_unreachable("Unsupported entry kind.");
3313 }
3314 }
3315}
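// [Editor's note, illustrative only] For a target region at line 42 of the
// main file inside function _Z3foov, the host IR ends up with metadata of
// roughly this shape (IDs and placeholders in angle brackets are invented
// for this note):
//   !omp_offload.info = !{!0}
//   !0 = !{i32 0, i32 <DeviceID>, i32 <FileID>, !"_Z3foov", i32 42, i32 0}
// which loadOffloadInfoMetadata() below parses back on the device side.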
3316
3317/// Loads all the offload entries information from the host IR
3318/// metadata.
3319void CGOpenMPRuntime::loadOffloadInfoMetadata() {
3320 // If we are in target mode, load the metadata from the host IR. This code has
3321 // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
3322
3323 if (!CGM.getLangOpts().OpenMPIsDevice)
3324 return;
3325
3326 if (CGM.getLangOpts().OMPHostIRFile.empty())
3327 return;
3328
3329 auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
3330 if (auto EC = Buf.getError()) {
3331 CGM.getDiags().Report(diag::err_cannot_open_file)
3332 << CGM.getLangOpts().OMPHostIRFile << EC.message();
3333 return;
3334 }
3335
3336 llvm::LLVMContext C;
3337 auto ME = expectedToErrorOrAndEmitErrors(
3338 C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
3339
3340 if (auto EC = ME.getError()) {
3341 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3342 DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
3343 CGM.getDiags().Report(DiagID)
3344 << CGM.getLangOpts().OMPHostIRFile << EC.message();
3345 return;
3346 }
3347
3348 llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
3349 if (!MD)
3350 return;
3351
3352 for (llvm::MDNode *MN : MD->operands()) {
3353 auto &&GetMDInt = [MN](unsigned Idx) {
3354 auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
3355 return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
3356 };
3357
3358 auto &&GetMDString = [MN](unsigned Idx) {
3359 auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
3360 return V->getString();
3361 };
3362
3363 switch (GetMDInt(0)) {
3364 default:
3365 llvm_unreachable("Unexpected metadata!");
3366 break;
3367 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3368 OffloadingEntryInfoTargetRegion:
3369 OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
3370 /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
3371 /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
3372 /*Order=*/GetMDInt(5));
3373 break;
3374 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3375 OffloadingEntryInfoDeviceGlobalVar:
3376 OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
3377 /*MangledName=*/GetMDString(1),
3378 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3379 /*Flags=*/GetMDInt(2)),
3380 /*Order=*/GetMDInt(3));
3381 break;
3382 }
3383 }
3384}
3385
3386void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
3387 if (!KmpRoutineEntryPtrTy) {
3388 // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
3389 ASTContext &C = CGM.getContext();
3390 QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
3391 FunctionProtoType::ExtProtoInfo EPI;
3392 KmpRoutineEntryPtrQTy = C.getPointerType(
3393 C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
3394 KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
3395 }
3396}
3397
3398QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
3399 // Make sure the type of the entry is already created. This is the type we
3400 // have to create:
3401 // struct __tgt_offload_entry{
3402 // void *addr; // Pointer to the offload entry info.
3403 // // (function or global)
3404 // char *name; // Name of the function or global.
3405 // size_t size; // Size of the entry info (0 if it is a function).
3406 // int32_t flags; // Flags associated with the entry, e.g. 'link'.
3407 // int32_t reserved; // Reserved, to use by the runtime library.
3408 // };
3409 if (TgtOffloadEntryQTy.isNull()) {
3410 ASTContext &C = CGM.getContext();
3411 RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
3412 RD->startDefinition();
3413 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3414 addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
3415 addFieldToRecordDecl(C, RD, C.getSizeType());
3416 addFieldToRecordDecl(
3417 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3418 addFieldToRecordDecl(
3419 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3420 RD->completeDefinition();
3421 RD->addAttr(PackedAttr::CreateImplicit(C));
3422 TgtOffloadEntryQTy = C.getRecordType(RD);
3423 }
3424 return TgtOffloadEntryQTy;
3425}
3426
3427namespace {
3428struct PrivateHelpersTy {
3429 PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
3430 const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
3431 : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
3432 PrivateElemInit(PrivateElemInit) {}
3433 PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
3434 const Expr *OriginalRef = nullptr;
3435 const VarDecl *Original = nullptr;
3436 const VarDecl *PrivateCopy = nullptr;
3437 const VarDecl *PrivateElemInit = nullptr;
3438 bool isLocalPrivate() const {
3439 return !OriginalRef && !PrivateCopy && !PrivateElemInit;
3440 }
3441};
3442typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
3443} // anonymous namespace
3444
3445static bool isAllocatableDecl(const VarDecl *VD) {
3446 const VarDecl *CVD = VD->getCanonicalDecl();
3447 if (!CVD->hasAttr<OMPAllocateDeclAttr>())
3448 return false;
3449 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
3450 // Use the default allocation.
3451 return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
3452 AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
3453 !AA->getAllocator());
3454}
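// [Editor's note, illustrative only] A variable such as
//   int v;
//   #pragma omp allocate(v) allocator(omp_large_cap_mem_alloc)
// is "allocatable" here (returns true), whereas a plain variable, or one
// marked with the default/null allocator and no allocator expression,
// returns false and keeps the default stack/global allocation.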
3455
3456static RecordDecl *
3457createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
3458 if (!Privates.empty()) {
3459 ASTContext &C = CGM.getContext();
3460 // Build struct .kmp_privates_t. {
3461 // /* private vars */
3462 // };
3463 RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
3464 RD->startDefinition();
3465 for (const auto &Pair : Privates) {
3466 const VarDecl *VD = Pair.second.Original;
3467 QualType Type = VD->getType().getNonReferenceType();
3468 // If the private variable is a local variable with lvalue ref type,
3469 // allocate the pointer instead of the pointee type.
3470 if (Pair.second.isLocalPrivate()) {
3471 if (VD->getType()->isLValueReferenceType())
3472 Type = C.getPointerType(Type);
3473 if (isAllocatableDecl(VD))
3474 Type = C.getPointerType(Type);
3475 }
3476 FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
3477 if (VD->hasAttrs()) {
3478 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
3479 E(VD->getAttrs().end());
3480 I != E; ++I)
3481 FD->addAttr(*I);
3482 }
3483 }
3484 RD->completeDefinition();
3485 return RD;
3486 }
3487 return nullptr;
3488}
3489
3490static RecordDecl *
3491createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
3492 QualType KmpInt32Ty,
3493 QualType KmpRoutineEntryPointerQTy) {
3494 ASTContext &C = CGM.getContext();
3495 // Build struct kmp_task_t {
3496 // void * shareds;
3497 // kmp_routine_entry_t routine;
3498 // kmp_int32 part_id;
3499 // kmp_cmplrdata_t data1;
3500 // kmp_cmplrdata_t data2;
3501 // For taskloops additional fields:
3502 // kmp_uint64 lb;
3503 // kmp_uint64 ub;
3504 // kmp_int64 st;
3505 // kmp_int32 liter;
3506 // void * reductions;
3507 // };
3508 RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
3509 UD->startDefinition();
3510 addFieldToRecordDecl(C, UD, KmpInt32Ty);
3511 addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
3512 UD->completeDefinition();
3513 QualType KmpCmplrdataTy = C.getRecordType(UD);
3514 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
3515 RD->startDefinition();
3516 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3517 addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
3518 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3519 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3520 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3521 if (isOpenMPTaskLoopDirective(Kind)) {
3522 QualType KmpUInt64Ty =
3523 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
3524 QualType KmpInt64Ty =
3525 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
3526 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3527 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3528 addFieldToRecordDecl(C, RD, KmpInt64Ty);
3529 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3530 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3531 }
3532 RD->completeDefinition();
3533 return RD;
3534}
3535
3536static RecordDecl *
3537createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
3538 ArrayRef<PrivateDataTy> Privates) {
3539 ASTContext &C = CGM.getContext();
3540 // Build struct kmp_task_t_with_privates {
3541 // kmp_task_t task_data;
3542 // .kmp_privates_t. privates;
3543 // };
3544 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
3545 RD->startDefinition();
3546 addFieldToRecordDecl(C, RD, KmpTaskTQTy);
3547 if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
3548 addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
3549 RD->completeDefinition();
3550 return RD;
3551}
3552
3553/// Emit a proxy function which accepts kmp_task_t as the second
3554/// argument.
3555/// \code
3556/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
3557/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
3558/// For taskloops:
3559/// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3560/// tt->reductions, tt->shareds);
3561/// return 0;
3562/// }
3563/// \endcode
3564static llvm::Function *
3565emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
3566 OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
3567 QualType KmpTaskTWithPrivatesPtrQTy,
3568 QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
3569 QualType SharedsPtrTy, llvm::Function *TaskFunction,
3570 llvm::Value *TaskPrivatesMap) {
3571 ASTContext &C = CGM.getContext();
3572 FunctionArgList Args;
3573 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3574 ImplicitParamDecl::Other);
3575 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3576 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3577 ImplicitParamDecl::Other);
3578 Args.push_back(&GtidArg);
3579 Args.push_back(&TaskTypeArg);
3580 const auto &TaskEntryFnInfo =
3581 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3582 llvm::FunctionType *TaskEntryTy =
3583 CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
3584 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
3585 auto *TaskEntry = llvm::Function::Create(
3586 TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
3587 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
3588 TaskEntry->setDoesNotRecurse();
3589 CodeGenFunction CGF(CGM);
3590 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
3591 Loc, Loc);
3592
3593 // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
3594 // tt,
3595 // For taskloops:
3596 // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3597 // tt->task_data.shareds);
3598 llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
3599 CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
3600 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3601 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3602 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3603 const auto *KmpTaskTWithPrivatesQTyRD =
3604 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3605 LValue Base =
3606 CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3607 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3608 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3609 LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
3610 llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
3611
3612 auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3613 LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
3614 llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3615 CGF.EmitLoadOfScalar(SharedsLVal, Loc),
3616 CGF.ConvertTypeForMem(SharedsPtrTy));
3617
3618 auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3619 llvm::Value *PrivatesParam;
3620 if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3621 LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
3622 PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3623 PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
3624 } else {
3625 PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
3626 }
3627
3628 llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
3629 TaskPrivatesMap,
3630 CGF.Builder
3631 .CreatePointerBitCastOrAddrSpaceCast(
3632 TDBase.getAddress(CGF), CGF.VoidPtrTy)
3633 .getPointer()};
3634 SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
3635 std::end(CommonArgs));
3636 if (isOpenMPTaskLoopDirective(Kind)) {
3637 auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
3638 LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
3639 llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
3640 auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
3641 LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
3642 llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
3643 auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
3644 LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
3645 llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
3646 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3647 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
3648 llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
3649 auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
3650 LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
3651 llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
3652 CallArgs.push_back(LBParam);
3653 CallArgs.push_back(UBParam);
3654 CallArgs.push_back(StParam);
3655 CallArgs.push_back(LIParam);
3656 CallArgs.push_back(RParam);
3657 }
3658 CallArgs.push_back(SharedsParam);
3659
3660 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
3661 CallArgs);
3662 CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
3663 CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
3664 CGF.FinishFunction();
3665 return TaskEntry;
3666}
3667
3668static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
3669 SourceLocation Loc,
3670 QualType KmpInt32Ty,
3671 QualType KmpTaskTWithPrivatesPtrQTy,
3672 QualType KmpTaskTWithPrivatesQTy) {
3673 ASTContext &C = CGM.getContext();
3674 FunctionArgList Args;
3675 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3676 ImplicitParamDecl::Other);
3677 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3678 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3679 ImplicitParamDecl::Other);
3680 Args.push_back(&GtidArg);
3681 Args.push_back(&TaskTypeArg);
3682 const auto &DestructorFnInfo =
3683 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3684 llvm::FunctionType *DestructorFnTy =
3685 CGM.getTypes().GetFunctionType(DestructorFnInfo);
3686 std::string Name =
3687 CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
3688 auto *DestructorFn =
3689 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
3690 Name, &CGM.getModule());
3691 CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
3692 DestructorFnInfo);
3693 DestructorFn->setDoesNotRecurse();
3694 CodeGenFunction CGF(CGM);
3695 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
3696 Args, Loc, Loc);
3697
3698 LValue Base = CGF.EmitLoadOfPointerLValue(
3699 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3700 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3701 const auto *KmpTaskTWithPrivatesQTyRD =
3702 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3703 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3704 Base = CGF.EmitLValueForField(Base, *FI);
3705 for (const auto *Field :
3706 cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
3707 if (QualType::DestructionKind DtorKind =
3708 Field->getType().isDestructedType()) {
3709 LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
3710 CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
3711 }
3712 }
3713 CGF.FinishFunction();
3714 return DestructorFn;
3715}
3716
3717/// Emit a privates mapping function for correct handling of private and
3718/// firstprivate variables.
3719/// \code
3720/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
3721/// **noalias priv1,..., <tyn> **noalias privn) {
3722/// *priv1 = &.privates.priv1;
3723/// ...;
3724/// *privn = &.privates.privn;
3725/// }
3726/// \endcode
3727static llvm::Value *
3728emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
3729 const OMPTaskDataTy &Data, QualType PrivatesQTy,
3730 ArrayRef<PrivateDataTy> Privates) {
3731 ASTContext &C = CGM.getContext();
3732 FunctionArgList Args;
3733 ImplicitParamDecl TaskPrivatesArg(
3734 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3735 C.getPointerType(PrivatesQTy).withConst().withRestrict(),
3736 ImplicitParamDecl::Other);
3737 Args.push_back(&TaskPrivatesArg);
3738 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
3739 unsigned Counter = 1;
3740 for (const Expr *E : Data.PrivateVars) {
3741 Args.push_back(ImplicitParamDecl::Create(
3742 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3743 C.getPointerType(C.getPointerType(E->getType()))
3744 .withConst()
3745 .withRestrict(),
3746 ImplicitParamDecl::Other));
3747 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3748 PrivateVarsPos[VD] = Counter;
3749 ++Counter;
3750 }
3751 for (const Expr *E : Data.FirstprivateVars) {
3752 Args.push_back(ImplicitParamDecl::Create(
3753 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3754 C.getPointerType(C.getPointerType(E->getType()))
3755 .withConst()
3756 .withRestrict(),
3757 ImplicitParamDecl::Other));
3758 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3759 PrivateVarsPos[VD] = Counter;
3760 ++Counter;
3761 }
3762 for (const Expr *E : Data.LastprivateVars) {
3763 Args.push_back(ImplicitParamDecl::Create(
3764 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3765 C.getPointerType(C.getPointerType(E->getType()))
3766 .withConst()
3767 .withRestrict(),
3768 ImplicitParamDecl::Other));
3769 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3770 PrivateVarsPos[VD] = Counter;
3771 ++Counter;
3772 }
3773 for (const VarDecl *VD : Data.PrivateLocals) {
3774 QualType Ty = VD->getType().getNonReferenceType();
3775 if (VD->getType()->isLValueReferenceType())
3776 Ty = C.getPointerType(Ty);
3777 if (isAllocatableDecl(VD))
3778 Ty = C.getPointerType(Ty);
3779 Args.push_back(ImplicitParamDecl::Create(
3780 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3781 C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
3782 ImplicitParamDecl::Other));
3783 PrivateVarsPos[VD] = Counter;
3784 ++Counter;
3785 }
3786 const auto &TaskPrivatesMapFnInfo =
3787 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3788 llvm::FunctionType *TaskPrivatesMapTy =
3789 CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
3790 std::string Name =
3791 CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
3792 auto *TaskPrivatesMap = llvm::Function::Create(
3793 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
3794 &CGM.getModule());
3795 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
3796 TaskPrivatesMapFnInfo);
3797 if (CGM.getLangOpts().Optimize) {
3798 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
3799 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
3800 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
3801 }
3802 CodeGenFunction CGF(CGM);
3803 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
3804 TaskPrivatesMapFnInfo, Args, Loc, Loc);
3805
3806 // *privi = &.privates.privi;
3807 LValue Base = CGF.EmitLoadOfPointerLValue(
3808 CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
3809 TaskPrivatesArg.getType()->castAs<PointerType>());
3810 const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
3811 Counter = 0;
3812 for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
3813 LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
3814 const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
3815 LValue RefLVal =
3816 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
3817 LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
3818 RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
3819 CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
3820 ++Counter;
3821 }
3822 CGF.FinishFunction();
3823 return TaskPrivatesMap;
3824}
3825
3826/// Emit initialization for private variables in task-based directives.
3827static void emitPrivatesInit(CodeGenFunction &CGF,
3828 const OMPExecutableDirective &D,
3829 Address KmpTaskSharedsPtr, LValue TDBase,
3830 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3831 QualType SharedsTy, QualType SharedsPtrTy,
3832 const OMPTaskDataTy &Data,
3833 ArrayRef<PrivateDataTy> Privates, bool ForDup) {
3834 ASTContext &C = CGF.getContext();
3835 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3836 LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
3837 OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
3838 ? OMPD_taskloop
3839 : OMPD_task;
3840 const CapturedStmt &CS = *D.getCapturedStmt(Kind);
3841 CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
3842 LValue SrcBase;
3843 bool IsTargetTask =
3844 isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
3845 isOpenMPTargetExecutionDirective(D.getDirectiveKind());
3846 // For target-based directives skip 4 firstprivate arrays BasePointersArray,
3847 // PointersArray, SizesArray, and MappersArray. The original variables for
3848 // these arrays are not captured and we get their addresses explicitly.
3849 if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
3850 (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
3851 SrcBase = CGF.MakeAddrLValue(
3852 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3853 KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
3854 SharedsTy);
3855 }
3856 FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
3857 for (const PrivateDataTy &Pair : Privates) {
3858 // Do not initialize private locals.
3859 if (Pair.second.isLocalPrivate()) {
3860 ++FI;
3861 continue;
3862 }
3863 const VarDecl *VD = Pair.second.PrivateCopy;
3864 const Expr *Init = VD->getAnyInitializer();
3865 if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
3866 !CGF.isTrivialInitializer(Init)))) {
3867 LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
3868 if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
3869 const VarDecl *OriginalVD = Pair.second.Original;
3870 // Check if the variable is the target-based BasePointersArray,
3871 // PointersArray, SizesArray, or MappersArray.
3872 LValue SharedRefLValue;
3873 QualType Type = PrivateLValue.getType();
3874 const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
3875 if (IsTargetTask && !SharedField) {
3876 assert(isa<ImplicitParamDecl>(OriginalVD) &&
3877 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
3878 cast<CapturedDecl>(OriginalVD->getDeclContext())
3879 ->getNumParams() == 0 &&
3880 isa<TranslationUnitDecl>(
3881 cast<CapturedDecl>(OriginalVD->getDeclContext())
3882 ->getDeclContext()) &&
3883 "Expected artificial target data variable.");
3884 SharedRefLValue =
3885 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
3886 } else if (ForDup) {
3887 SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
3888 SharedRefLValue = CGF.MakeAddrLValue(
3889 Address(SharedRefLValue.getPointer(CGF),
3890 C.getDeclAlign(OriginalVD)),
3891 SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
3892 SharedRefLValue.getTBAAInfo());
3893 } else if (CGF.LambdaCaptureFields.count(
3894 Pair.second.Original->getCanonicalDecl()) > 0 ||
3895 dyn_cast_or_null<BlockDecl>(CGF.CurCodeDecl)) {
3896 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3897 } else {
3898 // Processing for implicitly captured variables.
3899 InlinedOpenMPRegionRAII Region(
3900 CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
3901 /*HasCancel=*/false, /*NoInheritance=*/true);
3902 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3903 }
3904 if (Type->isArrayType()) {
3905 // Initialize firstprivate array.
3906 if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
3907 // Perform simple memcpy.
3908 CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
3909 } else {
3910 // Initialize firstprivate array using element-by-element
3911 // initialization.
3912 CGF.EmitOMPAggregateAssign(
3913 PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
3914 Type,
3915 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
3916 Address SrcElement) {
3917 // Clean up any temporaries needed by the initialization.
3918 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3919 InitScope.addPrivate(
3920 Elem, [SrcElement]() -> Address { return SrcElement; });
3921 (void)InitScope.Privatize();
3922 // Emit initialization for single element.
3923 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
3924 CGF, &CapturesInfo);
3925 CGF.EmitAnyExprToMem(Init, DestElement,
3926 Init->getType().getQualifiers(),
3927 /*IsInitializer=*/false);
3928 });
3929 }
3930 } else {
3931 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3932 InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
3933 return SharedRefLValue.getAddress(CGF);
3934 });
3935 (void)InitScope.Privatize();
3936 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
3937 CGF.EmitExprAsInit(Init, VD, PrivateLValue,
3938 /*capturedByInit=*/false);
3939 }
3940 } else {
3941 CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
3942 }
3943 }
3944 ++FI;
3945 }
3946}
3947
3948/// Check if duplication function is required for taskloops.
3949static bool checkInitIsRequired(CodeGenFunction &CGF,
3950 ArrayRef<PrivateDataTy> Privates) {
3951 bool InitRequired = false;
3952 for (const PrivateDataTy &Pair : Privates) {
3953 if (Pair.second.isLocalPrivate())
3954 continue;
3955 const VarDecl *VD = Pair.second.PrivateCopy;
3956 const Expr *Init = VD->getAnyInitializer();
3957 InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
3958 !CGF.isTrivialInitializer(Init));
3959 if (InitRequired)
3960 break;
3961 }
3962 return InitRequired;
3963}
3964
3965
3966/// Emit task_dup function (for initialization of
3967/// private/firstprivate/lastprivate vars and last_iter flag)
3968/// \code
3969/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
3970/// lastpriv) {
3971/// // setup lastprivate flag
3972/// task_dst->last = lastpriv;
3973/// // could be constructor calls here...
3974/// }
3975/// \endcode
3976static llvm::Value *
3977emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
3978 const OMPExecutableDirective &D,
3979 QualType KmpTaskTWithPrivatesPtrQTy,
3980 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3981 const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
3982 QualType SharedsPtrTy, const OMPTaskDataTy &Data,
3983 ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
3984 ASTContext &C = CGM.getContext();
3985 FunctionArgList Args;
3986 ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3987 KmpTaskTWithPrivatesPtrQTy,
3988 ImplicitParamDecl::Other);
3989 ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3990 KmpTaskTWithPrivatesPtrQTy,
3991 ImplicitParamDecl::Other);
3992 ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3993 ImplicitParamDecl::Other);
3994 Args.push_back(&DstArg);
3995 Args.push_back(&SrcArg);
3996 Args.push_back(&LastprivArg);
3997 const auto &TaskDupFnInfo =
3998 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3999 llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
4000 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
4001 auto *TaskDup = llvm::Function::Create(
4002 TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4003 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
4004 TaskDup->setDoesNotRecurse();
4005 CodeGenFunction CGF(CGM);
4006 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
4007 Loc);
4008
4009 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4010 CGF.GetAddrOfLocalVar(&DstArg),
4011 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4012 // task_dst->liter = lastpriv;
4013 if (WithLastIter) {
4014 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4015 LValue Base = CGF.EmitLValueForField(
4016 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4017 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4018 llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
4019 CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
4020 CGF.EmitStoreOfScalar(Lastpriv, LILVal);
4021 }
4022
4023 // Emit initial values for private copies (if any).
4024 assert(!Privates.empty());
4025 Address KmpTaskSharedsPtr = Address::invalid();
4026 if (!Data.FirstprivateVars.empty()) {
4027 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4028 CGF.GetAddrOfLocalVar(&SrcArg),
4029 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4030 LValue Base = CGF.EmitLValueForField(
4031 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4032 KmpTaskSharedsPtr = Address(
4033 CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
4034 Base, *std::next(KmpTaskTQTyRD->field_begin(),
4035 KmpTaskTShareds)),
4036 Loc),
4037 CGM.getNaturalTypeAlignment(SharedsTy));
4038 }
4039 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
4040 SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
4041 CGF.FinishFunction();
4042 return TaskDup;
4043}
4044
4045/// Checks if destructor function is required to be generated.
4046/// \return true if cleanups are required, false otherwise.
4047static bool
4048checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4049 ArrayRef<PrivateDataTy> Privates) {
4050 for (const PrivateDataTy &P : Privates) {
4051 if (P.second.isLocalPrivate())
4052 continue;
4053 QualType Ty = P.second.Original->getType().getNonReferenceType();
4054 if (Ty.isDestructedType())
4055 return true;
4056 }
4057 return false;
4058}
4059
4060namespace {
4061/// Loop generator for OpenMP iterator expression.
4062class OMPIteratorGeneratorScope final
4063 : public CodeGenFunction::OMPPrivateScope {
4064 CodeGenFunction &CGF;
4065 const OMPIteratorExpr *E = nullptr;
4066 SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
4067 SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
4068 OMPIteratorGeneratorScope() = delete;
4069 OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
4070
4071public:
4072 OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
4073 : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
4074 if (!E)
4075 return;
4076 SmallVector<llvm::Value *, 4> Uppers;
4077 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
4078 Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
4079 const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
4080 addPrivate(VD, [&CGF, VD]() {
4081 return CGF.CreateMemTemp(VD->getType(), VD->getName());
4082 });
4083 const OMPIteratorHelperData &HelperData = E->getHelper(I);
4084 addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
4085 return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
4086 "counter.addr");
4087 });
4088 }
4089 Privatize();
4090
4091 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
4092 const OMPIteratorHelperData &HelperData = E->getHelper(I);
4093 LValue CLVal =
4094 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
4095 HelperData.CounterVD->getType());
4096 // Counter = 0;
4097 CGF.EmitStoreOfScalar(
4098 llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
4099 CLVal);
4100 CodeGenFunction::JumpDest &ContDest =
4101 ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
4102 CodeGenFunction::JumpDest &ExitDest =
4103 ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
4104 // N = <number-of-iterations>;
4105 llvm::Value *N = Uppers[I];
4106 // cont:
4107 // if (Counter < N) goto body; else goto exit;
4108 CGF.EmitBlock(ContDest.getBlock());
4109 auto *CVal =
4110 CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
4111 llvm::Value *Cmp =
4112 HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
4113 ? CGF.Builder.CreateICmpSLT(CVal, N)
4114 : CGF.Builder.CreateICmpULT(CVal, N);
4115 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
4116 CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
4117 // body:
4118 CGF.EmitBlock(BodyBB);
4119 // Iteri = Begini + Counter * Stepi;
4120 CGF.EmitIgnoredExpr(HelperData.Update);
4121 }
4122 }
4123 ~OMPIteratorGeneratorScope() {
4124 if (!E)
4125 return;
4126 for (unsigned I = E->numOfIterators(); I > 0; --I) {
4127 // Counter = Counter + 1;
4128 const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
4129 CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
4130 // goto cont;
4131 CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
4132 // exit:
4133 CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
4134 }
4135 }
4136};
4137} // namespace
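
// Illustrative sketch (assumed example, not from this file): the scope above
// lowers an OpenMP iterator modifier into counter-driven loops. For
//
//   #pragma omp task depend(iterator(i = 0:n), in: a[i])
//
// the emitted control flow is conceptually
//
//   for (size_t counter = 0; counter < n; ++counter) {
//     i = 0 + counter * 1;                  // HelperData.Update
//     /* emit one dependence record for a[i] */
//   }
//
// with the constructor emitting the loop headers and the destructor emitting
// the counter increments, back-branches, and exit blocks.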
4138
4139static std::pair<llvm::Value *, llvm::Value *>
4140getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
4141 const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
4142 llvm::Value *Addr;
4143 if (OASE) {
4144 const Expr *Base = OASE->getBase();
4145 Addr = CGF.EmitScalarExpr(Base);
4146 } else {
4147 Addr = CGF.EmitLValue(E).getPointer(CGF);
4148 }
4149 llvm::Value *SizeVal;
4150 QualType Ty = E->getType();
4151 if (OASE) {
4152 SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
4153 for (const Expr *SE : OASE->getDimensions()) {
4154 llvm::Value *Sz = CGF.EmitScalarExpr(SE);
4155 Sz = CGF.EmitScalarConversion(
4156 Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
4157 SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
4158 }
4159 } else if (const auto *ASE =
4160 dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
4161 LValue UpAddrLVal =
4162 CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
4163 Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
4164 llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
4165 UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), /*Idx0=*/1);
4166 llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
4167 llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
4168 SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
4169 } else {
4170 SizeVal = CGF.getTypeSize(Ty);
4171 }
4172 return std::make_pair(Addr, SizeVal);
4173}
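
// Illustrative size computations for the three cases above (assuming
// declarations "int *p;" and "int a[100];"):
//   ([n][m])p -> sizeof(int) * n * m                        (array shaping)
//   a[lo:len] -> (char *)(&a[lo + len - 1] + 1) - (char *)&a[lo]
//                                                           (array section)
//   otherwise -> sizeof(<type of E>)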
4174
4175/// Builds kmp_task_affinity_info_t, if it is not built yet, and builds flags type.
4176static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
4177 QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
4178 if (KmpTaskAffinityInfoTy.isNull()) {
4179 RecordDecl *KmpAffinityInfoRD =
4180 C.buildImplicitRecord("kmp_task_affinity_info_t");
4181 KmpAffinityInfoRD->startDefinition();
4182 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
4183 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
4184 addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
4185 KmpAffinityInfoRD->completeDefinition();
4186 KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
4187 }
4188}
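
// Rough C equivalent of the implicit record built above (field names follow
// the BaseAddr/Len/Flags enum used later; the authoritative bitfield layout
// is in openmp/runtime/src/kmp.h):
//
//   typedef struct kmp_task_affinity_info {
//     intptr_t base_addr; // start of the affined storage
//     size_t   len;       // its size in bytes
//     uint32_t flags;     // the 32-bit unsigned FlagsTy above
//   } kmp_task_affinity_info_t;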
4189
4190CGOpenMPRuntime::TaskResultTy
4191CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4192 const OMPExecutableDirective &D,
4193 llvm::Function *TaskFunction, QualType SharedsTy,
4194 Address Shareds, const OMPTaskDataTy &Data) {
4195 ASTContext &C = CGM.getContext();
4196 llvm::SmallVector<PrivateDataTy, 4> Privates;
4197 // Aggregate privates and sort them by alignment (descending).
4198 const auto *I = Data.PrivateCopies.begin();
4199 for (const Expr *E : Data.PrivateVars) {
4200 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4201 Privates.emplace_back(
4202 C.getDeclAlign(VD),
4203 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4204 /*PrivateElemInit=*/nullptr));
4205 ++I;
4206 }
4207 I = Data.FirstprivateCopies.begin();
4208 const auto *IElemInitRef = Data.FirstprivateInits.begin();
4209 for (const Expr *E : Data.FirstprivateVars) {
4210 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4211 Privates.emplace_back(
4212 C.getDeclAlign(VD),
4213 PrivateHelpersTy(
4214 E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4215 cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
4216 ++I;
4217 ++IElemInitRef;
4218 }
4219 I = Data.LastprivateCopies.begin();
4220 for (const Expr *E : Data.LastprivateVars) {
4221 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4222 Privates.emplace_back(
4223 C.getDeclAlign(VD),
4224 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4225 /*PrivateElemInit=*/nullptr));
4226 ++I;
4227 }
4228 for (const VarDecl *VD : Data.PrivateLocals) {
4229 if (isAllocatableDecl(VD))
4230 Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
4231 else
4232 Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
4233 }
4234 llvm::stable_sort(Privates,
4235 [](const PrivateDataTy &L, const PrivateDataTy &R) {
4236 return L.first > R.first;
4237 });
4238 QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4239 // Build type kmp_routine_entry_t (if not built yet).
4240 emitKmpRoutineEntryT(KmpInt32Ty);
4241 // Build type kmp_task_t (if not built yet).
4242 if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
4243 if (SavedKmpTaskloopTQTy.isNull()) {
4244 SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4245 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4246 }
4247 KmpTaskTQTy = SavedKmpTaskloopTQTy;
4248 } else {
4249 assert((D.getDirectiveKind() == OMPD_task ||
4250 isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
4251 isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
4252 "Expected taskloop, task or target directive");
4253 if (SavedKmpTaskTQTy.isNull()) {
4254 SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4255 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4256 }
4257 KmpTaskTQTy = SavedKmpTaskTQTy;
4258 }
4259 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4260 // Build particular struct kmp_task_t for the given task.
4261 const RecordDecl *KmpTaskTWithPrivatesQTyRD =
4262 createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
4263 QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4264 QualType KmpTaskTWithPrivatesPtrQTy =
4265 C.getPointerType(KmpTaskTWithPrivatesQTy);
4266 llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4267 llvm::Type *KmpTaskTWithPrivatesPtrTy =
4268 KmpTaskTWithPrivatesTy->getPointerTo();
4269 llvm::Value *KmpTaskTWithPrivatesTySize =
4270 CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4271 QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4272
4273 // Emit initial values for private copies (if any).
4274 llvm::Value *TaskPrivatesMap = nullptr;
4275 llvm::Type *TaskPrivatesMapTy =
4276 std::next(TaskFunction->arg_begin(), 3)->getType();
4277 if (!Privates.empty()) {
4278 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4279 TaskPrivatesMap =
4280 emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
4281 TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4282 TaskPrivatesMap, TaskPrivatesMapTy);
4283 } else {
4284 TaskPrivatesMap = llvm::ConstantPointerNull::get(
4285 cast<llvm::PointerType>(TaskPrivatesMapTy));
4286 }
4287 // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4288 // kmp_task_t *tt);
4289 llvm::Function *TaskEntry = emitProxyTaskFunction(
4290 CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4291 KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4292 TaskPrivatesMap);
4293
4294 // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4295 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4296 // kmp_routine_entry_t *task_entry);
4297 // Task flags. Format is taken from
4298 // https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h,
4299 // description of kmp_tasking_flags struct.
4300 enum {
4301 TiedFlag = 0x1,
4302 FinalFlag = 0x2,
4303 DestructorsFlag = 0x8,
4304 PriorityFlag = 0x20,
4305 DetachableFlag = 0x40,
4306 };
4307 unsigned Flags = Data.Tied ? TiedFlag : 0;
4308 bool NeedsCleanup = false;
4309 if (!Privates.empty()) {
4310 NeedsCleanup =
4311 checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
4312 if (NeedsCleanup)
4313 Flags = Flags | DestructorsFlag;
4314 }
4315 if (Data.Priority.getInt())
4316 Flags = Flags | PriorityFlag;
4317 if (D.hasClausesOfKind<OMPDetachClause>())
4318 Flags = Flags | DetachableFlag;
4319 llvm::Value *TaskFlags =
4320 Data.Final.getPointer()
4321 ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4322 CGF.Builder.getInt32(FinalFlag),
4323 CGF.Builder.getInt32(/*C=*/0))
4324 : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4325 TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
4326 llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4327 SmallVector<llvm::Value *, 8> AllocArgs = {emitUpdateLocation(CGF, Loc),
4328 getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
4329 SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4330 TaskEntry, KmpRoutineEntryPtrTy)};
4331 llvm::Value *NewTask;
4332 if (D.hasClausesOfKind<OMPNowaitClause>()) {
4333 // Check if we have any device clause associated with the directive.
4334 const Expr *Device = nullptr;
4335 if (auto *C = D.getSingleClause<OMPDeviceClause>())
4336 Device = C->getDevice();
4337 // Emit the device ID if any, otherwise use the default value.
4338 llvm::Value *DeviceID;
4339 if (Device)
4340 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
4341 CGF.Int64Ty, /*isSigned=*/true);
4342 else
4343 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
4344 AllocArgs.push_back(DeviceID);
4345 NewTask = CGF.EmitRuntimeCall(
4346 OMPBuilder.getOrCreateRuntimeFunction(
4347 CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
4348 AllocArgs);
4349 } else {
4350 NewTask =
4351 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4352 CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
4353 AllocArgs);
4354 }
4355 // Emit detach clause initialization.
4356 // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
4357 // task_descriptor);
4358 if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
4359 const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
4360 LValue EvtLVal = CGF.EmitLValue(Evt);
4361
4362 // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
4363 // int gtid, kmp_task_t *task);
4364 llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
4365 llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
4366 Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
4367 llvm::Value *EvtVal = CGF.EmitRuntimeCall(
4368 OMPBuilder.getOrCreateRuntimeFunction(
4369 CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
4370 {Loc, Tid, NewTask});
4371 EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
4372 Evt->getExprLoc());
4373 CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
4374 }
4375 // Process affinity clauses.
4376 if (D.hasClausesOfKind<OMPAffinityClause>()) {
4377 // Process list of affinity data.
4378 ASTContext &C = CGM.getContext();
4379 Address AffinitiesArray = Address::invalid();
4380 // Calculate number of elements to form the array of affinity data.
4381 llvm::Value *NumOfElements = nullptr;
4382 unsigned NumAffinities = 0;
4383 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4384 if (const Expr *Modifier = C->getModifier()) {
4385 const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
4386 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4387 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4388 Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
4389 NumOfElements =
4390 NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
4391 }
4392 } else {
4393 NumAffinities += C->varlist_size();
4394 }
4395 }
4396 getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
4397 // Field ids in the kmp_task_affinity_info record.
4398 enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
4399
4400 QualType KmpTaskAffinityInfoArrayTy;
4401 if (NumOfElements) {
4402 NumOfElements = CGF.Builder.CreateNUWAdd(
4403 llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
4404 OpaqueValueExpr OVE(
4405 Loc,
4406 C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
4407 VK_PRValue);
4408 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
4409 RValue::get(NumOfElements));
4410 KmpTaskAffinityInfoArrayTy =
4411 C.getVariableArrayType(KmpTaskAffinityInfoTy, &OVE, ArrayType::Normal,
4412 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
4413 // Properly emit variable-sized array.
4414 auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
4415 ImplicitParamDecl::Other);
4416 CGF.EmitVarDecl(*PD);
4417 AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
4418 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
4419 /*isSigned=*/false);
4420 } else {
4421 KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
4422 KmpTaskAffinityInfoTy,
4423 llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
4424 ArrayType::Normal, /*IndexTypeQuals=*/0);
4425 AffinitiesArray =
4426 CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
4427 AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
4428 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
4429 /*isSigned=*/false);
4430 }
4431
4432 const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
4433 // Fill the array with elements that have no iterator modifier.
4434 unsigned Pos = 0;
4435 bool HasIterator = false;
4436 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4437 if (C->getModifier()) {
4438 HasIterator = true;
4439 continue;
4440 }
4441 for (const Expr *E : C->varlists()) {
4442 llvm::Value *Addr;
4443 llvm::Value *Size;
4444 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4445 LValue Base =
4446 CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
4447 KmpTaskAffinityInfoTy);
4448 // affs[i].base_addr = &<Affinities[i].second>;
4449 LValue BaseAddrLVal = CGF.EmitLValueForField(
4450 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
4451 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4452 BaseAddrLVal);
4453 // affs[i].len = sizeof(<Affinities[i].second>);
4454 LValue LenLVal = CGF.EmitLValueForField(
4455 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
4456 CGF.EmitStoreOfScalar(Size, LenLVal);
4457 ++Pos;
4458 }
4459 }
4460 LValue PosLVal;
4461 if (HasIterator) {
4462 PosLVal = CGF.MakeAddrLValue(
4463 CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
4464 C.getSizeType());
4465 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
4466 }
4467 // Process elements with iterators.
4468 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4469 const Expr *Modifier = C->getModifier();
4470 if (!Modifier)
4471 continue;
4472 OMPIteratorGeneratorScope IteratorScope(
4473 CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
4474 for (const Expr *E : C->varlists()) {
4475 llvm::Value *Addr;
4476 llvm::Value *Size;
4477 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4478 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4479 LValue Base = CGF.MakeAddrLValue(
4480 Address(CGF.Builder.CreateGEP(AffinitiesArray.getElementType(),
4481 AffinitiesArray.getPointer(), Idx),
4482 AffinitiesArray.getAlignment()),
4483 KmpTaskAffinityInfoTy);
4484 // affs[i].base_addr = &<Affinities[i].second>;
4485 LValue BaseAddrLVal = CGF.EmitLValueForField(
4486 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
4487 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4488 BaseAddrLVal);
4489 // affs[i].len = sizeof(<Affinities[i].second>);
4490 LValue LenLVal = CGF.EmitLValueForField(
4491 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
4492 CGF.EmitStoreOfScalar(Size, LenLVal);
4493 Idx = CGF.Builder.CreateNUWAdd(
4494 Idx, llvm::ConstantInt::get(Idx->getType(), 1));
4495 CGF.EmitStoreOfScalar(Idx, PosLVal);
4496 }
4497 }
4498 // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
4499 // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
4500 // naffins, kmp_task_affinity_info_t *affin_list);
4501 llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
4502 llvm::Value *GTid = getThreadID(CGF, Loc);
4503 llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4504 AffinitiesArray.getPointer(), CGM.VoidPtrTy);
4505 // FIXME: Emit the call and ignore its result for now, until the
4506 // runtime function is properly implemented.
4507 (void)CGF.EmitRuntimeCall(
4508 OMPBuilder.getOrCreateRuntimeFunction(
4509 CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
4510 {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
4511 }
4512 llvm::Value *NewTaskNewTaskTTy =
4513 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4514 NewTask, KmpTaskTWithPrivatesPtrTy);
4515 LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4516 KmpTaskTWithPrivatesQTy);
4517 LValue TDBase =
4518 CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4519 // Fill the data in the resulting kmp_task_t record.
4520 // Copy shareds if there are any.
4521 Address KmpTaskSharedsPtr = Address::invalid();
4522 if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4523 KmpTaskSharedsPtr =
4524 Address(CGF.EmitLoadOfScalar(
4525 CGF.EmitLValueForField(
4526 TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4527 KmpTaskTShareds)),
4528 Loc),
4529 CGM.getNaturalTypeAlignment(SharedsTy));
4530 LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
4531 LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
4532 CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
4533 }
4534 // Emit initial values for private copies (if any).
4535 TaskResultTy Result;
4536 if (!Privates.empty()) {
4537 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4538 SharedsTy, SharedsPtrTy, Data, Privates,
4539 /*ForDup=*/false);
4540 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4541 (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4542 Result.TaskDupFn = emitTaskDupFunction(
4543 CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4544 KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4545 /*WithLastIter=*/!Data.LastprivateVars.empty());
4546 }
4547 }
4548 // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4549 enum { Priority = 0, Destructors = 1 };
4550 // Provide pointer to function with destructors for privates.
4551 auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4552 const RecordDecl *KmpCmplrdataUD =
4553 (*FI)->getType()->getAsUnionType()->getDecl();
4554 if (NeedsCleanup) {
4555 llvm::Value *DestructorFn = emitDestructorsFunction(
4556 CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4557 KmpTaskTWithPrivatesQTy);
4558 LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4559 LValue DestructorsLV = CGF.EmitLValueForField(
4560 Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4561 CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4562 DestructorFn, KmpRoutineEntryPtrTy),
4563 DestructorsLV);
4564 }
4565 // Set priority.
4566 if (Data.Priority.getInt()) {
4567 LValue Data2LV = CGF.EmitLValueForField(
4568 TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4569 LValue PriorityLV = CGF.EmitLValueForField(
4570 Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4571 CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4572 }
4573 Result.NewTask = NewTask;
4574 Result.TaskEntry = TaskEntry;
4575 Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4576 Result.TDBase = TDBase;
4577 Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4578 return Result;
4579}
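
// Illustrative net effect (assumed example, not from this file): for a plain
// "#pragma omp task shared(a) firstprivate(b)" the function above emits
// roughly
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, flags,
//                       sizeof(kmp_task_t_with_privates), sizeof(shareds),
//                       &.omp_task_entry.);
//   memcpy(t->shareds, &captured_shareds, sizeof(shareds));
//   /* initialize private copies, destructor thunk, priority */
//
// with __kmpc_omp_target_task_alloc (taking an extra device id) used instead
// when the directive carries a nowait clause.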
4580
4581namespace {
4582/// Dependence kind for RTL.
4583enum RTLDependenceKindTy {
4584 DepIn = 0x01,
4585 DepInOut = 0x3,
4586 DepMutexInOutSet = 0x4
4587};
4588/// Field ids in the kmp_depend_info record.
4589enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
4590} // namespace
4591
4592/// Translates internal dependency kind into the runtime kind.
4593static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
4594 RTLDependenceKindTy DepKind;
4595 switch (K) {
4596 case OMPC_DEPEND_in:
4597 DepKind = DepIn;
4598 break;
4599 // Out and InOut dependencies must use the same code.
4600 case OMPC_DEPEND_out:
4601 case OMPC_DEPEND_inout:
4602 DepKind = DepInOut;
4603 break;
4604 case OMPC_DEPEND_mutexinoutset:
4605 DepKind = DepMutexInOutSet;
4606 break;
4607 case OMPC_DEPEND_source:
4608 case OMPC_DEPEND_sink:
4609 case OMPC_DEPEND_depobj:
4610 case OMPC_DEPEND_unknown:
4611 llvm_unreachable("Unknown task dependence type");
4612 }
4613 return DepKind;
4614}
4615
4616/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
4617static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
4618 QualType &FlagsTy) {
4619 FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4620 if (KmpDependInfoTy.isNull()) {
4621 RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4622 KmpDependInfoRD->startDefinition();
4623 addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4624 addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4625 addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4626 KmpDependInfoRD->completeDefinition();
4627 KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4628 }
4629}
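
// Rough C equivalent of the implicit kmp_depend_info record built above
// (the authoritative declaration is in openmp/runtime/src/kmp.h):
//
//   typedef struct kmp_depend_info {
//     intptr_t base_addr; // address of the dependence storage
//     size_t   len;       // its size in bytes
//     uint8_t  flags;     // FlagsTy: unsigned integer of bool's bitwidth
//   } kmp_depend_info_t;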
4630
4631std::pair<llvm::Value *, LValue>
4632CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
4633 SourceLocation Loc) {
4634 ASTContext &C = CGM.getContext();
4635 QualType FlagsTy;
4636 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4637 RecordDecl *KmpDependInfoRD =
4638 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4639 LValue Base = CGF.EmitLoadOfPointerLValue(
4640 DepobjLVal.getAddress(CGF),
4641 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
4642 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
4643 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4644 Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
4645 Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
4646 Base.getTBAAInfo());
4647 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
4648 Addr.getElementType(), Addr.getPointer(),
4649 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
4650 LValue NumDepsBase = CGF.MakeAddrLValue(
4651 Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
4652 Base.getBaseInfo(), Base.getTBAAInfo());
4653 // NumDeps = deps[i].base_addr;
4654 LValue BaseAddrLVal = CGF.EmitLValueForField(
4655 NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4656 llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
4657 return std::make_pair(NumDeps, Base);
4658}
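
// Note on the GEP with index -1 above: it relies on the depobj layout
// produced by emitDepobjDependClause below, where the record just before the
// user-visible pointer stores the dependence count in its base_addr field:
//
//   deps[-1].base_addr == N;  // element count
//   deps[0] .. deps[N-1]      // the actual kmp_depend_info records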
4659
4660static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4661 llvm::PointerUnion<unsigned *, LValue *> Pos,
4662 const OMPTaskDataTy::DependData &Data,
4663 Address DependenciesArray) {
4664 CodeGenModule &CGM = CGF.CGM;
4665 ASTContext &C = CGM.getContext();
4666 QualType FlagsTy;
4667 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4668 RecordDecl *KmpDependInfoRD =
4669 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4670 llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4671
4672 OMPIteratorGeneratorScope IteratorScope(
4673 CGF, cast_or_null<OMPIteratorExpr>(
4674 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4675 : nullptr));
4676 for (const Expr *E : Data.DepExprs) {
4677 llvm::Value *Addr;
4678 llvm::Value *Size;
4679 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4680 LValue Base;
4681 if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
4682 Base = CGF.MakeAddrLValue(
4683 CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
4684 } else {
4685 LValue &PosLVal = *Pos.get<LValue *>();
4686 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4687 Base = CGF.MakeAddrLValue(
4688 Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
4689 DependenciesArray.getPointer(), Idx),
4690 DependenciesArray.getAlignment()),
4691 KmpDependInfoTy);
4692 }
4693 // deps[i].base_addr = &<Dependencies[i].second>;
4694 LValue BaseAddrLVal = CGF.EmitLValueForField(
4695 Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4696 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4697 BaseAddrLVal);
4698 // deps[i].len = sizeof(<Dependencies[i].second>);
4699 LValue LenLVal = CGF.EmitLValueForField(
4700 Base, *std::next(KmpDependInfoRD->field_begin(), Len));
4701 CGF.EmitStoreOfScalar(Size, LenLVal);
4702 // deps[i].flags = <Dependencies[i].first>;
4703 RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
4704 LValue FlagsLVal = CGF.EmitLValueForField(
4705 Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
4706 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
4707 FlagsLVal);
4708 if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
4709 ++(*P);
4710 } else {
4711 LValue &PosLVal = *Pos.get<LValue *>();
4712 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4713 Idx = CGF.Builder.CreateNUWAdd(Idx,
4714 llvm::ConstantInt::get(Idx->getType(), 1));
4715 CGF.EmitStoreOfScalar(Idx, PosLVal);
4716 }
4717 }
4718}
4719
4720static SmallVector<llvm::Value *, 4>
4721emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4722 const OMPTaskDataTy::DependData &Data) {
4723 assert(Data.DepKind == OMPC_DEPEND_depobj &&
4724 "Expected depobj dependency kind.");
4725 SmallVector<llvm::Value *, 4> Sizes;
4726 SmallVector<LValue, 4> SizeLVals;
4727 ASTContext &C = CGF.getContext();
4728 QualType FlagsTy;
4729 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4730 RecordDecl *KmpDependInfoRD =
4731 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4732 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
4733 llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
4734 {
4735 OMPIteratorGeneratorScope IteratorScope(
4736 CGF, cast_or_null<OMPIteratorExpr>(
4737 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4738 : nullptr));
4739 for (const Expr *E : Data.DepExprs) {
4740 LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
4741 LValue Base = CGF.EmitLoadOfPointerLValue(
4742 DepobjLVal.getAddress(CGF),
4743 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
4744 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4745 Base.getAddress(CGF), KmpDependInfoPtrT);
4746 Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
4747 Base.getTBAAInfo());
4748 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
4749 Addr.getElementType(), Addr.getPointer(),
4750 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
4751 LValue NumDepsBase = CGF.MakeAddrLValue(
4752 Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
4753 Base.getBaseInfo(), Base.getTBAAInfo());
4754 // NumDeps = deps[i].base_addr;
4755 LValue BaseAddrLVal = CGF.EmitLValueForField(
4756 NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4757 llvm::Value *NumDeps =
4758 CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
4759 LValue NumLVal = CGF.MakeAddrLValue(
4760 CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
4761 C.getUIntPtrType());
4762 CGF.InitTempAlloca(NumLVal.getAddress(CGF),
4763 llvm::ConstantInt::get(CGF.IntPtrTy, 0));
4764 llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
4765 llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
4766 CGF.EmitStoreOfScalar(Add, NumLVal);
4767 SizeLVals.push_back(NumLVal);
4768 }
4769 }
4770 for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
4771 llvm::Value *Size =
4772 CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
4773 Sizes.push_back(Size);
4774 }
4775 return Sizes;
4776}
4777
4778static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4779 LValue PosLVal,
4780 const OMPTaskDataTy::DependData &Data,
4781 Address DependenciesArray) {
4782 assert(Data.DepKind == OMPC_DEPEND_depobj &&
4783 "Expected depobj dependency kind.");
4784 ASTContext &C = CGF.getContext();
4785 QualType FlagsTy;
4786 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4787 RecordDecl *KmpDependInfoRD =
4788 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4789 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
4790 llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
4791 llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
4792 {
4793 OMPIteratorGeneratorScope IteratorScope(
4794 CGF, cast_or_null<OMPIteratorExpr>(
4795 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4796 : nullptr));
4797 for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
4798 const Expr *E = Data.DepExprs[I];
4799 LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
4800 LValue Base = CGF.EmitLoadOfPointerLValue(
4801 DepobjLVal.getAddress(CGF),
4802 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
4803 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4804 Base.getAddress(CGF), KmpDependInfoPtrT);
4805 Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
4806 Base.getTBAAInfo());
4807
4808 // Get number of elements in a single depobj.
4809 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
4810 Addr.getElementType(), Addr.getPointer(),
4811 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
4812 LValue NumDepsBase = CGF.MakeAddrLValue(
4813 Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
4814 Base.getBaseInfo(), Base.getTBAAInfo());
4815 // NumDeps = deps[i].base_addr;
4816 LValue BaseAddrLVal = CGF.EmitLValueForField(
4817 NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4818 llvm::Value *NumDeps =
4819 CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
4820
4821 // Memcpy the dependency data.
4822 llvm::Value *Size = CGF.Builder.CreateNUWMul(
4823 ElSize,
4824 CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
4825 llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4826 Address DepAddr =
4827 Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
4828 DependenciesArray.getPointer(), Pos),
4829 DependenciesArray.getAlignment());
4830 CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
4831
4832 // Increase pos.
4833 // pos += num_deps;
4834 llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
4835 CGF.EmitStoreOfScalar(Add, PosLVal);
4836 }
4837 }
4838}
4839
4840std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
4841 CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
4842 SourceLocation Loc) {
4843 if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
4844 return D.DepExprs.empty();
4845 }))
4846 return std::make_pair(nullptr, Address::invalid());
4847 // Process list of dependencies.
4848 ASTContext &C = CGM.getContext();
4849 Address DependenciesArray = Address::invalid();
4850 llvm::Value *NumOfElements = nullptr;
4851 unsigned NumDependencies = std::accumulate(
4852 Dependencies.begin(), Dependencies.end(), 0,
4853 [](unsigned V, const OMPTaskDataTy::DependData &D) {
4854 return D.DepKind == OMPC_DEPEND_depobj
4855 ? V
4856 : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
4857 });
4858 QualType FlagsTy;
4859 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4860 bool HasDepobjDeps = false;
4861 bool HasRegularWithIterators = false;
4862 llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
4863 llvm::Value *NumOfRegularWithIterators =
4864 llvm::ConstantInt::get(CGF.IntPtrTy, 1);
4865 // Calculate the number of depobj dependencies and regular deps with iterators.
4866 for (const OMPTaskDataTy::DependData &D : Dependencies) {
4867 if (D.DepKind == OMPC_DEPEND_depobj) {
4868 SmallVector<llvm::Value *, 4> Sizes =
4869 emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
4870 for (llvm::Value *Size : Sizes) {
4871 NumOfDepobjElements =
4872 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
4873 }
4874 HasDepobjDeps = true;
4875 continue;
4876 }
4877 // Include number of iterations, if any.
4878 if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
4879 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4880 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4881 Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
4882 NumOfRegularWithIterators =
4883 CGF.Builder.CreateNUWMul(NumOfRegularWithIterators, Sz);
4884 }
4885 HasRegularWithIterators = true;
4886 continue;
4887 }
4888 }
4889
4890 QualType KmpDependInfoArrayTy;
4891 if (HasDepobjDeps || HasRegularWithIterators) {
4892 NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
4893 /*isSigned=*/false);
4894 if (HasDepobjDeps) {
4895 NumOfElements =
4896 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
4897 }
4898 if (HasRegularWithIterators) {
4899 NumOfElements =
4900 CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
4901 }
4902 OpaqueValueExpr OVE(Loc,
4903 C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
4904 VK_PRValue);
4905 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
4906 RValue::get(NumOfElements));
4907 KmpDependInfoArrayTy =
4908 C.getVariableArrayType(KmpDependInfoTy, &OVE, ArrayType::Normal,
4909 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
4910 // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
4911 // Properly emit variable-sized array.
4912 auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
4913 ImplicitParamDecl::Other);
4914 CGF.EmitVarDecl(*PD);
4915 DependenciesArray = CGF.GetAddrOfLocalVar(PD);
4916 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
4917 /*isSigned=*/false);
4918 } else {
4919 KmpDependInfoArrayTy = C.getConstantArrayType(
4920 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
4921 ArrayType::Normal, /*IndexTypeQuals=*/0);
4922 DependenciesArray =
4923 CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4924 DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
4925 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
4926 /*isSigned=*/false);
4927 }
4928 unsigned Pos = 0;
4929 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4930 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4931 Dependencies[I].IteratorExpr)
4932 continue;
4933 emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
4934 DependenciesArray);
4935 }
4936 // Copy regular dependencies with iterators.
4937 LValue PosLVal = CGF.MakeAddrLValue(
4938 CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
4939 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
4940 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4941 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4942 !Dependencies[I].IteratorExpr)
4943 continue;
4944 emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
4945 DependenciesArray);
4946 }
4947 // Copy final depobj arrays without iterators.
4948 if (HasDepobjDeps) {
4949 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4950 if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
4951 continue;
4952 emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
4953 DependenciesArray);
4954 }
4955 }
4956 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4957 DependenciesArray, CGF.VoidPtrTy);
4958 return std::make_pair(NumOfElements, DependenciesArray);
4959}
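
// Illustrative example (assumed, not from this file): for
//
//   #pragma omp task depend(in: a) depend(out: b)
//
// the function above materializes a two-element kmp_depend_info array on the
// stack and returns the pair (i32 2, i8* array), which emitTaskCall below
// threads into __kmpc_omp_task_with_deps.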
4960
4961Address CGOpenMPRuntime::emitDepobjDependClause(
4962 CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
4963 SourceLocation Loc) {
4964 if (Dependencies.DepExprs.empty())
4965 return Address::invalid();
4966 // Process list of dependencies.
4967 ASTContext &C = CGM.getContext();
4968 Address DependenciesArray = Address::invalid();
4969 unsigned NumDependencies = Dependencies.DepExprs.size();
4970 QualType FlagsTy;
4971 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4972 RecordDecl *KmpDependInfoRD =
4973 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4974
4975 llvm::Value *Size;
4976 // Define type kmp_depend_info[<Dependencies.size()>];
4977 // For depobj reserve one extra element to store the number of elements.
4978 // It is required to handle the depobj(x) update(in) construct.
4979 // kmp_depend_info[<Dependencies.size()>] deps;
4980 llvm::Value *NumDepsVal;
4981 CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
4982 if (const auto *IE =
4983 cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
4984 NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
4985 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4986 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4987 Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
4988 NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
4989 }
4990 Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
4991 NumDepsVal);
4992 CharUnits SizeInBytes =
4993 C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
4994 llvm::Value *RecSize = CGM.getSize(SizeInBytes);
4995 Size = CGF.Builder.CreateNUWMul(Size, RecSize);
4996 NumDepsVal =
4997 CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
4998 } else {
4999 QualType KmpDependInfoArrayTy = C.getConstantArrayType(
5000 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
5001 nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
5002 CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
5003 Size = CGM.getSize(Sz.alignTo(Align));
5004 NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
5005 }
5006 // Needs to be allocated in dynamic memory.
5007 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5008 // Use default allocator.
5009 llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5010 llvm::Value *Args[] = {ThreadID, Size, Allocator};
5011
5012 llvm::Value *Addr =
5013 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5014 CGM.getModule(), OMPRTL___kmpc_alloc),
5015 Args, ".dep.arr.addr");
5016 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5017 Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
5018 DependenciesArray = Address(Addr, Align);
5019 // Write the number of elements into the first element of the array for depobj.
5020 LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
5021 // deps[i].base_addr = NumDependencies;
5022 LValue BaseAddrLVal = CGF.EmitLValueForField(
5023 Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
5024 CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
5025 llvm::PointerUnion<unsigned *, LValue *> Pos;
5026 unsigned Idx = 1;
5027 LValue PosLVal;
5028 if (Dependencies.IteratorExpr) {
5029 PosLVal = CGF.MakeAddrLValue(
5030 CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
5031 C.getSizeType());
5032 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
5033 /*IsInit=*/true);
5034 Pos = &PosLVal;
5035 } else {
5036 Pos = &Idx;
5037 }
5038 emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
5039 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5040 CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
5041 return DependenciesArray;
5042}
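
// Illustrative example (assumed, not from this file): for
//
//   omp_depend_t o;
//   #pragma omp depobj(o) depend(inout: x)
//
// the function above heap-allocates N + 1 records via __kmpc_alloc, writes N
// into deps[0].base_addr, fills deps[1..N] with the dependences, and returns
// the address of deps[1], which is what gets stored into o.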
5043
5044void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
5045 SourceLocation Loc) {
5046 ASTContext &C = CGM.getContext();
5047 QualType FlagsTy;
5048 getDependTypes(C, KmpDependInfoTy, FlagsTy);
5049 LValue Base = CGF.EmitLoadOfPointerLValue(
5050 DepobjLVal.getAddress(CGF),
5051 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5052 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
5053 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5054 Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
5055 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
5056 Addr.getElementType(), Addr.getPointer(),
5057 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
5058 DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
5059 CGF.VoidPtrTy);
5060 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5061 // Use default allocator.
5062 llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5063 llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
5064
5065 // __kmpc_free(gtid, addr, nullptr);
5066 (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5067 CGM.getModule(), OMPRTL___kmpc_free),
5068 Args);
5069}
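
// This implements "#pragma omp depobj(o) destroy": the pointer is rewound by
// one record to the true allocation start (the size slot) before being
// passed to __kmpc_free with the default (null) allocator.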
5070
5071void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
5072 OpenMPDependClauseKind NewDepKind,
5073 SourceLocation Loc) {
5074 ASTContext &C = CGM.getContext();
5075 QualType FlagsTy;
5076 getDependTypes(C, KmpDependInfoTy, FlagsTy);
5077 RecordDecl *KmpDependInfoRD =
5078 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
5079 llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
5080 llvm::Value *NumDeps;
5081 LValue Base;
5082 std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
5083
5084 Address Begin = Base.getAddress(CGF);
5085 // Cast from a pointer to the array type to a pointer to a single element.
5086 llvm::Value *End = CGF.Builder.CreateGEP(
5087 Begin.getElementType(), Begin.getPointer(), NumDeps);
5088 // The basic structure here is a while-do loop.
5089 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
5090 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
5091 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
5092 CGF.EmitBlock(BodyBB);
5093 llvm::PHINode *ElementPHI =
5094 CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
5095 ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
5096 Begin = Address(ElementPHI, Begin.getAlignment());
5097 Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
5098 Base.getTBAAInfo());
5099 // deps[i].flags = NewDepKind;
5100 RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
5101 LValue FlagsLVal = CGF.EmitLValueForField(
5102 Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
5103 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
5104 FlagsLVal);
5105
5106 // Shift the address forward by one element.
5107 Address ElementNext =
5108 CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
5109 ElementPHI->addIncoming(ElementNext.getPointer(),
5110 CGF.Builder.GetInsertBlock());
5111 llvm::Value *IsEmpty =
5112 CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
5113 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
5114 // Done.
5115 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
5116}
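
// Illustrative example (assumed, not from this file): for
//
//   #pragma omp depobj(o) update(mutexinoutset)
//
// the loop above walks all NumDeps records of the depobj and rewrites each
// flags field to the new runtime dependence kind (here, DepMutexInOutSet).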
5117
5118void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
5119 const OMPExecutableDirective &D,
5120 llvm::Function *TaskFunction,
5121 QualType SharedsTy, Address Shareds,
5122 const Expr *IfCond,
5123 const OMPTaskDataTy &Data) {
5124 if (!CGF.HaveInsertPoint())
5125 return;
5126
5127 TaskResultTy Result =
5128 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
5129 llvm::Value *NewTask = Result.NewTask;
5130 llvm::Function *TaskEntry = Result.TaskEntry;
5131 llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
5132 LValue TDBase = Result.TDBase;
5133 const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
5134 // Process list of dependences.
5135 Address DependenciesArray = Address::invalid();
5136 llvm::Value *NumOfElements;
5137 std::tie(NumOfElements, DependenciesArray) =
5138 emitDependClause(CGF, Data.Dependences, Loc);
5139
5140 // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5141 // libcall.
5142 // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
5143 // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
5144 // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
5145 // list is not empty
5146 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5147 llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
5148 llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
5149 llvm::Value *DepTaskArgs[7];
5150 if (!Data.Dependences.empty()) {
5151 DepTaskArgs[0] = UpLoc;
5152 DepTaskArgs[1] = ThreadID;
5153 DepTaskArgs[2] = NewTask;
5154 DepTaskArgs[3] = NumOfElements;
5155 DepTaskArgs[4] = DependenciesArray.getPointer();
5156 DepTaskArgs[5] = CGF.Builder.getInt32(0);
5157 DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5158 }
5159 auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
5160 &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
5161 if (!Data.Tied) {
5162 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
5163 LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
5164 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
5165 }
5166 if (!Data.Dependences.empty()) {
5167 CGF.EmitRuntimeCall(
5168 OMPBuilder.getOrCreateRuntimeFunction(
5169 CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
5170 DepTaskArgs);
5171 } else {
5172 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5173 CGM.getModule(), OMPRTL___kmpc_omp_task),
5174 TaskArgs);
5175 }
5176 // Check if the parent region is untied and build a return for the untied task.
5177 if (auto *Region =
5178 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
5179 Region->emitUntiedSwitch(CGF);
5180 };
5181
5182 llvm::Value *DepWaitTaskArgs[6];
5183 if (!Data.Dependences.empty()) {
5184 DepWaitTaskArgs[0] = UpLoc;
5185 DepWaitTaskArgs[1] = ThreadID;
5186 DepWaitTaskArgs[2] = NumOfElements;
5187 DepWaitTaskArgs[3] = DependenciesArray.getPointer();
5188 DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
5189 DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5190 }
5191 auto &M = CGM.getModule();
5192 auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
5193 TaskEntry, &Data, &DepWaitTaskArgs,
5194 Loc](CodeGenFunction &CGF, PrePostActionTy &) {
5195 CodeGenFunction::RunCleanupsScope LocalScope(CGF);
5196 // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
5197 // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
5198 // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
5199 // is specified.
5200 if (!Data.Dependences.empty())
5201 CGF.EmitRuntimeCall(
5202 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
5203 DepWaitTaskArgs);
5204 // Call proxy_task_entry(gtid, new_task);
5205 auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
5206 Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
5207 Action.Enter(CGF);
5208 llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
5209 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
5210 OutlinedFnArgs);
5211 };
5212
5213 // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
5214 // kmp_task_t *new_task);
5215 // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
5216 // kmp_task_t *new_task);
5217 RegionCodeGenTy RCG(CodeGen);
5218 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
5219 M, OMPRTL___kmpc_omp_task_begin_if0),
5220 TaskArgs,
5221 OMPBuilder.getOrCreateRuntimeFunction(
5222 M, OMPRTL___kmpc_omp_task_complete_if0),
5223 TaskArgs);
5224 RCG.setAction(Action);
5225 RCG(CGF);
5226 };
5227
5228 if (IfCond) {
5229 emitIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
5230 } else {
5231 RegionCodeGenTy ThenRCG(ThenCodeGen);
5232 ThenRCG(CGF);
5233 }
5234}
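
// Illustrative lowering (assumed example, not from this file): for
//
//   #pragma omp task if(cond) depend(in: x)
//
// the then-branch enqueues the task via __kmpc_omp_task_with_deps (or
// __kmpc_omp_task when there are no dependences), while the else-branch
// first waits on the dependences via __kmpc_omp_wait_deps and then runs the
// task inline between __kmpc_omp_task_begin_if0 and
// __kmpc_omp_task_complete_if0.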
5235
5236void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
5237 const OMPLoopDirective &D,
5238 llvm::Function *TaskFunction,
5239 QualType SharedsTy, Address Shareds,
5240 const Expr *IfCond,
5241 const OMPTaskDataTy &Data) {
5242 if (!CGF.HaveInsertPoint())
5243 return;
5244 TaskResultTy Result =
5245 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
5246 // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5247 // libcall.
5248 // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
5249 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
5250 // sched, kmp_uint64 grainsize, void *task_dup);
5251 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5252 llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
5253 llvm::Value *IfVal;
5254 if (IfCond) {
5255 IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
5256 /*isSigned=*/true);
5257 } else {
5258 IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
5259 }
5260
5261 LValue LBLVal = CGF.EmitLValueForField(
5262 Result.TDBase,
5263 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
5264 const auto *LBVar =
5265 cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
5266 CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
5267 LBLVal.getQuals(),
5268 /*IsInitializer=*/true);
5269 LValue UBLVal = CGF.EmitLValueForField(
5270 Result.TDBase,
5271 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
5272 const auto *UBVar =
5273 cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
5274 CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
5275 UBLVal.getQuals(),
5276 /*IsInitializer=*/true);
5277 LValue StLVal = CGF.EmitLValueForField(
5278 Result.TDBase,
5279 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
5280 const auto *StVar =
5281 cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
5282 CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
5283 StLVal.getQuals(),
5284 /*IsInitializer=*/true);
5285 // Store reductions address.
5286 LValue RedLVal = CGF.EmitLValueForField(
5287 Result.TDBase,
5288 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
5289 if (Data.Reductions) {
5290 CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
5291 } else {
5292 CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
5293 CGF.getContext().VoidPtrTy);
5294 }
5295 enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
5296 llvm::Value *TaskArgs[] = {
5297 UpLoc,
5298 ThreadID,
5299 Result.NewTask,
5300 IfVal,
5301 LBLVal.getPointer(CGF),
5302 UBLVal.getPointer(CGF),
5303 CGF.EmitLoadOfScalar(StLVal, Loc),
5304 llvm::ConstantInt::getSigned(
5305 CGF.IntTy, 1), // Always 1 because the taskgroup is emitted by the compiler.
5306 llvm::ConstantInt::getSigned(
5307 CGF.IntTy, Data.Schedule.getPointer()
5308 ? Data.Schedule.getInt() ? NumTasks : Grainsize
5309 : NoSchedule),
5310 Data.Schedule.getPointer()
5311 ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
5312 /*isSigned=*/false)
5313 : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
5314 Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5315 Result.TaskDupFn, CGF.VoidPtrTy)
5316 : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
5317 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5318 CGM.getModule(), OMPRTL___kmpc_taskloop),
5319 TaskArgs);
5320}
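
// Illustrative lowering (assumed example, not from this file): for
//
//   #pragma omp taskloop grainsize(4)
//   for (long i = lb0; i < ub0; ++i) { /* ... */ }
//
// the call above becomes roughly
//
//   __kmpc_taskloop(&loc, gtid, task, /*if_val=*/1, &t->lb, &t->ub, st,
//                   /*nogroup=*/1, /*sched=*/1 /*Grainsize*/,
//                   /*grainsize=*/4, task_dup);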
5321
5322/// Emit a reduction operation for each element of an array (required for
5323/// array sections): LHS op= RHS.
5324/// \param Type Type of array.
5325/// \param LHSVar Variable on the left side of the reduction operation
5326/// (references element of array in original variable).
5327/// \param RHSVar Variable on the right side of the reduction operation
5328/// (references element of array in original variable).
5329/// \param RedOpGen Generator of reduction operation with use of LHSVar and
5330/// RHSVar.
5331static void EmitOMPAggregateReduction(
5332 CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
5333 const VarDecl *RHSVar,
5334 const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
5335 const Expr *, const Expr *)> &RedOpGen,
5336 const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
5337 const Expr *UpExpr = nullptr) {
5338 // Perform element-by-element initialization.
5339 QualType ElementTy;
5340 Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
5341 Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
5342
5343 // Drill down to the base element type on both arrays.
5344 const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
5345 llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
5346
5347 llvm::Value *RHSBegin = RHSAddr.getPointer();
5348 llvm::Value *LHSBegin = LHSAddr.getPointer();
5349 // Cast from a pointer to the array type to a pointer to a single element.
5350 llvm::Value *LHSEnd =
5351 CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements);
5352 // The basic structure here is a while-do loop.
5353 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
5354 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
5355 llvm::Value *IsEmpty =
5356 CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
5357 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
5358
5359 // Enter the loop body, making that address the current address.
5360 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
5361 CGF.EmitBlock(BodyBB);
5362
5363 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
5364
5365 llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
5366 RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
5367 RHSElementPHI->addIncoming(RHSBegin, EntryBB);
5368 Address RHSElementCurrent =
5369 Address(RHSElementPHI,
5370 RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
5371
5372 llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
5373 LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
5374 LHSElementPHI->addIncoming(LHSBegin, EntryBB);
5375 Address LHSElementCurrent =
5376 Address(LHSElementPHI,
5377 LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
5378
5379 // Emit copy.
5380 CodeGenFunction::OMPPrivateScope Scope(CGF);
5381 Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
5382 Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
5383 Scope.Privatize();
5384 RedOpGen(CGF, XExpr, EExpr, UpExpr);
5385 Scope.ForceCleanup();
5386
5387 // Shift the address forward by one element.
5388 llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
5389 LHSAddr.getElementType(), LHSElementPHI, /*Idx0=*/1,
5390 "omp.arraycpy.dest.element");
5391 llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
5392 RHSAddr.getElementType(), RHSElementPHI, /*Idx0=*/1,
5393 "omp.arraycpy.src.element");
5394 // Check whether we've reached the end.
5395 llvm::Value *Done =
5396 CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
5397 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
5398 LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
5399 RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
5400
5401 // Done.
5402 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
5403}
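
// Illustrative example (assumed, not from this file): for an array-section
// reduction such as
//
//   #pragma omp parallel for reduction(+: a[0:n])
//
// the loop above walks the two arrays in lockstep, effectively performing
// lhs[i] = lhs[i] + rhs[i] for every element of the section.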
5404
5405/// Emit the reduction combiner. If the combiner is a simple expression, emit
5406/// it as is; otherwise treat it as the combiner of a UDR decl and emit it as
5407/// a call to the UDR combiner function.
5408static void emitReductionCombiner(CodeGenFunction &CGF,
5409 const Expr *ReductionOp) {
5410 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
5411 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
5412 if (const auto *DRE =
5413 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
5414 if (const auto *DRD =
5415 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
5416 std::pair<llvm::Function *, llvm::Function *> Reduction =
5417 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
5418 RValue Func = RValue::get(Reduction.first);
5419 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
5420 CGF.EmitIgnoredExpr(ReductionOp);
5421 return;
5422 }
5423 CGF.EmitIgnoredExpr(ReductionOp);
5424}
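// Illustrative example (assumed user code): a user-defined reduction (UDR)
// declared as below takes the UDR-combiner-call path above, while built-in
// operators such as '+' take the simple-expression path.
//
//   #pragma omp declare reduction(maxint : int :                            \
//       omp_out = omp_out > omp_in ? omp_out : omp_in)                      \
//       initializer(omp_priv = INT_MIN)
//
//   int m = INT_MIN;
//   #pragma omp parallel for reduction(maxint : m)
//   for (int i = 0; i < n; ++i)
//     m = a[i] > m ? a[i] : m;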
5425
5426llvm::Function *CGOpenMPRuntime::emitReductionFunction(
5427 SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
5428 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
5429 ArrayRef<const Expr *> ReductionOps) {
5430 ASTContext &C = CGM.getContext();
5431
5432 // void reduction_func(void *LHSArg, void *RHSArg);
5433 FunctionArgList Args;
5434 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5435 ImplicitParamDecl::Other);
5436 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5437 ImplicitParamDecl::Other);
5438 Args.push_back(&LHSArg);
5439 Args.push_back(&RHSArg);
5440 const auto &CGFI =
5441 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5442 std::string Name = getName({"omp", "reduction", "reduction_func"});
5443 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
5444 llvm::GlobalValue::InternalLinkage, Name,
5445 &CGM.getModule());
5446 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
5447 Fn->setDoesNotRecurse();
5448 CodeGenFunction CGF(CGM);
5449 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
5450
5451 // Dst = (void*[n])(LHSArg);
5452 // Src = (void*[n])(RHSArg);
5453 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5454 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
5455 ArgsType), CGF.getPointerAlign());
5456 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5457 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
5458 ArgsType), CGF.getPointerAlign());
5459
5460 // ...
5461 // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
5462 // ...
5463 CodeGenFunction::OMPPrivateScope Scope(CGF);
5464 auto IPriv = Privates.begin();
5465 unsigned Idx = 0;
5466 for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
5467 const auto *RHSVar =
5468 cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
5469 Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
5470 return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
5471 });
5472 const auto *LHSVar =
5473 cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
5474 Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
5475 return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
5476 });
5477 QualType PrivTy = (*IPriv)->getType();
5478 if (PrivTy->isVariablyModifiedType()) {
5479 // Get array size and emit VLA type.
5480 ++Idx;
5481 Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx);
5482 llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
5483 const VariableArrayType *VLA =
5484 CGF.getContext().getAsVariableArrayType(PrivTy);
5485 const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
5486 CodeGenFunction::OpaqueValueMapping OpaqueMap(
5487 CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
5488 CGF.EmitVariablyModifiedType(PrivTy);
5489 }
5490 }
5491 Scope.Privatize();
5492 IPriv = Privates.begin();
5493 auto ILHS = LHSExprs.begin();
5494 auto IRHS = RHSExprs.begin();
5495 for (const Expr *E : ReductionOps) {
5496 if ((*IPriv)->getType()->isArrayType()) {
5497 // Emit reduction for array section.
5498 const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5499 const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5500 EmitOMPAggregateReduction(
5501 CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5502 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5503 emitReductionCombiner(CGF, E);
5504 });
5505 } else {
5506 // Emit reduction for array subscript or single variable.
5507 emitReductionCombiner(CGF, E);
5508 }
5509 ++IPriv;
5510 ++ILHS;
5511 ++IRHS;
5512 }
5513 Scope.ForceCleanup();
5514 CGF.FinishFunction();
5515 return Fn;
5516}
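// A hedged sketch, in pseudo-C rather than the emitted IR, of the helper this
// routine generates for two scalar reductions (names and types illustrative;
// the actual symbol comes from getName above, e.g. ".omp.reduction.reduction_func"):
//
//   void reduction_func(void *lhs, void *rhs) {
//     void **L = (void **)lhs;
//     void **R = (void **)rhs;
//     *(int *)L[0]    += *(int *)R[0];    // RedOp<0> for reduction(+ : i)
//     *(double *)L[1] *= *(double *)R[1]; // RedOp<1> for reduction(* : d)
//   }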
5517
5518void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5519 const Expr *ReductionOp,
5520 const Expr *PrivateRef,
5521 const DeclRefExpr *LHS,
5522 const DeclRefExpr *RHS) {
5523 if (PrivateRef->getType()->isArrayType()) {
5524 // Emit reduction for array section.
5525 const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5526 const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5527 EmitOMPAggregateReduction(
5528 CGF, PrivateRef->getType(), LHSVar, RHSVar,
5529 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5530 emitReductionCombiner(CGF, ReductionOp);
5531 });
5532 } else {
5533 // Emit reduction for array subscript or single variable.
5534 emitReductionCombiner(CGF, ReductionOp);
5535 }
5536}
5537
5538void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
5539 ArrayRef<const Expr *> Privates,
5540 ArrayRef<const Expr *> LHSExprs,
5541 ArrayRef<const Expr *> RHSExprs,
5542 ArrayRef<const Expr *> ReductionOps,
5543 ReductionOptionsTy Options) {
5544 if (!CGF.HaveInsertPoint())
5545 return;
5546
5547 bool WithNowait = Options.WithNowait;
5548 bool SimpleReduction = Options.SimpleReduction;
5549
5550  // The following code should be emitted for the reduction:
5551 //
5552 // static kmp_critical_name lock = { 0 };
5553 //
5554 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
5555 // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
5556 // ...
5557 // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
5558 // *(Type<n>-1*)rhs[<n>-1]);
5559 // }
5560 //
5561 // ...
5562 // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
5563 // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5564 // RedList, reduce_func, &<lock>)) {
5565 // case 1:
5566 // ...
5567 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5568 // ...
5569 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5570 // break;
5571 // case 2:
5572 // ...
5573 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5574 // ...
5575 // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
5576 // break;
5577 // default:;
5578 // }
5579 //
5580  // If SimpleReduction is true, only the following code is generated:
5581 // ...
5582 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5583 // ...
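  //
  // Illustrative trigger (assumed user code) for the scheme above:
  //
  //   int sum = 0;
  //   #pragma omp parallel for reduction(+ : sum)
  //   for (int i = 0; i < n; ++i)
  //     sum += a[i];
  //
  // reduce_func combines the per-thread copies of 'sum'; case 1 emits the
  // plain combine and case 2 the atomic variant.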
5584
5585 ASTContext &C = CGM.getContext();
5586
5587 if (SimpleReduction) {
5588 CodeGenFunction::RunCleanupsScope Scope(CGF);
5589 auto IPriv = Privates.begin();
5590 auto ILHS = LHSExprs.begin();
5591 auto IRHS = RHSExprs.begin();
5592 for (const Expr *E : ReductionOps) {
5593 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5594 cast<DeclRefExpr>(*IRHS));
5595 ++IPriv;
5596 ++ILHS;
5597 ++IRHS;
5598 }
5599 return;
5600 }
5601
5602 // 1. Build a list of reduction variables.
5603 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
5604 auto Size = RHSExprs.size();
5605 for (const Expr *E : Privates) {
5606 if (E->getType()->isVariablyModifiedType())
5607      // Reserve a slot for the array size.
5608 ++Size;
5609 }
5610 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
5611 QualType ReductionArrayTy =
5612 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
5613 /*IndexTypeQuals=*/0);
5614 Address ReductionList =
5615 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
5616 auto IPriv = Privates.begin();
5617 unsigned Idx = 0;
5618 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
5619 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
5620 CGF.Builder.CreateStore(
5621 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5622 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
5623 Elem);
5624 if ((*IPriv)->getType()->isVariablyModifiedType()) {
5625 // Store array size.
5626 ++Idx;
5627 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
5628 llvm::Value *Size = CGF.Builder.CreateIntCast(
5629 CGF.getVLASize(
5630 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
5631 .NumElts,
5632 CGF.SizeTy, /*isSigned=*/false);
5633 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
5634 Elem);
5635 }
5636 }
5637
5638 // 2. Emit reduce_func().
5639 llvm::Function *ReductionFn = emitReductionFunction(
5640 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
5641 LHSExprs, RHSExprs, ReductionOps);
5642
5643 // 3. Create static kmp_critical_name lock = { 0 };
5644 std::string Name = getName({"reduction"});
5645 llvm::Value *Lock = getCriticalRegionLock(Name);
5646
5647 // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5648 // RedList, reduce_func, &<lock>);
5649 llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
5650 llvm::Value *ThreadId = getThreadID(CGF, Loc);
5651 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
5652 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5653 ReductionList.getPointer(), CGF.VoidPtrTy);
5654 llvm::Value *Args[] = {
5655 IdentTLoc, // ident_t *<loc>
5656 ThreadId, // i32 <gtid>
5657 CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
5658 ReductionArrayTySize, // size_type sizeof(RedList)
5659 RL, // void *RedList
5660 ReductionFn, // void (*) (void *, void *) <reduce_func>
5661 Lock // kmp_critical_name *&<lock>
5662 };
5663 llvm::Value *Res = CGF.EmitRuntimeCall(
5664 OMPBuilder.getOrCreateRuntimeFunction(
5665 CGM.getModule(),
5666 WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
5667 Args);
5668
5669 // 5. Build switch(res)
5670 llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
5671 llvm::SwitchInst *SwInst =
5672 CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
5673
5674 // 6. Build case 1:
5675 // ...
5676 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5677 // ...
5678 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5679 // break;
5680 llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
5681 SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
5682 CGF.EmitBlock(Case1BB);
5683
5684 // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5685 llvm::Value *EndArgs[] = {
5686 IdentTLoc, // ident_t *<loc>
5687 ThreadId, // i32 <gtid>
5688 Lock // kmp_critical_name *&<lock>
5689 };
5690 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
5691 CodeGenFunction &CGF, PrePostActionTy &Action) {
5692 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5693 auto IPriv = Privates.begin();
5694 auto ILHS = LHSExprs.begin();
5695 auto IRHS = RHSExprs.begin();
5696 for (const Expr *E : ReductionOps) {
5697 RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5698 cast<DeclRefExpr>(*IRHS));
5699 ++IPriv;
5700 ++ILHS;
5701 ++IRHS;
5702 }
5703 };
5704 RegionCodeGenTy RCG(CodeGen);
5705 CommonActionTy Action(
5706 nullptr, llvm::None,
5707 OMPBuilder.getOrCreateRuntimeFunction(
5708 CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
5709 : OMPRTL___kmpc_end_reduce),
5710 EndArgs);
5711 RCG.setAction(Action);
5712 RCG(CGF);
5713
5714 CGF.EmitBranch(DefaultBB);
5715
5716 // 7. Build case 2:
5717 // ...
5718 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5719 // ...
5720 // break;
5721 llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
5722 SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
5723 CGF.EmitBlock(Case2BB);
5724
5725 auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
5726 CodeGenFunction &CGF, PrePostActionTy &Action) {
5727 auto ILHS = LHSExprs.begin();
5728 auto IRHS = RHSExprs.begin();
5729 auto IPriv = Privates.begin();
5730 for (const Expr *E : ReductionOps) {
5731 const Expr *XExpr = nullptr;
5732 const Expr *EExpr = nullptr;
5733 const Expr *UpExpr = nullptr;
5734 BinaryOperatorKind BO = BO_Comma;
5735 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
5736 if (BO->getOpcode() == BO_Assign) {
5737 XExpr = BO->getLHS();
5738 UpExpr = BO->getRHS();
5739 }
5740 }
5741 // Try to emit update expression as a simple atomic.
5742 const Expr *RHSExpr = UpExpr;
5743 if (RHSExpr) {
5744 // Analyze RHS part of the whole expression.
5745 if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
5746 RHSExpr->IgnoreParenImpCasts())) {
5747 // If this is a conditional operator, analyze its condition for
5748 // min/max reduction operator.
5749 RHSExpr = ACO->getCond();
5750 }
5751 if (const auto *BORHS =
5752 dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
5753 EExpr = BORHS->getRHS();
5754 BO = BORHS->getOpcode();
5755 }
5756 }
5757 if (XExpr) {
5758 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5759 auto &&AtomicRedGen = [BO, VD,
5760 Loc](CodeGenFunction &CGF, const Expr *XExpr,
5761 const Expr *EExpr, const Expr *UpExpr) {
5762 LValue X = CGF.EmitLValue(XExpr);
5763 RValue E;
5764 if (EExpr)
5765 E = CGF.EmitAnyExpr(EExpr);
5766 CGF.EmitOMPAtomicSimpleUpdateExpr(
5767 X, E, BO, /*IsXLHSInRHSPart=*/true,
5768 llvm::AtomicOrdering::Monotonic, Loc,
5769 [&CGF, UpExpr, VD, Loc](RValue XRValue) {
5770 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5771 PrivateScope.addPrivate(
5772 VD, [&CGF, VD, XRValue, Loc]() {
5773 Address LHSTemp = CGF.CreateMemTemp(VD->getType());
5774 CGF.emitOMPSimpleStore(
5775 CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
5776 VD->getType().getNonReferenceType(), Loc);
5777 return LHSTemp;
5778 });
5779 (void)PrivateScope.Privatize();
5780 return CGF.EmitAnyExpr(UpExpr);
5781 });
5782 };
5783 if ((*IPriv)->getType()->isArrayType()) {
5784 // Emit atomic reduction for array section.
5785 const auto *RHSVar =
5786 cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5787 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
5788 AtomicRedGen, XExpr, EExpr, UpExpr);
5789 } else {
5790 // Emit atomic reduction for array subscript or single variable.
5791 AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
5792 }
5793 } else {
5794 // Emit as a critical region.
5795 auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
5796 const Expr *, const Expr *) {
5797 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5798 std::string Name = RT.getName({"atomic_reduction"});
5799 RT.emitCriticalRegion(
5800 CGF, Name,
5801 [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
5802 Action.Enter(CGF);
5803 emitReductionCombiner(CGF, E);
5804 },
5805 Loc);
5806 };
5807 if ((*IPriv)->getType()->isArrayType()) {
5808 const auto *LHSVar =
5809 cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5810 const auto *RHSVar =
5811 cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5812 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5813 CritRedGen);
5814 } else {
5815 CritRedGen(CGF, nullptr, nullptr, nullptr);
5816 }
5817 }
5818 ++ILHS;
5819 ++IRHS;
5820 ++IPriv;
5821 }
5822 };
5823 RegionCodeGenTy AtomicRCG(AtomicCodeGen);
5824 if (!WithNowait) {
5825 // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
5826 llvm::Value *EndArgs[] = {
5827 IdentTLoc, // ident_t *<loc>
5828 ThreadId, // i32 <gtid>
5829 Lock // kmp_critical_name *&<lock>
5830 };
5831 CommonActionTy Action(nullptr, llvm::None,
5832 OMPBuilder.getOrCreateRuntimeFunction(
5833 CGM.getModule(), OMPRTL___kmpc_end_reduce),
5834 EndArgs);
5835 AtomicRCG.setAction(Action);
5836 AtomicRCG(CGF);
5837 } else {
5838 AtomicRCG(CGF);
5839 }
5840
5841 CGF.EmitBranch(DefaultBB);
5842 CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
5843}
5844
5845/// Generates unique name for artificial threadprivate variables.
5846/// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
5847static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
5848 const Expr *Ref) {
5849 SmallString<256> Buffer;
5850 llvm::raw_svector_ostream Out(Buffer);
5851 const clang::DeclRefExpr *DE;
5852 const VarDecl *D = ::getBaseDecl(Ref, DE);
5853 if (!D)
5854 D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
5855 D = D->getCanonicalDecl();
5856 std::string Name = CGM.getOpenMPRuntime().getName(
5857 {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
5858 Out << Prefix << Name << "_"
5859 << D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
5860 return std::string(Out.str());
5861}
5862
5863/// Emits reduction initializer function:
5864/// \code
5865/// void @.red_init(void* %arg, void* %orig) {
5866/// %0 = bitcast void* %arg to <type>*
5867/// store <type> <init>, <type>* %0
5868/// ret void
5869/// }
5870/// \endcode
5871static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
5872 SourceLocation Loc,
5873 ReductionCodeGen &RCG, unsigned N) {
5874 ASTContext &C = CGM.getContext();
5875 QualType VoidPtrTy = C.VoidPtrTy;
5876 VoidPtrTy.addRestrict();
5877 FunctionArgList Args;
5878 ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
5879 ImplicitParamDecl::Other);
5880 ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
5881 ImplicitParamDecl::Other);
5882 Args.emplace_back(&Param);
5883 Args.emplace_back(&ParamOrig);
5884 const auto &FnInfo =
5885 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5886 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5887 std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
5888 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5889 Name, &CGM.getModule());
5890 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5891 Fn->setDoesNotRecurse();
5892 CodeGenFunction CGF(CGM);
5893 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5894 Address PrivateAddr = CGF.EmitLoadOfPointer(
5895 CGF.GetAddrOfLocalVar(&Param),
5896 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5897 llvm::Value *Size = nullptr;
5898 // If the size of the reduction item is non-constant, load it from global
5899 // threadprivate variable.
5900 if (RCG.getSizes(N).second) {
5901 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5902 CGF, CGM.getContext().getSizeType(),
5903 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5904 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5905 CGM.getContext().getSizeType(), Loc);
5906 }
5907 RCG.emitAggregateType(CGF, N, Size);
5908 LValue OrigLVal;
5909  // If the initializer uses the initializer from the declare reduction
5910  // construct, emit a pointer to the address of the original reduction item
5911  // (required by the reduction initializer).
5912 if (RCG.usesReductionInitializer(N)) {
5913 Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
5914 SharedAddr = CGF.EmitLoadOfPointer(
5915 SharedAddr,
5916 CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
5917 OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
5918 } else {
5919 OrigLVal = CGF.MakeNaturalAlignAddrLValue(
5920 llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
5921 CGM.getContext().VoidPtrTy);
5922 }
5923 // Emit the initializer:
5924 // %0 = bitcast void* %arg to <type>*
5925 // store <type> <init>, <type>* %0
5926 RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal,
5927 [](CodeGenFunction &) { return false; });
5928 CGF.FinishFunction();
5929 return Fn;
5930}
5931
5932/// Emits reduction combiner function:
5933/// \code
5934/// void @.red_comb(void* %arg0, void* %arg1) {
5935/// %lhs = bitcast void* %arg0 to <type>*
5936/// %rhs = bitcast void* %arg1 to <type>*
5937/// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
5938/// store <type> %2, <type>* %lhs
5939/// ret void
5940/// }
5941/// \endcode
5942static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
5943 SourceLocation Loc,
5944 ReductionCodeGen &RCG, unsigned N,
5945 const Expr *ReductionOp,
5946 const Expr *LHS, const Expr *RHS,
5947 const Expr *PrivateRef) {
5948 ASTContext &C = CGM.getContext();
5949 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
5950 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
5951 FunctionArgList Args;
5952 ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
5953 C.VoidPtrTy, ImplicitParamDecl::Other);
5954 ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5955 ImplicitParamDecl::Other);
5956 Args.emplace_back(&ParamInOut);
5957 Args.emplace_back(&ParamIn);
5958 const auto &FnInfo =
5959 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5960 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5961 std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
5962 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5963 Name, &CGM.getModule());
5964 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5965 Fn->setDoesNotRecurse();
5966 CodeGenFunction CGF(CGM);
5967 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5968 llvm::Value *Size = nullptr;
5969 // If the size of the reduction item is non-constant, load it from global
5970 // threadprivate variable.
5971 if (RCG.getSizes(N).second) {
5972 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5973 CGF, CGM.getContext().getSizeType(),
5974 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5975 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5976 CGM.getContext().getSizeType(), Loc);
5977 }
5978 RCG.emitAggregateType(CGF, N, Size);
5979 // Remap lhs and rhs variables to the addresses of the function arguments.
5980 // %lhs = bitcast void* %arg0 to <type>*
5981 // %rhs = bitcast void* %arg1 to <type>*
5982 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5983 PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
5984 // Pull out the pointer to the variable.
5985 Address PtrAddr = CGF.EmitLoadOfPointer(
5986 CGF.GetAddrOfLocalVar(&ParamInOut),
5987 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5988 return CGF.Builder.CreateElementBitCast(
5989 PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
5990 });
5991 PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
5992 // Pull out the pointer to the variable.
5993 Address PtrAddr = CGF.EmitLoadOfPointer(
5994 CGF.GetAddrOfLocalVar(&ParamIn),
5995 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5996 return CGF.Builder.CreateElementBitCast(
5997 PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
5998 });
5999 PrivateScope.Privatize();
6000 // Emit the combiner body:
6001 // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
6002 // store <type> %2, <type>* %lhs
6003 CGM.getOpenMPRuntime().emitSingleReductionCombiner(
6004 CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
6005 cast<DeclRefExpr>(RHS));
6006 CGF.FinishFunction();
6007 return Fn;
6008}
6009
6010/// Emits reduction finalizer function:
6011/// \code
6012/// void @.red_fini(void* %arg) {
6013/// %0 = bitcast void* %arg to <type>*
6014/// <destroy>(<type>* %0)
6015/// ret void
6016/// }
6017/// \endcode
6018static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
6019 SourceLocation Loc,
6020 ReductionCodeGen &RCG, unsigned N) {
6021 if (!RCG.needCleanups(N))
6022 return nullptr;
6023 ASTContext &C = CGM.getContext();
6024 FunctionArgList Args;
6025 ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
6026 ImplicitParamDecl::Other);
6027 Args.emplace_back(&Param);
6028 const auto &FnInfo =
6029 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
6030 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
6031 std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
6032 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
6033 Name, &CGM.getModule());
6034 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
6035 Fn->setDoesNotRecurse();
6036 CodeGenFunction CGF(CGM);
6037 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
6038 Address PrivateAddr = CGF.EmitLoadOfPointer(
6039 CGF.GetAddrOfLocalVar(&Param),
6040 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
6041 llvm::Value *Size = nullptr;
6042 // If the size of the reduction item is non-constant, load it from global
6043 // threadprivate variable.
6044 if (RCG.getSizes(N).second) {
6045 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
6046 CGF, CGM.getContext().getSizeType(),
6047 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
6048 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
6049 CGM.getContext().getSizeType(), Loc);
6050 }
6051 RCG.emitAggregateType(CGF, N, Size);
6052 // Emit the finalizer body:
6053 // <destroy>(<type>* %0)
6054 RCG.emitCleanups(CGF, N, PrivateAddr);
6055 CGF.FinishFunction(Loc);
6056 return Fn;
6057}
6058
6059llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
6060 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
6061 ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
6062 if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
6063 return nullptr;
6064
6065 // Build typedef struct:
6066 // kmp_taskred_input {
6067 // void *reduce_shar; // shared reduction item
6068 // void *reduce_orig; // original reduction item used for initialization
6069 // size_t reduce_size; // size of data item
6070 // void *reduce_init; // data initialization routine
6071 // void *reduce_fini; // data finalization routine
6072 // void *reduce_comb; // data combiner routine
6073 // kmp_task_red_flags_t flags; // flags for additional info from compiler
6074 // } kmp_taskred_input_t;
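  // Illustrative trigger (assumed user code) for task reductions, which are
  // lowered through the kmp_taskred_input_t records built below (work() is a
  // hypothetical function):
  //
  //   int sum = 0;
  //   #pragma omp taskgroup task_reduction(+ : sum)
  //   {
  //     #pragma omp task in_reduction(+ : sum)
  //     sum += work();
  //   }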
6075 ASTContext &C = CGM.getContext();
6076 RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t");
6077 RD->startDefinition();
6078 const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
6079 const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
6080 const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
6081 const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
6082 const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
6083 const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
6084 const FieldDecl *FlagsFD = addFieldToRecordDecl(
6085 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
6086 RD->completeDefinition();
6087 QualType RDType = C.getRecordType(RD);
6088 unsigned Size = Data.ReductionVars.size();
6089 llvm::APInt ArraySize(/*numBits=*/64, Size);
6090 QualType ArrayRDType = C.getConstantArrayType(
6091 RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
6092 // kmp_task_red_input_t .rd_input.[Size];
6093 Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
6094 ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
6095 Data.ReductionCopies, Data.ReductionOps);
6096 for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
6097 // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
6098 llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
6099 llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
6100 llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
6101 TaskRedInput.getPointer(), Idxs,
6102 /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
6103 ".rd_input.gep.");
6104 LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
6105 // ElemLVal.reduce_shar = &Shareds[Cnt];
6106 LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
6107 RCG.emitSharedOrigLValue(CGF, Cnt);
6108 llvm::Value *CastedShared =
6109 CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
6110 CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
6111 // ElemLVal.reduce_orig = &Origs[Cnt];
6112 LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
6113 llvm::Value *CastedOrig =
6114 CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
6115 CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
6116 RCG.emitAggregateType(CGF, Cnt);
6117 llvm::Value *SizeValInChars;
6118 llvm::Value *SizeVal;
6119 std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
6120    // We use delayed creation/initialization for VLAs and array sections. It
6121    // is required because the runtime does not provide a way to pass the sizes
6122    // of VLAs/array sections to the initializer/combiner/finalizer functions.
6123    // Instead, threadprivate global variables are used to store these values,
6124    // and the functions read them from there.
6125 bool DelayedCreation = !!SizeVal;
6126 SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
6127 /*isSigned=*/false);
6128 LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
6129 CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
6130 // ElemLVal.reduce_init = init;
6131 LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
6132 llvm::Value *InitAddr =
6133 CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
6134 CGF.EmitStoreOfScalar(InitAddr, InitLVal);
6135 // ElemLVal.reduce_fini = fini;
6136 LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
6137 llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
6138 llvm::Value *FiniAddr = Fini
6139 ? CGF.EmitCastToVoidPtr(Fini)
6140 : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
6141 CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
6142 // ElemLVal.reduce_comb = comb;
6143 LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
6144 llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
6145 CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
6146 RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
6147 CGF.EmitStoreOfScalar(CombAddr, CombLVal);
6148 // ElemLVal.flags = 0;
6149 LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
6150 if (DelayedCreation) {
6151 CGF.EmitStoreOfScalar(
6152 llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
6153 FlagsLVal);
6154 } else
6155 CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
6156 FlagsLVal.getType());
6157 }
6158 if (Data.IsReductionWithTaskMod) {
6159 // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
6160 // is_ws, int num, void *data);
6161 llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
6162 llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
6163 CGM.IntTy, /*isSigned=*/true);
6164 llvm::Value *Args[] = {
6165 IdentTLoc, GTid,
6166 llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 1 : 0,
6167 /*isSigned=*/true),
6168 llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
6169 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6170 TaskRedInput.getPointer(), CGM.VoidPtrTy)};
6171 return CGF.EmitRuntimeCall(
6172 OMPBuilder.getOrCreateRuntimeFunction(
6173 CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
6174 Args);
6175 }
6176 // Build call void *__kmpc_taskred_init(int gtid, int num_data, void *data);
6177 llvm::Value *Args[] = {
6178 CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
6179 /*isSigned=*/true),
6180 llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
6181 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
6182 CGM.VoidPtrTy)};
6183 return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
6184 CGM.getModule(), OMPRTL___kmpc_taskred_init),
6185 Args);
6186}
6187
6188void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
6189 SourceLocation Loc,
6190 bool IsWorksharingReduction) {
6191  // Build call void __kmpc_task_reduction_modifier_fini(ident_t *loc,
6192  // int gtid, int is_ws);
6193 llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
6194 llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
6195 CGM.IntTy, /*isSigned=*/true);
6196 llvm::Value *Args[] = {IdentTLoc, GTid,
6197 llvm::ConstantInt::get(CGM.IntTy,
6198 IsWorksharingReduction ? 1 : 0,
6199 /*isSigned=*/true)};
6200 (void)CGF.EmitRuntimeCall(
6201 OMPBuilder.getOrCreateRuntimeFunction(
6202 CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
6203 Args);
6204}
6205
6206void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
6207 SourceLocation Loc,
6208 ReductionCodeGen &RCG,
6209 unsigned N) {
6210 auto Sizes = RCG.getSizes(N);
6211  // Emit the threadprivate global variable if the size is non-constant
6212  // (Sizes.second != nullptr).
6213 if (Sizes.second) {
6214 llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
6215 /*isSigned=*/false);
6216 Address SizeAddr = getAddrOfArtificialThreadPrivate(
6217 CGF, CGM.getContext().getSizeType(),
6218 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
6219 CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
6220 }
6221}
6222
6223Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
6224 SourceLocation Loc,
6225 llvm::Value *ReductionsPtr,
6226 LValue SharedLVal) {
6227 // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
6228 // *d);
6229 llvm::Value *Args[] = {CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
6230 CGM.IntTy,
6231 /*isSigned=*/true),
6232 ReductionsPtr,
6233 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6234 SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
6235 return Address(
6236 CGF.EmitRuntimeCall(
6237 OMPBuilder.getOrCreateRuntimeFunction(
6238 CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
6239 Args),
6240 SharedLVal.getAlignment());
6241}
6242
6243void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
6244 SourceLocation Loc) {
6245 if (!CGF.HaveInsertPoint())
6246 return;
6247
6248 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
6249 OMPBuilder.createTaskwait(CGF.Builder);
6250 } else {
6251 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
6252 // global_tid);
6253 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
6254 // Ignore return result until untied tasks are supported.
6255 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
6256 CGM.getModule(), OMPRTL___kmpc_omp_taskwait),
6257 Args);
6258 }
6259
6260 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
6261 Region->emitUntiedSwitch(CGF);
6262}
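// Illustrative trigger (assumed user code; compute() is hypothetical): a bare
// taskwait lowers either to OMPIRBuilder::createTaskwait or to the
// __kmpc_omp_taskwait call emitted above.
//
//   #pragma omp task
//   compute();
//   #pragma omp taskwait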
6263
6264void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
6265 OpenMPDirectiveKind InnerKind,
6266 const RegionCodeGenTy &CodeGen,
6267 bool HasCancel) {
6268 if (!CGF.HaveInsertPoint())
6269 return;
6270 InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel,
6271 InnerKind != OMPD_critical &&
6272 InnerKind != OMPD_master &&
6273 InnerKind != OMPD_masked);
6274 CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
6275}
6276
6277namespace {
6278enum RTCancelKind {
6279 CancelNoreq = 0,
6280 CancelParallel = 1,
6281 CancelLoop = 2,
6282 CancelSections = 3,
6283 CancelTaskgroup = 4
6284};
6285} // anonymous namespace
6286
6287static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
6288 RTCancelKind CancelKind = CancelNoreq;
6289 if (CancelRegion == OMPD_parallel)
6290 CancelKind = CancelParallel;
6291 else if (CancelRegion == OMPD_for)
6292 CancelKind = CancelLoop;
6293 else if (CancelRegion == OMPD_sections)
6294 CancelKind = CancelSections;
6295 else {
6296 assert(CancelRegion == OMPD_taskgroup)((void)0);
6297 CancelKind = CancelTaskgroup;
6298 }
6299 return CancelKind;
6300}
6301
6302void CGOpenMPRuntime::emitCancellationPointCall(
6303 CodeGenFunction &CGF, SourceLocation Loc,
6304 OpenMPDirectiveKind CancelRegion) {
6305 if (!CGF.HaveInsertPoint())
6306 return;
6307 // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
6308 // global_tid, kmp_int32 cncl_kind);
6309 if (auto *OMPRegionInfo =
6310 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
6311 // For 'cancellation point taskgroup', the task region info may not have a
6312 // cancel. This may instead happen in another adjacent task.
6313 if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
6314 llvm::Value *Args[] = {
6315 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
6316 CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
6317 // Ignore return result until untied tasks are supported.
6318 llvm::Value *Result = CGF.EmitRuntimeCall(
6319 OMPBuilder.getOrCreateRuntimeFunction(
6320 CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
6321 Args);
6322 // if (__kmpc_cancellationpoint()) {
6323 // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
6324 // exit from construct;
6325 // }
6326 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
6327 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
6328 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
6329 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6330 CGF.EmitBlock(ExitBB);
6331 if (CancelRegion == OMPD_parallel)
6332 emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
6333 // exit from construct;
6334 CodeGenFunction::JumpDest CancelDest =
6335 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
6336 CGF.EmitBranchThroughCleanup(CancelDest);
6337 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
6338 }
6339 }
6340}
6341
6342void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
6343 const Expr *IfCond,
6344 OpenMPDirectiveKind CancelRegion) {
6345 if (!CGF.HaveInsertPoint())
6346 return;
6347 // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
6348 // kmp_int32 cncl_kind);
6349 auto &M = CGM.getModule();
6350 if (auto *OMPRegionInfo =
6351 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
6352 auto &&ThenGen = [this, &M, Loc, CancelRegion,
6353 OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) {
6354 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
6355 llvm::Value *Args[] = {
6356 RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
6357 CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
6358 // Ignore return result until untied tasks are supported.
6359 llvm::Value *Result = CGF.EmitRuntimeCall(
6360 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
6361 // if (__kmpc_cancel()) {
6362 // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
6363 // exit from construct;
6364 // }
6365 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
6366 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
6367 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
6368 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6369 CGF.EmitBlock(ExitBB);
6370 if (CancelRegion == OMPD_parallel)
6371 RT.emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
6372 // exit from construct;
6373 CodeGenFunction::JumpDest CancelDest =
6374 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
6375 CGF.EmitBranchThroughCleanup(CancelDest);
6376 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
6377 };
6378 if (IfCond) {
6379 emitIfClause(CGF, IfCond, ThenGen,
6380 [](CodeGenFunction &, PrePostActionTy &) {});
6381 } else {
6382 RegionCodeGenTy ThenRCG(ThenGen);
6383 ThenRCG(CGF);
6384 }
6385 }
6386}
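// Illustrative trigger (assumed user code; try_step() is hypothetical): the
// if clause maps to the IfCond parameter above, and the construct name
// selects the kmp_int32 cncl_kind argument.
//
//   #pragma omp parallel
//   {
//     int had_error = try_step();
//     #pragma omp cancel parallel if(had_error)
//   }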
6387
6388namespace {
6389/// Cleanup action for uses_allocators support.
6390class OMPUsesAllocatorsActionTy final : public PrePostActionTy {
6391 ArrayRef<std::pair<const Expr *, const Expr *>> Allocators;
6392
6393public:
6394 OMPUsesAllocatorsActionTy(
6395 ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
6396 : Allocators(Allocators) {}
6397 void Enter(CodeGenFunction &CGF) override {
6398 if (!CGF.HaveInsertPoint())
6399 return;
6400 for (const auto &AllocatorData : Allocators) {
6401 CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit(
6402 CGF, AllocatorData.first, AllocatorData.second);
6403 }
6404 }
6405 void Exit(CodeGenFunction &CGF) override {
6406 if (!CGF.HaveInsertPoint())
6407 return;
6408 for (const auto &AllocatorData : Allocators) {
6409 CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF,
6410 AllocatorData.first);
6411 }
6412 }
6413};
6414} // namespace
6415
6416void CGOpenMPRuntime::emitTargetOutlinedFunction(
6417 const OMPExecutableDirective &D, StringRef ParentName,
6418 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6419 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
6420 assert(!ParentName.empty() && "Invalid target region parent name!")((void)0);
6421 HasEmittedTargetRegion = true;
6422 SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
6423 for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
6424 for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
6425 const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
6426 if (!D.AllocatorTraits)
6427 continue;
6428 Allocators.emplace_back(D.Allocator, D.AllocatorTraits);
6429 }
6430 }
6431 OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
6432 CodeGen.setAction(UsesAllocatorAction);
6433 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
6434 IsOffloadEntry, CodeGen);
6435}
6436
6437void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
6438 const Expr *Allocator,
6439 const Expr *AllocatorTraits) {
6440 llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
6441 ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
6442 // Use default memspace handle.
6443 llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
6444 llvm::Value *NumTraits = llvm::ConstantInt::get(
6445 CGF.IntTy, cast<ConstantArrayType>(
6446 AllocatorTraits->getType()->getAsArrayTypeUnsafe())
6447 ->getSize()
6448 .getLimitedValue());
6449 LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
6450 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6451 AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy);
6452 AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
6453 AllocatorTraitsLVal.getBaseInfo(),
6454 AllocatorTraitsLVal.getTBAAInfo());
6455 llvm::Value *Traits =
6456 CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
6457
6458 llvm::Value *AllocatorVal =
6459 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
6460 CGM.getModule(), OMPRTL___kmpc_init_allocator),
6461 {ThreadId, MemSpaceHandle, NumTraits, Traits});
6462 // Store to allocator.
6463 CGF.EmitVarDecl(*cast<VarDecl>(
6464 cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
6465 LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
6466 AllocatorVal =
6467 CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy,
6468 Allocator->getType(), Allocator->getExprLoc());
6469 CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal);
6470}
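// Illustrative trigger (assumed user code): a uses_allocators clause with a
// constant-sized traits array, lowered to __kmpc_init_allocator on region
// entry by the code above and to __kmpc_destroy_allocator on exit below.
//
//   omp_alloctrait_t traits[1] = {{omp_atk_alignment, 64}};
//   omp_allocator_handle_t my_alloc;
//   #pragma omp target uses_allocators(my_alloc(traits))
//   { /* memory allocated via omp_alloc(size, my_alloc) */ }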
6471
6472void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
6473 const Expr *Allocator) {
6474 llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
6475 ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
6476 LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
6477 llvm::Value *AllocatorVal =
6478 CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc());
6479 AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(),
6480 CGF.getContext().VoidPtrTy,
6481 Allocator->getExprLoc());
6482 (void)CGF.EmitRuntimeCall(
6483 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
6484 OMPRTL___kmpc_destroy_allocator),
6485 {ThreadId, AllocatorVal});
6486}
6487
6488void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
6489 const OMPExecutableDirective &D, StringRef ParentName,
6490 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6491 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
6492 // Create a unique name for the entry function using the source location
6493 // information of the current target region. The name will be something like:
6494 //
6495 // __omp_offloading_DD_FFFF_PP_lBB
6496 //
6497 // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
6498 // mangled name of the function that encloses the target region and BB is the
6499 // line number of the target region.
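  //
  // For instance (values illustrative): device ID 0x2b, file ID 0x1a2b3c,
  // enclosing function _Z3foov, and line 42 produce
  // __omp_offloading_2b_1a2b3c__Z3foov_l42.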
6500
6501 unsigned DeviceID;
6502 unsigned FileID;
6503 unsigned Line;
6504 getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID,
6505 Line);
6506 SmallString<64> EntryFnName;
6507 {
6508 llvm::raw_svector_ostream OS(EntryFnName);
6509 OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
6510 << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
6511 }
6512
6513 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6514
6515 CodeGenFunction CGF(CGM, true);
6516 CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
6517 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6518
6519 OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
6520
6521 // If this target outline function is not an offload entry, we don't need to
6522 // register it.
6523 if (!IsOffloadEntry)
6524 return;
6525
6526  // The target region ID is used by the runtime library to identify the current
6527  // target region, so it only has to be unique and not necessarily point to
6528  // anything. It could be the pointer to the outlined function that implements
6529  // the target region, but we aren't using that so that the compiler doesn't
6530  // need to keep it alive and can therefore inline the host function if proven
6531  // worthwhile during optimization. On the other hand, if emitting code for the
6532  // device, the ID has to be the function address so that it can be retrieved
6533  // from the offloading entry and launched by the runtime library. We also mark
6534  // the outlined function with external linkage in case we are emitting code
6535  // for the device, because these functions will be entry points to the device.
6536
6537 if (CGM.getLangOpts().OpenMPIsDevice) {
6538 OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
6539 OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
6540 OutlinedFn->setDSOLocal(false);
6541 if (CGM.getTriple().isAMDGCN())
6542 OutlinedFn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
6543 } else {
6544 std::string Name = getName({EntryFnName, "region_id"});
6545 OutlinedFnID = new llvm::GlobalVariable(
6546 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
6547 llvm::GlobalValue::WeakAnyLinkage,
6548 llvm::Constant::getNullValue(CGM.Int8Ty), Name);
6549 }
6550
6551 // Register the information for the entry associated with this target region.
6552 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
6553 DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
6554 OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
6555
6556 // Add NumTeams and ThreadLimit attributes to the outlined GPU function
6557 int32_t DefaultValTeams = -1;
6558 getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
6559 if (DefaultValTeams > 0) {
6560 OutlinedFn->addFnAttr("omp_target_num_teams",
6561 std::to_string(DefaultValTeams));
6562 }
6563 int32_t DefaultValThreads = -1;
6564 getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
6565 if (DefaultValThreads > 0) {
6566 OutlinedFn->addFnAttr("omp_target_thread_limit",
6567 std::to_string(DefaultValThreads));
6568 }
6569}
6570
6571/// Checks if the expression is constant or does not have non-trivial function
6572/// calls.
6573static bool isTrivial(ASTContext &Ctx, const Expr *E) {
6574 // We can skip constant expressions.
6575 // We can skip expressions with trivial calls or simple expressions.
6576 return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
6577 !E->hasNonTrivialCall(Ctx)) &&
6578 !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
6579}
6580
6581const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
6582 const Stmt *Body) {
6583 const Stmt *Child = Body->IgnoreContainers();
6584 while (const auto *C = dyn_cast_or_null<CompoundStmt>(Child)) {
6585 Child = nullptr;
6586 for (const Stmt *S : C->body()) {
6587 if (const auto *E = dyn_cast<Expr>(S)) {
6588 if (isTrivial(Ctx, E))
6589 continue;
6590 }
6591 // Some of the statements can be ignored.
6592 if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
6593 isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
6594 continue;
6595 // Analyze declarations.
6596 if (const auto *DS = dyn_cast<DeclStmt>(S)) {
6597 if (llvm::all_of(DS->decls(), [](const Decl *D) {
6598 if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
6599 isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
6600 isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
6601 isa<UsingDirectiveDecl>(D) ||
6602 isa<OMPDeclareReductionDecl>(D) ||
6603 isa<OMPThreadPrivateDecl>(D) || isa<OMPAllocateDecl>(D))
6604 return true;
6605 const auto *VD = dyn_cast<VarDecl>(D);
6606 if (!VD)
6607 return false;
6608 return VD->hasGlobalStorage() || !VD->isUsed();
6609 }))
6610 continue;
6611 }
6612 // Found multiple children - cannot get the one child only.
6613 if (Child)
6614 return nullptr;
6615 Child = S;
6616 }
6617 if (Child)
6618 Child = Child->IgnoreContainers();
6619 }
6620 return Child;
6621}
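// Illustrative example (assumed user code) of the lookup above: in
//
//   #pragma omp target
//   {
//     ;                   // ignored (null statement)
//     int unused;         // ignored (unused local declaration)
//     #pragma omp teams   // the single child that is returned
//     { }
//   }
//
// the teams directive is the single compound child; a second non-ignorable
// statement would make this function return nullptr.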
6622
6623const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
6624 CodeGenFunction &CGF, const OMPExecutableDirective &D,
6625 int32_t &DefaultVal) {
6626
6627 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
6628 assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&((void)0)
6629 "Expected target-based executable directive.")((void)0);
6630 switch (DirectiveKind) {
6631 case OMPD_target: {
6632 const auto *CS = D.getInnermostCapturedStmt();
6633 const auto *Body =
6634 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
6635 const Stmt *ChildStmt =
6636 CGOpenMPRuntime::getSingleCompoundChild(CGF.getContext(), Body);
6637 if (const auto *NestedDir =
6638 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
6639 if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
6640 if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
6641 const Expr *NumTeams =
6642 NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
6643 if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
6644 if (auto Constant =
6645 NumTeams->getIntegerConstantExpr(CGF.getContext()))
6646 DefaultVal = Constant->getExtValue();
6647 return NumTeams;
6648 }
6649 DefaultVal = 0;
6650 return nullptr;
6651 }
6652 if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
6653 isOpenMPSimdDirective(NestedDir->getDirectiveKind())) {
6654 DefaultVal = 1;
6655 return nullptr;
6656 }
6657 DefaultVal = 1;
6658 return nullptr;
6659 }
6660    // A value of -1 indicates that no teams region needs to be emitted.
6661 DefaultVal = -1;
6662 return nullptr;
6663 }
6664 case OMPD_target_teams:
6665 case OMPD_target_teams_distribute:
6666 case OMPD_target_teams_distribute_simd:
6667 case OMPD_target_teams_distribute_parallel_for:
6668 case OMPD_target_teams_distribute_parallel_for_simd: {
6669 if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
6670 const Expr *NumTeams =
6671 D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
6672 if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
6673 if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext()))
6674 DefaultVal = Constant->getExtValue();
6675 return NumTeams;
6676 }
6677 DefaultVal = 0;
6678 return nullptr;
6679 }
6680 case OMPD_target_parallel:
6681 case OMPD_target_parallel_for:
6682 case OMPD_target_parallel_for_simd:
6683 case OMPD_target_simd:
6684 DefaultVal = 1;
6685 return nullptr;
6686 case OMPD_parallel:
6687 case OMPD_for:
6688 case OMPD_parallel_for:
6689 case OMPD_parallel_master:
6690 case OMPD_parallel_sections:
6691 case OMPD_for_simd:
6692 case OMPD_parallel_for_simd:
6693 case OMPD_cancel:
6694 case OMPD_cancellation_point:
6695 case OMPD_ordered:
6696 case OMPD_threadprivate:
6697 case OMPD_allocate:
6698 case OMPD_task:
6699 case OMPD_simd:
6700 case OMPD_tile:
6701 case OMPD_unroll:
6702 case OMPD_sections:
6703 case OMPD_section:
6704 case OMPD_single:
6705 case OMPD_master:
6706 case OMPD_critical:
6707 case OMPD_taskyield:
6708 case OMPD_barrier:
6709 case OMPD_taskwait:
6710 case OMPD_taskgroup:
6711 case OMPD_atomic:
6712 case OMPD_flush:
6713 case OMPD_depobj:
6714 case OMPD_scan:
6715 case OMPD_teams:
6716 case OMPD_target_data:
6717 case OMPD_target_exit_data:
6718 case OMPD_target_enter_data:
6719 case OMPD_distribute:
6720 case OMPD_distribute_simd:
6721 case OMPD_distribute_parallel_for:
6722 case OMPD_distribute_parallel_for_simd:
6723 case OMPD_teams_distribute:
6724 case OMPD_teams_distribute_simd:
6725 case OMPD_teams_distribute_parallel_for:
6726 case OMPD_teams_distribute_parallel_for_simd:
6727 case OMPD_target_update:
6728 case OMPD_declare_simd:
6729 case OMPD_declare_variant:
6730 case OMPD_begin_declare_variant:
6731 case OMPD_end_declare_variant:
6732 case OMPD_declare_target:
6733 case OMPD_end_declare_target:
6734 case OMPD_declare_reduction:
6735 case OMPD_declare_mapper:
6736 case OMPD_taskloop:
6737 case OMPD_taskloop_simd:
6738 case OMPD_master_taskloop:
6739 case OMPD_master_taskloop_simd:
6740 case OMPD_parallel_master_taskloop:
6741 case OMPD_parallel_master_taskloop_simd:
6742 case OMPD_requires:
6743 case OMPD_unknown:
6744 break;
6745 default:
6746 break;
6747 }
6748 llvm_unreachable("Unexpected directive kind.")__builtin_unreachable();
6749}
6750
6751llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
6752 CodeGenFunction &CGF, const OMPExecutableDirective &D) {
6753 assert(!CGF.getLangOpts().OpenMPIsDevice &&((void)0)
6754 "Clauses associated with the teams directive expected to be emitted "((void)0)
6755 "only for the host!")((void)0);
6756 CGBuilderTy &Bld = CGF.Builder;
6757 int32_t DefaultNT = -1;
6758 const Expr *NumTeams = getNumTeamsExprForTargetDirective(CGF, D, DefaultNT);
6759 if (NumTeams != nullptr) {
6760 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
6761
6762 switch (DirectiveKind) {
6763 case OMPD_target: {
6764 const auto *CS = D.getInnermostCapturedStmt();
6765 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6766 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6767 llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
6768 /*IgnoreResultAssign*/ true);
6769 return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
6770 /*isSigned=*/true);
6771 }
6772 case OMPD_target_teams:
6773 case OMPD_target_teams_distribute:
6774 case OMPD_target_teams_distribute_simd:
6775 case OMPD_target_teams_distribute_parallel_for:
6776 case OMPD_target_teams_distribute_parallel_for_simd: {
6777 CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
6778 llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
6779 /*IgnoreResultAssign*/ true);
6780 return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
6781 /*isSigned=*/true);
6782 }
6783 default:
6784 break;
6785 }
6786 } else if (DefaultNT == -1) {
6787 return nullptr;
6788 }
6789
6790 return Bld.getInt32(DefaultNT);
6791}
6792
6793static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
6794 llvm::Value *DefaultThreadLimitVal) {
6795 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
6796 CGF.getContext(), CS->getCapturedStmt());
6797 if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
6798 if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
6799 llvm::Value *NumThreads = nullptr;
6800 llvm::Value *CondVal = nullptr;
6801      // Handle the if clause. If an if clause is present, the number of
6802      // threads is calculated as <cond> ? (<numthreads> ? <numthreads> : 0) : 1.
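      // For example (assumed user code), for a parallel region nested in the
      // target region:
      //
      //   #pragma omp parallel if(cond) num_threads(n)
      //
      // the emitted value is cond ? n : 1, with n first clamped to any
      // enclosing thread_limit by the select instructions below.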
6803 if (Dir->hasClausesOfKind<OMPIfClause>()) {
6804 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6805 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6806 const OMPIfClause *IfClause = nullptr;
6807 for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
6808 if (C->getNameModifier() == OMPD_unknown ||
6809 C->getNameModifier() == OMPD_parallel) {
6810 IfClause = C;
6811 break;
6812 }
6813 }
6814 if (IfClause) {
6815 const Expr *Cond = IfClause->getCondition();
6816 bool Result;
6817 if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
6818 if (!Result)
6819 return CGF.Builder.getInt32(1);
6820 } else {
6821 CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange());
6822 if (const auto *PreInit =
6823 cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
6824 for (const auto *I : PreInit->decls()) {
6825 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6826 CGF.EmitVarDecl(cast<VarDecl>(*I));
6827 } else {
6828 CodeGenFunction::AutoVarEmission Emission =
6829 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
6830 CGF.EmitAutoVarCleanups(Emission);
6831 }
6832 }
6833 }
6834 CondVal = CGF.EvaluateExprAsBool(Cond);
6835 }
6836 }
6837 }
6838      // Check the value of the num_threads clause iff the if clause was not
6839      // specified or does not evaluate to false.
6840 if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
6841 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6842 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6843 const auto *NumThreadsClause =
6844 Dir->getSingleClause<OMPNumThreadsClause>();
6845 CodeGenFunction::LexicalScope Scope(
6846 CGF, NumThreadsClause->getNumThreads()->getSourceRange());
6847 if (const auto *PreInit =
6848 cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
6849 for (const auto *I : PreInit->decls()) {
6850 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6851 CGF.EmitVarDecl(cast<VarDecl>(*I));
6852 } else {
6853 CodeGenFunction::AutoVarEmission Emission =
6854 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
6855 CGF.EmitAutoVarCleanups(Emission);
6856 }
6857 }
6858 }
6859 NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads());
6860 NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty,
6861 /*isSigned=*/false);
6862 if (DefaultThreadLimitVal)
6863 NumThreads = CGF.Builder.CreateSelect(
6864 CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads),
6865 DefaultThreadLimitVal, NumThreads);
6866 } else {
6867 NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal
6868 : CGF.Builder.getInt32(0);
6869 }
6870 // Process condition of the if clause.
6871 if (CondVal) {
6872 NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads,
6873 CGF.Builder.getInt32(1));
6874 }
6875 return NumThreads;
6876 }
6877 if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
6878 return CGF.Builder.getInt32(1);
6879 return DefaultThreadLimitVal;
6880 }
6881 return DefaultThreadLimitVal ? DefaultThreadLimitVal
6882 : CGF.Builder.getInt32(0);
6883}
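// E.g. (sketch): for a captured region whose single child is
// '#pragma omp parallel num_threads(n)' with an incoming thread-limit TL,
// the helper above emits select(ult(TL, n), TL, n), i.e. the unsigned
// minimum of the two values.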
6884
6885const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
6886 CodeGenFunction &CGF, const OMPExecutableDirective &D,
6887 int32_t &DefaultVal) {
6888 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
6889  assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
6890         "Expected target-based executable directive.");
6891
6892 switch (DirectiveKind) {
6893 case OMPD_target:
6894    // A plain target directive has no thread_limit clause.
6895 return nullptr;
6896 case OMPD_target_teams:
6897 case OMPD_target_teams_distribute:
6898 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
6899 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
6900 const Expr *ThreadLimit = ThreadLimitClause->getThreadLimit();
6901 if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
6902 if (auto Constant =
6903 ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
6904 DefaultVal = Constant->getExtValue();
6905 return ThreadLimit;
6906 }
6907 return nullptr;
6908 case OMPD_target_parallel:
6909 case OMPD_target_parallel_for:
6910 case OMPD_target_parallel_for_simd:
6911 case OMPD_target_teams_distribute_parallel_for:
6912 case OMPD_target_teams_distribute_parallel_for_simd: {
6913 Expr *ThreadLimit = nullptr;
6914 Expr *NumThreads = nullptr;
6915 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
6916 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
6917 ThreadLimit = ThreadLimitClause->getThreadLimit();
6918 if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
6919 if (auto Constant =
6920 ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
6921 DefaultVal = Constant->getExtValue();
6922 }
6923 if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
6924 const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
6925 NumThreads = NumThreadsClause->getNumThreads();
6926 if (NumThreads->isIntegerConstantExpr(CGF.getContext())) {
6927 if (auto Constant =
6928 NumThreads->getIntegerConstantExpr(CGF.getContext())) {
6929 if (Constant->getExtValue() < DefaultVal) {
6930 DefaultVal = Constant->getExtValue();
6931 ThreadLimit = NumThreads;
6932 }
6933 }
6934 }
6935 }
6936 return ThreadLimit;
6937 }
6938 case OMPD_target_teams_distribute_simd:
6939 case OMPD_target_simd:
6940 DefaultVal = 1;
6941 return nullptr;
6942 case OMPD_parallel:
6943 case OMPD_for:
6944 case OMPD_parallel_for:
6945 case OMPD_parallel_master:
6946 case OMPD_parallel_sections:
6947 case OMPD_for_simd:
6948 case OMPD_parallel_for_simd:
6949 case OMPD_cancel:
6950 case OMPD_cancellation_point:
6951 case OMPD_ordered:
6952 case OMPD_threadprivate:
6953 case OMPD_allocate:
6954 case OMPD_task:
6955 case OMPD_simd:
6956 case OMPD_tile:
6957 case OMPD_unroll:
6958 case OMPD_sections:
6959 case OMPD_section:
6960 case OMPD_single:
6961 case OMPD_master:
6962 case OMPD_critical:
6963 case OMPD_taskyield:
6964 case OMPD_barrier:
6965 case OMPD_taskwait:
6966 case OMPD_taskgroup:
6967 case OMPD_atomic:
6968 case OMPD_flush:
6969 case OMPD_depobj:
6970 case OMPD_scan:
6971 case OMPD_teams:
6972 case OMPD_target_data:
6973 case OMPD_target_exit_data:
6974 case OMPD_target_enter_data:
6975 case OMPD_distribute:
6976 case OMPD_distribute_simd:
6977 case OMPD_distribute_parallel_for:
6978 case OMPD_distribute_parallel_for_simd:
6979 case OMPD_teams_distribute:
6980 case OMPD_teams_distribute_simd:
6981 case OMPD_teams_distribute_parallel_for:
6982 case OMPD_teams_distribute_parallel_for_simd:
6983 case OMPD_target_update:
6984 case OMPD_declare_simd:
6985 case OMPD_declare_variant:
6986 case OMPD_begin_declare_variant:
6987 case OMPD_end_declare_variant:
6988 case OMPD_declare_target:
6989 case OMPD_end_declare_target:
6990 case OMPD_declare_reduction:
6991 case OMPD_declare_mapper:
6992 case OMPD_taskloop:
6993 case OMPD_taskloop_simd:
6994 case OMPD_master_taskloop:
6995 case OMPD_master_taskloop_simd:
6996 case OMPD_parallel_master_taskloop:
6997 case OMPD_parallel_master_taskloop_simd:
6998 case OMPD_requires:
6999 case OMPD_unknown:
7000 break;
7001 default:
7002 break;
7003 }
7004  llvm_unreachable("Unsupported directive kind.");
7005}
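// E.g. (sketch): for
//   #pragma omp target parallel thread_limit(8) num_threads(4)
// both clauses fold to integer constants, so DefaultVal ends up as 4 (the
// smaller value) and the num_threads expression is returned as the limit.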
7006
7007llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
7008 CodeGenFunction &CGF, const OMPExecutableDirective &D) {
7009  assert(!CGF.getLangOpts().OpenMPIsDevice &&
7010         "Clauses associated with the teams directive expected to be emitted "
7011         "only for the host!");
7012 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
7013  assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
7014         "Expected target-based executable directive.");
7015 CGBuilderTy &Bld = CGF.Builder;
7016 llvm::Value *ThreadLimitVal = nullptr;
7017 llvm::Value *NumThreadsVal = nullptr;
7018 switch (DirectiveKind) {
7019 case OMPD_target: {
7020 const CapturedStmt *CS = D.getInnermostCapturedStmt();
7021 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
7022 return NumThreads;
7023 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
7024 CGF.getContext(), CS->getCapturedStmt());
7025 if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
7026 if (Dir->hasClausesOfKind<OMPThreadLimitClause>()) {
7027 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
7028 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
7029 const auto *ThreadLimitClause =
7030 Dir->getSingleClause<OMPThreadLimitClause>();
7031 CodeGenFunction::LexicalScope Scope(
7032 CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
7033 if (const auto *PreInit =
7034 cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
7035 for (const auto *I : PreInit->decls()) {
7036 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
7037 CGF.EmitVarDecl(cast<VarDecl>(*I));
7038 } else {
7039 CodeGenFunction::AutoVarEmission Emission =
7040 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
7041 CGF.EmitAutoVarCleanups(Emission);
7042 }
7043 }
7044 }
7045 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
7046 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
7047 ThreadLimitVal =
7048 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
7049 }
7050 if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) &&
7051 !isOpenMPDistributeDirective(Dir->getDirectiveKind())) {
7052 CS = Dir->getInnermostCapturedStmt();
7053 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
7054 CGF.getContext(), CS->getCapturedStmt());
7055 Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
7056 }
7057 if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) &&
7058 !isOpenMPSimdDirective(Dir->getDirectiveKind())) {
7059 CS = Dir->getInnermostCapturedStmt();
7060 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
7061 return NumThreads;
7062 }
7063 if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
7064 return Bld.getInt32(1);
7065 }
7066 return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
7067 }
7068 case OMPD_target_teams: {
7069 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
7070 CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
7071 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
7072 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
7073 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
7074 ThreadLimitVal =
7075 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
7076 }
7077 const CapturedStmt *CS = D.getInnermostCapturedStmt();
7078 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
7079 return NumThreads;
7080 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
7081 CGF.getContext(), CS->getCapturedStmt());
7082 if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
7083 if (Dir->getDirectiveKind() == OMPD_distribute) {
7084 CS = Dir->getInnermostCapturedStmt();
7085 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
7086 return NumThreads;
7087 }
7088 }
7089 return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
7090 }
7091 case OMPD_target_teams_distribute:
7092 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
7093 CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
7094 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
7095 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
7096 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
7097 ThreadLimitVal =
7098 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
7099 }
7100 return getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal);
7101 case OMPD_target_parallel:
7102 case OMPD_target_parallel_for:
7103 case OMPD_target_parallel_for_simd:
7104 case OMPD_target_teams_distribute_parallel_for:
7105 case OMPD_target_teams_distribute_parallel_for_simd: {
7106 llvm::Value *CondVal = nullptr;
7107    // Handle the if clause. If an if clause is present, the number of
7108    // threads is calculated as <cond> ? (<numthreads> ? <numthreads> : 0) : 1.
7109 if (D.hasClausesOfKind<OMPIfClause>()) {
7110 const OMPIfClause *IfClause = nullptr;
7111 for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
7112 if (C->getNameModifier() == OMPD_unknown ||
7113 C->getNameModifier() == OMPD_parallel) {
7114 IfClause = C;
7115 break;
7116 }
7117 }
7118 if (IfClause) {
7119 const Expr *Cond = IfClause->getCondition();
7120 bool Result;
7121 if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
7122 if (!Result)
7123 return Bld.getInt32(1);
7124 } else {
7125 CodeGenFunction::RunCleanupsScope Scope(CGF);
7126 CondVal = CGF.EvaluateExprAsBool(Cond);
7127 }
7128 }
7129 }
7130 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
7131 CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
7132 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
7133 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
7134 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
7135 ThreadLimitVal =
7136 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
7137 }
7138 if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
7139 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
7140 const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
7141 llvm::Value *NumThreads = CGF.EmitScalarExpr(
7142 NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true);
7143 NumThreadsVal =
7144 Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false);
7145 ThreadLimitVal = ThreadLimitVal
7146 ? Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal,
7147 ThreadLimitVal),
7148 NumThreadsVal, ThreadLimitVal)
7149 : NumThreadsVal;
7150 }
7151 if (!ThreadLimitVal)
7152 ThreadLimitVal = Bld.getInt32(0);
7153 if (CondVal)
7154 return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1));
7155 return ThreadLimitVal;
7156 }
7157 case OMPD_target_teams_distribute_simd:
7158 case OMPD_target_simd:
7159 return Bld.getInt32(1);
7160 case OMPD_parallel:
7161 case OMPD_for:
7162 case OMPD_parallel_for:
7163 case OMPD_parallel_master:
7164 case OMPD_parallel_sections:
7165 case OMPD_for_simd:
7166 case OMPD_parallel_for_simd:
7167 case OMPD_cancel:
7168 case OMPD_cancellation_point:
7169 case OMPD_ordered:
7170 case OMPD_threadprivate:
7171 case OMPD_allocate:
7172 case OMPD_task:
7173 case OMPD_simd:
7174 case OMPD_tile:
7175 case OMPD_unroll:
7176 case OMPD_sections:
7177 case OMPD_section:
7178 case OMPD_single:
7179 case OMPD_master:
7180 case OMPD_critical:
7181 case OMPD_taskyield:
7182 case OMPD_barrier:
7183 case OMPD_taskwait:
7184 case OMPD_taskgroup:
7185 case OMPD_atomic:
7186 case OMPD_flush:
7187 case OMPD_depobj:
7188 case OMPD_scan:
7189 case OMPD_teams:
7190 case OMPD_target_data:
7191 case OMPD_target_exit_data:
7192 case OMPD_target_enter_data:
7193 case OMPD_distribute:
7194 case OMPD_distribute_simd:
7195 case OMPD_distribute_parallel_for:
7196 case OMPD_distribute_parallel_for_simd:
7197 case OMPD_teams_distribute:
7198 case OMPD_teams_distribute_simd:
7199 case OMPD_teams_distribute_parallel_for:
7200 case OMPD_teams_distribute_parallel_for_simd:
7201 case OMPD_target_update:
7202 case OMPD_declare_simd:
7203 case OMPD_declare_variant:
7204 case OMPD_begin_declare_variant:
7205 case OMPD_end_declare_variant:
7206 case OMPD_declare_target:
7207 case OMPD_end_declare_target:
7208 case OMPD_declare_reduction:
7209 case OMPD_declare_mapper:
7210 case OMPD_taskloop:
7211 case OMPD_taskloop_simd:
7212 case OMPD_master_taskloop:
7213 case OMPD_master_taskloop_simd:
7214 case OMPD_parallel_master_taskloop:
7215 case OMPD_parallel_master_taskloop_simd:
7216 case OMPD_requires:
7217 case OMPD_unknown:
7218 break;
7219 default:
7220 break;
7221 }
7222  llvm_unreachable("Unsupported directive kind.");
7223}
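// E.g. (sketch): for '#pragma omp target parallel if(c) thread_limit(t)
// num_threads(n)' the function above emits roughly
//   select(c, select(ult(n, t), n, t), 1)
// which matches the <cond> ? (<numthreads> ? <numthreads> : 0) : 1 scheme
// described in the comments.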
7224
7225namespace {
7226LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
7227
7228// Utility to handle information from clauses associated with a given
7229// construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
7230// It provides a convenient interface to obtain the information and generate
7231// code for that information.
7232class MappableExprsHandler {
7233public:
7234 /// Values for bit flags used to specify the mapping type for
7235 /// offloading.
7236 enum OpenMPOffloadMappingFlags : uint64_t {
7237 /// No flags
7238 OMP_MAP_NONE = 0x0,
7239 /// Allocate memory on the device and move data from host to device.
7240 OMP_MAP_TO = 0x01,
7241 /// Allocate memory on the device and move data from device to host.
7242 OMP_MAP_FROM = 0x02,
7243 /// Always perform the requested mapping action on the element, even
7244 /// if it was already mapped before.
7245 OMP_MAP_ALWAYS = 0x04,
7246 /// Delete the element from the device environment, ignoring the
7247 /// current reference count associated with the element.
7248 OMP_MAP_DELETE = 0x08,
7249 /// The element being mapped is a pointer-pointee pair; both the
7250 /// pointer and the pointee should be mapped.
7251 OMP_MAP_PTR_AND_OBJ = 0x10,
7252 /// This flags signals that the base address of an entry should be
7253 /// passed to the target kernel as an argument.
7254 OMP_MAP_TARGET_PARAM = 0x20,
7255 /// Signal that the runtime library has to return the device pointer
7256 /// in the current position for the data being mapped. Used when we have the
7257 /// use_device_ptr or use_device_addr clause.
7258 OMP_MAP_RETURN_PARAM = 0x40,
7259 /// This flag signals that the reference being passed is a pointer to
7260 /// private data.
7261 OMP_MAP_PRIVATE = 0x80,
7262 /// Pass the element to the device by value.
7263 OMP_MAP_LITERAL = 0x100,
7264 /// Implicit map
7265 OMP_MAP_IMPLICIT = 0x200,
7266 /// Close is a hint to the runtime to allocate memory close to
7267 /// the target device.
7268 OMP_MAP_CLOSE = 0x400,
7269 /// 0x800 is reserved for compatibility with XLC.
7270 /// Produce a runtime error if the data is not already allocated.
7271 OMP_MAP_PRESENT = 0x1000,
7272 /// Signal that the runtime library should use args as an array of
7273 /// descriptor_dim pointers and use args_size as dims. Used when we have
7274    /// non-contiguous list items in the target update directive.
7275 OMP_MAP_NON_CONTIG = 0x100000000000,
7276 /// The 16 MSBs of the flags indicate whether the entry is member of some
7277 /// struct/class.
7278 OMP_MAP_MEMBER_OF = 0xffff000000000000,
7279    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF)
7280 };
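  // E.g. (sketch): a 'map(tofrom: x)' item that is also passed to the
  // kernel carries OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_TARGET_PARAM
  // (0x23); getMapTypeBits() below derives such masks from the clause.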
7281
7282 /// Get the offset of the OMP_MAP_MEMBER_OF field.
7283 static unsigned getFlagMemberOffset() {
7284 unsigned Offset = 0;
7285 for (uint64_t Remain = OMP_MAP_MEMBER_OF; !(Remain & 1);
7286 Remain = Remain >> 1)
7287 Offset++;
7288 return Offset;
7289 }
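  // With the flags above, OMP_MAP_MEMBER_OF occupies the 16 MSBs of the
  // 64-bit mask, so getFlagMemberOffset() returns 48 and a MEMBER_OF(n)
  // annotation encodes n in those bits (roughly n << 48).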
7290
7291 /// Class that holds debugging information for a data mapping to be passed to
7292 /// the runtime library.
7293 class MappingExprInfo {
7294 /// The variable declaration used for the data mapping.
7295 const ValueDecl *MapDecl = nullptr;
7296 /// The original expression used in the map clause, or null if there is
7297 /// none.
7298 const Expr *MapExpr = nullptr;
7299
7300 public:
7301 MappingExprInfo(const ValueDecl *MapDecl, const Expr *MapExpr = nullptr)
7302 : MapDecl(MapDecl), MapExpr(MapExpr) {}
7303
7304 const ValueDecl *getMapDecl() const { return MapDecl; }
7305 const Expr *getMapExpr() const { return MapExpr; }
7306 };
7307
7308 /// Class that associates information with a base pointer to be passed to the
7309 /// runtime library.
7310 class BasePointerInfo {
7311 /// The base pointer.
7312 llvm::Value *Ptr = nullptr;
7313 /// The base declaration that refers to this device pointer, or null if
7314 /// there is none.
7315 const ValueDecl *DevPtrDecl = nullptr;
7316
7317 public:
7318 BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
7319 : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
7320 llvm::Value *operator*() const { return Ptr; }
7321 const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
7322 void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
7323 };
7324
7325 using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
7326 using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
7327 using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
7328 using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
7329 using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
7330 using MapDimArrayTy = SmallVector<uint64_t, 4>;
7331 using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;
7332
7333 /// This structure contains combined information generated for mappable
7334 /// clauses, including base pointers, pointers, sizes, map types, user-defined
7335 /// mappers, and non-contiguous information.
7336 struct MapCombinedInfoTy {
7337 struct StructNonContiguousInfo {
7338 bool IsNonContiguous = false;
7339 MapDimArrayTy Dims;
7340 MapNonContiguousArrayTy Offsets;
7341 MapNonContiguousArrayTy Counts;
7342 MapNonContiguousArrayTy Strides;
7343 };
7344 MapExprsArrayTy Exprs;
7345 MapBaseValuesArrayTy BasePointers;
7346 MapValuesArrayTy Pointers;
7347 MapValuesArrayTy Sizes;
7348 MapFlagsArrayTy Types;
7349 MapMappersArrayTy Mappers;
7350 StructNonContiguousInfo NonContigInfo;
7351
7352 /// Append arrays in \a CurInfo.
7353 void append(MapCombinedInfoTy &CurInfo) {
7354 Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
7355 BasePointers.append(CurInfo.BasePointers.begin(),
7356 CurInfo.BasePointers.end());
7357 Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
7358 Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
7359 Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
7360 Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
7361 NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
7362 CurInfo.NonContigInfo.Dims.end());
7363 NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
7364 CurInfo.NonContigInfo.Offsets.end());
7365 NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
7366 CurInfo.NonContigInfo.Counts.end());
7367 NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
7368 CurInfo.NonContigInfo.Strides.end());
7369 }
7370 };
7371
7372  /// Map between a struct and its lowest & highest elements which have been
7373 /// mapped.
7374 /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
7375 /// HE(FieldIndex, Pointer)}
7376 struct StructRangeInfoTy {
7377 MapCombinedInfoTy PreliminaryMapData;
7378 std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
7379 0, Address::invalid()};
7380 std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
7381 0, Address::invalid()};
7382 Address Base = Address::invalid();
7383 Address LB = Address::invalid();
7384 bool IsArraySection = false;
7385 bool HasCompleteRecord = false;
7386 };
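  // E.g. (sketch): mapping s.i (field 0) and s.p (field 3) of the S2 type
  // used in the examples below records LowestElem = {0, &s.i} and
  // HighestElem = {3, &s.p}, so a single combined entry can cover the
  // whole [&s.i, &s.p] range of the struct.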
7387
7388private:
7389 /// Kind that defines how a device pointer has to be returned.
7390 struct MapInfo {
7391 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
7392 OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
7393 ArrayRef<OpenMPMapModifierKind> MapModifiers;
7394 ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
7395 bool ReturnDevicePointer = false;
7396 bool IsImplicit = false;
7397 const ValueDecl *Mapper = nullptr;
7398 const Expr *VarRef = nullptr;
7399 bool ForDeviceAddr = false;
7400
7401 MapInfo() = default;
7402 MapInfo(
7403 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
7404 OpenMPMapClauseKind MapType,
7405 ArrayRef<OpenMPMapModifierKind> MapModifiers,
7406 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
7407 bool ReturnDevicePointer, bool IsImplicit,
7408 const ValueDecl *Mapper = nullptr, const Expr *VarRef = nullptr,
7409 bool ForDeviceAddr = false)
7410 : Components(Components), MapType(MapType), MapModifiers(MapModifiers),
7411 MotionModifiers(MotionModifiers),
7412 ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
7413 Mapper(Mapper), VarRef(VarRef), ForDeviceAddr(ForDeviceAddr) {}
7414 };
7415
7416 /// If use_device_ptr or use_device_addr is used on a decl which is a struct
7417 /// member and there is no map information about it, then emission of that
7418 /// entry is deferred until the whole struct has been processed.
7419 struct DeferredDevicePtrEntryTy {
7420 const Expr *IE = nullptr;
7421 const ValueDecl *VD = nullptr;
7422 bool ForDeviceAddr = false;
7423
7424 DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD,
7425 bool ForDeviceAddr)
7426 : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
7427 };
7428
7429  /// The target directive from which the mappable clauses were extracted. It
7430  /// is either an executable directive or a user-defined mapper directive.
7431 llvm::PointerUnion<const OMPExecutableDirective *,
7432 const OMPDeclareMapperDecl *>
7433 CurDir;
7434
7435 /// Function the directive is being generated for.
7436 CodeGenFunction &CGF;
7437
7438 /// Set of all first private variables in the current directive.
7439 /// bool data is set to true if the variable is implicitly marked as
7440 /// firstprivate, false otherwise.
7441 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, bool> FirstPrivateDecls;
7442
7443 /// Map between device pointer declarations and their expression components.
7444 /// The key value for declarations in 'this' is null.
7445 llvm::DenseMap<
7446 const ValueDecl *,
7447 SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
7448 DevPointersMap;
7449
7450 llvm::Value *getExprTypeSize(const Expr *E) const {
7451 QualType ExprTy = E->getType().getCanonicalType();
7452
7453 // Calculate the size for array shaping expression.
7454 if (const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
7455 llvm::Value *Size =
7456 CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType());
7457 for (const Expr *SE : OAE->getDimensions()) {
7458 llvm::Value *Sz = CGF.EmitScalarExpr(SE);
7459 Sz = CGF.EmitScalarConversion(Sz, SE->getType(),
7460 CGF.getContext().getSizeType(),
7461 SE->getExprLoc());
7462 Size = CGF.Builder.CreateNUWMul(Size, Sz);
7463 }
7464 return Size;
7465 }
7466
7467 // Reference types are ignored for mapping purposes.
7468 if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
7469 ExprTy = RefTy->getPointeeType().getCanonicalType();
7470
7471 // Given that an array section is considered a built-in type, we need to
7472 // do the calculation based on the length of the section instead of relying
7473 // on CGF.getTypeSize(E->getType()).
7474 if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
7475 QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
7476 OAE->getBase()->IgnoreParenImpCasts())
7477 .getCanonicalType();
7478
7479      // If there is no length associated with the expression and the lower
7480      // bound is not specified either, that means we are using the whole
7481      // length of the base.
7482 if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
7483 !OAE->getLowerBound())
7484 return CGF.getTypeSize(BaseTy);
7485
7486 llvm::Value *ElemSize;
7487 if (const auto *PTy = BaseTy->getAs<PointerType>()) {
7488 ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
7489 } else {
7490 const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
7491        assert(ATy && "Expecting array type if not a pointer type.");
7492 ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
7493 }
7494
7495 // If we don't have a length at this point, that is because we have an
7496 // array section with a single element.
7497 if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
7498 return ElemSize;
7499
7500 if (const Expr *LenExpr = OAE->getLength()) {
7501 llvm::Value *LengthVal = CGF.EmitScalarExpr(LenExpr);
7502 LengthVal = CGF.EmitScalarConversion(LengthVal, LenExpr->getType(),
7503 CGF.getContext().getSizeType(),
7504 LenExpr->getExprLoc());
7505 return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
7506 }
7507      assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
7508             OAE->getLowerBound() && "expected array_section[lb:].");
7509      // Size = sizeof(base type) - lb * sizeof(element type).
7510 llvm::Value *LengthVal = CGF.getTypeSize(BaseTy);
7511 llvm::Value *LBVal = CGF.EmitScalarExpr(OAE->getLowerBound());
7512 LBVal = CGF.EmitScalarConversion(LBVal, OAE->getLowerBound()->getType(),
7513 CGF.getContext().getSizeType(),
7514 OAE->getLowerBound()->getExprLoc());
7515 LBVal = CGF.Builder.CreateNUWMul(LBVal, ElemSize);
7516 llvm::Value *Cmp = CGF.Builder.CreateICmpUGT(LengthVal, LBVal);
7517 llvm::Value *TrueVal = CGF.Builder.CreateNUWSub(LengthVal, LBVal);
7518 LengthVal = CGF.Builder.CreateSelect(
7519 Cmp, TrueVal, llvm::ConstantInt::get(CGF.SizeTy, 0));
7520 return LengthVal;
7521 }
7522 return CGF.getTypeSize(ExprTy);
7523 }
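  // E.g. (sketch): given 'int a[100]', getExprTypeSize() above yields
  // n * sizeof(int) for the section a[lb:n], and for a[lb:] it emits the
  // clamped size select(sizeof(a) > lb*sizeof(int),
  //                     sizeof(a) - lb*sizeof(int), 0).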
7524
7525 /// Return the corresponding bits for a given map clause modifier. Add
7526 /// a flag marking the map as a pointer if requested. Add a flag marking the
7527 /// map as the first one of a series of maps that relate to the same map
7528 /// expression.
7529 OpenMPOffloadMappingFlags getMapTypeBits(
7530 OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
7531 ArrayRef<OpenMPMotionModifierKind> MotionModifiers, bool IsImplicit,
7532 bool AddPtrFlag, bool AddIsTargetParamFlag, bool IsNonContiguous) const {
7533 OpenMPOffloadMappingFlags Bits =
7534 IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
7535 switch (MapType) {
7536 case OMPC_MAP_alloc:
7537 case OMPC_MAP_release:
7538      // alloc and release are the default behavior in the runtime library,
7539      // i.e. if we don't pass any bits for alloc/release, that is what the
7540      // runtime is going to do. Therefore, we don't need to signal anything
7541      // for these two type modifiers.
7542 break;
7543 case OMPC_MAP_to:
7544 Bits |= OMP_MAP_TO;
7545 break;
7546 case OMPC_MAP_from:
7547 Bits |= OMP_MAP_FROM;
7548 break;
7549 case OMPC_MAP_tofrom:
7550 Bits |= OMP_MAP_TO | OMP_MAP_FROM;
7551 break;
7552 case OMPC_MAP_delete:
7553 Bits |= OMP_MAP_DELETE;
7554 break;
7555 case OMPC_MAP_unknown:
7556      llvm_unreachable("Unexpected map type!");
7557 }
7558 if (AddPtrFlag)
7559 Bits |= OMP_MAP_PTR_AND_OBJ;
7560 if (AddIsTargetParamFlag)
7561 Bits |= OMP_MAP_TARGET_PARAM;
7562 if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_always)
7563 != MapModifiers.end())
7564 Bits |= OMP_MAP_ALWAYS;
7565 if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_close)
7566 != MapModifiers.end())
7567 Bits |= OMP_MAP_CLOSE;
7568 if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_present) !=
7569 MapModifiers.end() ||
7570 llvm::find(MotionModifiers, OMPC_MOTION_MODIFIER_present) !=
7571 MotionModifiers.end())
7572 Bits |= OMP_MAP_PRESENT;
7573 if (IsNonContiguous)
7574 Bits |= OMP_MAP_NON_CONTIG;
7575 return Bits;
7576 }
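  // E.g. (sketch): 'map(always, close, tofrom: x)' yields
  // OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS | OMP_MAP_CLOSE (0x407),
  // with OMP_MAP_TARGET_PARAM added on top when AddIsTargetParamFlag is set.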
7577
7578 /// Return true if the provided expression is a final array section. A
7579  /// final array section is one whose length can't be proved to be one.
7580 bool isFinalArraySectionExpression(const Expr *E) const {
7581 const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
7582
7583 // It is not an array section and therefore not a unity-size one.
7584 if (!OASE)
7585 return false;
7586
7587    // An array section with no colon always refers to a single element.
7588 if (OASE->getColonLocFirst().isInvalid())
7589 return false;
7590
7591 const Expr *Length = OASE->getLength();
7592
7593 // If we don't have a length we have to check if the array has size 1
7594 // for this dimension. Also, we should always expect a length if the
7595    // base type is a pointer.
7596 if (!Length) {
7597 QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
7598 OASE->getBase()->IgnoreParenImpCasts())
7599 .getCanonicalType();
7600 if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
7601 return ATy->getSize().getSExtValue() != 1;
7602 // If we don't have a constant dimension length, we have to consider
7603 // the current section as having any size, so it is not necessarily
7604      // unitary. If it happens to be unity size, that's the user's fault.
7605 return true;
7606 }
7607
7608 // Check if the length evaluates to 1.
7609 Expr::EvalResult Result;
7610 if (!Length->EvaluateAsInt(Result, CGF.getContext()))
7611      return true; // Can have a size greater than 1.
7612
7613 llvm::APSInt ConstLength = Result.Val.getInt();
7614 return ConstLength.getSExtValue() != 1;
7615 }
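  // E.g. (sketch): given 'int a[100]', the sections a[2:] and a[0:n] are
  // final (their length is not provably one), while a[3] is a subscript,
  // not a section, and a[5:1] has constant length 1, so neither is final.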
7616
7617 /// Generate the base pointers, section pointers, sizes, map type bits, and
7618 /// user-defined mappers (all included in \a CombinedInfo) for the provided
7619 /// map type, map or motion modifiers, and expression components.
7620 /// \a IsFirstComponent should be set to true if the provided set of
7621 /// components is the first associated with a capture.
7622 void generateInfoForComponentList(
7623 OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
7624 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
7625 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
7626 MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct,
7627 bool IsFirstComponentList, bool IsImplicit,
7628 const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
7629 const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr,
7630 ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
7631 OverlappedElements = llvm::None) const {
7632 // The following summarizes what has to be generated for each map and the
7633 // types below. The generated information is expressed in this order:
7634 // base pointer, section pointer, size, flags
7635 // (to add to the ones that come from the map type and modifier).
7636 //
7637 // double d;
7638 // int i[100];
7639 // float *p;
7640 //
7641 // struct S1 {
7642 // int i;
7643 // float f[50];
7644 // }
7645 // struct S2 {
7646 // int i;
7647 // float f[50];
7648 // S1 s;
7649 // double *p;
7650 // struct S2 *ps;
7651 // int &ref;
7652 // }
7653 // S2 s;
7654 // S2 *ps;
7655 //
7656 // map(d)
7657 // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
7658 //
7659 // map(i)
7660 // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
7661 //
7662 // map(i[1:23])
7663 // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
7664 //
7665 // map(p)
7666 // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
7667 //
7668 // map(p[1:24])
7669 // &p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM | PTR_AND_OBJ
7670 // in unified shared memory mode or for local pointers
7671 // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
7672 //
7673 // map(s)
7674 // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
7675 //
7676 // map(s.i)
7677 // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
7678 //
7679 // map(s.s.f)
7680 // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
7681 //
7682 // map(s.p)
7683 // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
7684 //
7685 // map(to: s.p[:22])
7686 // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
7687 // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
7688 // &(s.p), &(s.p[0]), 22*sizeof(double),
7689 // MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
7690 // (*) alloc space for struct members, only this is a target parameter
7691 // (**) map the pointer (nothing to be mapped in this example) (the compiler
7692 // optimizes this entry out, same in the examples below)
7693 // (***) map the pointee (map: to)
7694 //
7695 // map(to: s.ref)
7696 // &s, &(s.ref), sizeof(int*), TARGET_PARAM (*)
7697 // &s, &(s.ref), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
7698 // (*) alloc space for struct members, only this is a target parameter
7699 // (**) map the pointer (nothing to be mapped in this example) (the compiler
7700 // optimizes this entry out, same in the examples below)
7701 // (***) map the pointee (map: to)
7702 //
7703 // map(s.ps)
7704 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
7705 //
7706 // map(from: s.ps->s.i)
7707 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7708 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7709 // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7710 //
7711 // map(to: s.ps->ps)
7712 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7713 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7714 // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO
7715 //
7716 // map(s.ps->ps->ps)
7717 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7718 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7719 // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7720 // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
7721 //
7722 // map(to: s.ps->ps->s.f[:22])
7723 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7724 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7725 // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7726 // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
7727 //
7728 // map(ps)
7729 // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
7730 //
7731 // map(ps->i)
7732 // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
7733 //
7734 // map(ps->s.f)
7735 // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
7736 //
7737 // map(from: ps->p)
7738 // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
7739 //
7740 // map(to: ps->p[:22])
7741 // ps, &(ps->p), sizeof(double*), TARGET_PARAM
7742 // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
7743 // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
7744 //
7745 // map(ps->ps)
7746 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
7747 //
7748 // map(from: ps->ps->s.i)
7749 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7750 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7751 // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7752 //
7753 // map(from: ps->ps->ps)
7754 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7755 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7756 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7757 //
7758 // map(ps->ps->ps->ps)
7759 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7760 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7761 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7762 // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
7763 //
7764 // map(to: ps->ps->ps->s.f[:22])
7765 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7766 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7767 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7768 // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
7769 //
7770 // map(to: s.f[:22]) map(from: s.p[:33])
7771 // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
7772 // sizeof(double*) (**), TARGET_PARAM
7773 // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
7774 // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
7775 // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7776 // (*) allocate contiguous space needed to fit all mapped members even if
7777 // we allocate space for members not mapped (in this example,
7778 // s.f[22..49] and s.s are not mapped, yet we must allocate space for
7779 // them as well because they fall between &s.f[0] and &s.p)
7780 //
7781 // map(from: s.f[:22]) map(to: ps->p[:33])
7782 // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
7783 // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
7784 // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
7785 // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
7786 // (*) the struct this entry pertains to is the 2nd element in the list of
7787 // arguments, hence MEMBER_OF(2)
7788 //
7789 // map(from: s.f[:22], s.s) map(to: ps->p[:33])
7790 // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
7791 // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
7792 // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
7793 // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
7794 // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
7795 // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
7796 // (*) the struct this entry pertains to is the 4th element in the list
7797 // of arguments, hence MEMBER_OF(4)
7798
7799 // Track if the map information being generated is the first for a capture.
7800 bool IsCaptureFirstInfo = IsFirstComponentList;
7801 // When the variable is on a declare target link or in a to clause with
7802 // unified memory, a reference is needed to hold the host/device address
7803 // of the variable.
7804 bool RequiresReference = false;
7805
7806 // Scan the components from the base to the complete expression.
7807 auto CI = Components.rbegin();
7808 auto CE = Components.rend();
7809 auto I = CI;
7810
7811 // Track if the map information being generated is the first for a list of
7812 // components.
7813 bool IsExpressionFirstInfo = true;
7814 bool FirstPointerInComplexData = false;
7815 Address BP = Address::invalid();
7816 const Expr *AssocExpr = I->getAssociatedExpression();
7817 const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
7818 const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
7819 const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
7820
7821 if (isa<MemberExpr>(AssocExpr)) {
7822 // The base is the 'this' pointer. The content of the pointer is going
7823 // to be the base of the field being mapped.
7824 BP = CGF.LoadCXXThisAddress();
7825 } else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
7826 (OASE &&
7827 isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
7828 BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
7829 } else if (OAShE &&
7830 isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
7831 BP = Address(
7832 CGF.EmitScalarExpr(OAShE->getBase()),
7833 CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
7834 } else {
7835 // The base is the reference to the variable.
7836 // BP = &Var.
7837 BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
7838 if (const auto *VD =
7839 dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
7840 if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7841 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
7842 if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
7843 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
7844 CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) {
7845 RequiresReference = true;
7846 BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
7847 }
7848 }
7849 }
7850
7851 // If the variable is a pointer and is being dereferenced (i.e. is not
7852 // the last component), the base has to be the pointer itself, not its
7853 // reference. References are ignored for mapping purposes.
7854 QualType Ty =
7855 I->getAssociatedDeclaration()->getType().getNonReferenceType();
7856 if (Ty->isAnyPointerType() && std::next(I) != CE) {
7857 // No need to generate individual map information for the pointer, it
7858 // can be associated with the combined storage if shared memory mode is
7859        // active or the base declaration is not a global variable.
7860 const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
7861 if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
7862 !VD || VD->hasLocalStorage())
7863 BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
7864 else
7865 FirstPointerInComplexData = true;
7866 ++I;
7867 }
7868 }
7869
7870 // Track whether a component of the list should be marked as MEMBER_OF some
7871 // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
7872 // in a component list should be marked as MEMBER_OF, all subsequent entries
7873 // do not belong to the base struct. E.g.
7874 // struct S2 s;
7875 // s.ps->ps->ps->f[:]
7876 // (1) (2) (3) (4)
7877 // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
7878 // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
7879    // is the pointee of ps(2) which is not a member of struct s, so it should
7880 // be marked as such (it is still PTR_AND_OBJ).
7881 // The variable is initialized to false so that PTR_AND_OBJ entries which
7882 // are not struct members are not considered (e.g. array of pointers to
7883 // data).
7884 bool ShouldBeMemberOf = false;
7885
7886 // Variable keeping track of whether or not we have encountered a component
7887 // in the component list which is a member expression. Useful when we have a
7888 // pointer or a final array section, in which case it is the previous
7889 // component in the list which tells us whether we have a member expression.
7890 // E.g. X.f[:]
7891 // While processing the final array section "[:]" it is "f" which tells us
7892 // whether we are dealing with a member of a declared struct.
7893 const MemberExpr *EncounteredME = nullptr;
7894
7895    // Track the total number of dimensions. Start from one for the dummy
7896 // dimension.
7897 uint64_t DimSize = 1;
7898
7899 bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
7900 bool IsPrevMemberReference = false;
7901
7902 for (; I != CE; ++I) {
7903      // If the current component is a member of a struct (parent struct), mark it.
7904 if (!EncounteredME) {
7905 EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
7906 // If we encounter a PTR_AND_OBJ entry from now on it should be marked
7907 // as MEMBER_OF the parent struct.
7908 if (EncounteredME) {
7909 ShouldBeMemberOf = true;
7910          // Do not emit as a complex pointer if this is actually not an
7911          // array-like expression.
7912 if (FirstPointerInComplexData) {
7913 QualType Ty = std::prev(I)
7914 ->getAssociatedDeclaration()
7915 ->getType()
7916 .getNonReferenceType();
7917 BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
7918 FirstPointerInComplexData = false;
7919 }
7920 }
7921 }
7922
7923 auto Next = std::next(I);
7924
7925 // We need to generate the addresses and sizes if this is the last
7926 // component, if the component is a pointer or if it is an array section
7927 // whose length can't be proved to be one. If this is a pointer, it
7928 // becomes the base address for the following components.
7929
7930      // A final array section is one whose length can't be proved to be one.
7931      // If the map item is non-contiguous then we don't treat any array
7932      // section as a final array section.
7933 bool IsFinalArraySection =
7934 !IsNonContiguous &&
7935 isFinalArraySectionExpression(I->getAssociatedExpression());
7936
7937 // If we have a declaration for the mapping use that, otherwise use
7938 // the base declaration of the map clause.
7939 const ValueDecl *MapDecl = (I->getAssociatedDeclaration())
7940 ? I->getAssociatedDeclaration()
7941 : BaseDecl;
7942 MapExpr = (I->getAssociatedExpression()) ? I->getAssociatedExpression()
7943 : MapExpr;
7944
7945      // Get information on whether the element is a pointer. We have to
7946      // treat array sections specially, given that they are built-in
7947      // types.
7948 const auto *OASE =
7949 dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
7950 const auto *OAShE =
7951 dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
7952 const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
7953 const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
7954 bool IsPointer =
7955 OAShE ||
7956 (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
7957 .getCanonicalType()
7958 ->isAnyPointerType()) ||
7959 I->getAssociatedExpression()->getType()->isAnyPointerType();
7960 bool IsMemberReference = isa<MemberExpr>(I->getAssociatedExpression()) &&
7961 MapDecl &&
7962 MapDecl->getType()->isLValueReferenceType();
7963 bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
7964
7965 if (OASE)
7966 ++DimSize;
7967
7968 if (Next == CE || IsMemberReference || IsNonDerefPointer ||
7969 IsFinalArraySection) {
7970 // If this is not the last component, we expect the pointer to be
7971 // associated with an array expression or member expression.
7972      assert((Next == CE ||
7973              isa<MemberExpr>(Next->getAssociatedExpression()) ||
7974              isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
7975              isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
7976              isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) ||
7977              isa<UnaryOperator>(Next->getAssociatedExpression()) ||
7978              isa<BinaryOperator>(Next->getAssociatedExpression())) &&
7979             "Unexpected expression");
7980
7981 Address LB = Address::invalid();
7982 Address LowestElem = Address::invalid();
7983 auto &&EmitMemberExprBase = [](CodeGenFunction &CGF,
7984 const MemberExpr *E) {
7985 const Expr *BaseExpr = E->getBase();
7986 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a
7987 // scalar.
7988 LValue BaseLV;
7989 if (E->isArrow()) {
7990 LValueBaseInfo BaseInfo;
7991 TBAAAccessInfo TBAAInfo;
7992 Address Addr =
7993 CGF.EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
7994 QualType PtrTy = BaseExpr->getType()->getPointeeType();
7995 BaseLV = CGF.MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
7996 } else {
7997 BaseLV = CGF.EmitOMPSharedLValue(BaseExpr);
7998 }
7999 return BaseLV;
8000 };
8001 if (OAShE) {
8002 LowestElem = LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
8003 CGF.getContext().getTypeAlignInChars(
8004 OAShE->getBase()->getType()));
8005 } else if (IsMemberReference) {
8006 const auto *ME = cast<MemberExpr>(I->getAssociatedExpression());
8007 LValue BaseLVal = EmitMemberExprBase(CGF, ME);
8008 LowestElem = CGF.EmitLValueForFieldInitialization(
8009 BaseLVal, cast<FieldDecl>(MapDecl))
8010 .getAddress(CGF);
8011 LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
8012 .getAddress(CGF);
8013 } else {
8014 LowestElem = LB =
8015 CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
8016 .getAddress(CGF);
8017 }
8018
8019 // If this component is a pointer inside the base struct then we don't
8020 // need to create any entry for it - it will be combined with the object
8021 // it is pointing to into a single PTR_AND_OBJ entry.
8022 bool IsMemberPointerOrAddr =
8023 EncounteredME &&
8024 (((IsPointer || ForDeviceAddr) &&
8025 I->getAssociatedExpression() == EncounteredME) ||
8026 (IsPrevMemberReference && !IsPointer) ||
8027 (IsMemberReference && Next != CE &&
8028 !Next->getAssociatedExpression()->getType()->isPointerType()));
8029 if (!OverlappedElements.empty() && Next == CE) {
8030 // Handle base element with the info for overlapped elements.
8031          assert(!PartialStruct.Base.isValid() && "The base element is set.");
8032          assert(!IsPointer &&
8033                 "Unexpected base element with the pointer type.");
8034 // Mark the whole struct as the struct that requires allocation on the
8035 // device.
8036 PartialStruct.LowestElem = {0, LowestElem};
8037 CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
8038 I->getAssociatedExpression()->getType());
8039 Address HB = CGF.Builder.CreateConstGEP(
8040 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LowestElem,
8041 CGF.VoidPtrTy),
8042 TypeSize.getQuantity() - 1);
8043 PartialStruct.HighestElem = {
8044 std::numeric_limits<decltype(
8045 PartialStruct.HighestElem.first)>::max(),
8046 HB};
8047 PartialStruct.Base = BP;
8048 PartialStruct.LB = LB;
8049          assert(
8050              PartialStruct.PreliminaryMapData.BasePointers.empty() &&
8051              "Overlapped elements must be used only once for the variable.");
8052 std::swap(PartialStruct.PreliminaryMapData, CombinedInfo);
8053 // Emit data for non-overlapped data.
8054 OpenMPOffloadMappingFlags Flags =
8055 OMP_MAP_MEMBER_OF |
8056 getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
8057 /*AddPtrFlag=*/false,
8058 /*AddIsTargetParamFlag=*/false, IsNonContiguous);
8059 llvm::Value *Size = nullptr;
8060 // Do bitcopy of all non-overlapped structure elements.
8061 for (OMPClauseMappableExprCommon::MappableExprComponentListRef
8062 Component : OverlappedElements) {
8063 Address ComponentLB = Address::invalid();
8064 for (const OMPClauseMappableExprCommon::MappableComponent &MC :
8065 Component) {
8066 if (const ValueDecl *VD = MC.getAssociatedDeclaration()) {
8067 const auto *FD = dyn_cast<FieldDecl>(VD);
8068 if (FD && FD->getType()->isLValueReferenceType()) {
8069 const auto *ME =
8070 cast<MemberExpr>(MC.getAssociatedExpression());
8071 LValue BaseLVal = EmitMemberExprBase(CGF, ME);
8072 ComponentLB =
8073 CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
8074 .getAddress(CGF);
8075 } else {
8076 ComponentLB =
8077 CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
8078 .getAddress(CGF);
8079 }
8080 Size = CGF.Builder.CreatePtrDiff(
8081 CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
8082 CGF.EmitCastToVoidPtr(LB.getPointer()));
8083 break;
8084 }
8085 }
8086          assert(Size && "Failed to determine structure size");
8087 CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
8088 CombinedInfo.BasePointers.push_back(BP.getPointer());
8089 CombinedInfo.Pointers.push_back(LB.getPointer());
8090 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
8091 Size, CGF.Int64Ty, /*isSigned=*/true));
8092 CombinedInfo.Types.push_back(Flags);
8093 CombinedInfo.Mappers.push_back(nullptr);
8094 CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
8095 : 1);
8096 LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
8097 }
8098 CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
8099 CombinedInfo.BasePointers.push_back(BP.getPointer());
8100 CombinedInfo.Pointers.push_back(LB.getPointer());
8101 Size = CGF.Builder.CreatePtrDiff(
8102 CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
8103 CGF.EmitCastToVoidPtr(LB.getPointer()));
8104 CombinedInfo.Sizes.push_back(
8105 CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
8106 CombinedInfo.Types.push_back(Flags);
8107 CombinedInfo.Mappers.push_back(nullptr);
8108 CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
8109 : 1);
8110 break;
8111 }
8112 llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
8113 if (!IsMemberPointerOrAddr ||
8114 (Next == CE && MapType != OMPC_MAP_unknown)) {
8115 CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
8116 CombinedInfo.BasePointers.push_back(BP.getPointer());
8117 CombinedInfo.Pointers.push_back(LB.getPointer());
8118 CombinedInfo.Sizes.push_back(
8119 CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
8120 CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
8121 : 1);
8122
8123 // If Mapper is valid, the last component inherits the mapper.
8124 bool HasMapper = Mapper && Next == CE;
8125 CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
8126
8127 // We need to add a pointer flag for each map that comes from the
8128 // same expression except for the first one. We also need to signal
8129          // this map is the first one that relates to the current capture
8130 // (there is a set of entries for each capture).
8131 OpenMPOffloadMappingFlags Flags = getMapTypeBits(
8132 MapType, MapModifiers, MotionModifiers, IsImplicit,
8133 !IsExpressionFirstInfo || RequiresReference ||
8134 FirstPointerInComplexData || IsMemberReference,
8135 IsCaptureFirstInfo && !RequiresReference, IsNonContiguous);
8136
8137 if (!IsExpressionFirstInfo || IsMemberReference) {
8138 // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
8139 // then we reset the TO/FROM/ALWAYS/DELETE/CLOSE flags.
8140 if (IsPointer || (IsMemberReference && Next != CE))
8141 Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
8142 OMP_MAP_DELETE | OMP_MAP_CLOSE);
8143
8144 if (ShouldBeMemberOf) {
8145 // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
8146 // should be later updated with the correct value of MEMBER_OF.
8147 Flags |= OMP_MAP_MEMBER_OF;
8148 // From now on, all subsequent PTR_AND_OBJ entries should not be
8149 // marked as MEMBER_OF.
8150 ShouldBeMemberOf = false;
8151 }
8152 }
8153
8154 CombinedInfo.Types.push_back(Flags);
8155 }
8156
8157 // If we have encountered a member expression so far, keep track of the
8158 // mapped member. If the parent is "*this", then the value declaration
8159 // is nullptr.
8160 if (EncounteredME) {
8161 const auto *FD = cast<FieldDecl>(EncounteredME->getMemberDecl());
8162 unsigned FieldIndex = FD->getFieldIndex();
8163
8164 // Update info about the lowest and highest elements for this struct
8165 if (!PartialStruct.Base.isValid()) {
8166 PartialStruct.LowestElem = {FieldIndex, LowestElem};
8167 if (IsFinalArraySection) {
8168 Address HB =
8169 CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
8170 .getAddress(CGF);
8171 PartialStruct.HighestElem = {FieldIndex, HB};
8172 } else {
8173 PartialStruct.HighestElem = {FieldIndex, LowestElem};
8174 }
8175 PartialStruct.Base = BP;
8176 PartialStruct.LB = BP;
8177 } else if (FieldIndex < PartialStruct.LowestElem.first) {
8178 PartialStruct.LowestElem = {FieldIndex, LowestElem};
8179 } else if (FieldIndex > PartialStruct.HighestElem.first) {
8180 PartialStruct.HighestElem = {FieldIndex, LowestElem};
8181 }
8182 }
8183
8184      // Need to emit a combined struct for array sections.
8185 if (IsFinalArraySection || IsNonContiguous)
8186 PartialStruct.IsArraySection = true;
8187
8188 // If we have a final array section, we are done with this expression.
8189 if (IsFinalArraySection)
8190 break;
8191
8192 // The pointer becomes the base for the next element.
8193 if (Next != CE)
8194 BP = IsMemberReference ? LowestElem : LB;
8195
8196 IsExpressionFirstInfo = false;
8197 IsCaptureFirstInfo = false;
8198 FirstPointerInComplexData = false;
8199 IsPrevMemberReference = IsMemberReference;
8200 } else if (FirstPointerInComplexData) {
8201 QualType Ty = Components.rbegin()
8202 ->getAssociatedDeclaration()
8203 ->getType()
8204 .getNonReferenceType();
8205 BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
8206 FirstPointerInComplexData = false;
8207 }
8208 }
8209 // If we ran through the whole component list, allocate space for the
8210 // whole record.
8211 if (!EncounteredME)
8212 PartialStruct.HasCompleteRecord = true;
8213
8214 if (!IsNonContiguous)
8215 return;
8216
8217 const ASTContext &Context = CGF.getContext();
8218
8219 // For supporting stride in array section, we need to initialize the first
8220 // dimension size as 1, first offset as 0, and first count as 1
8221 MapValuesArrayTy CurOffsets = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 0)};
8222 MapValuesArrayTy CurCounts = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
8223 MapValuesArrayTy CurStrides;
8224 MapValuesArrayTy DimSizes{llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
8225 uint64_t ElementTypeSize = 0; // Defensive init; set from the first array component below.
8226
8227 // Collect Size information for each dimension and get the element size as
8228 // the first Stride. For example, for `int arr[10][10]`, the DimSizes
8229 // should be [10, 10] and the first stride is 4 bytes.
8230 for (const OMPClauseMappableExprCommon::MappableComponent &Component :
8231 Components) {
8232 const Expr *AssocExpr = Component.getAssociatedExpression();
8233 const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
8234
8235 if (!OASE)
8236 continue;
8237
8238 QualType Ty = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
8239 auto *CAT = Context.getAsConstantArrayType(Ty);
8240 auto *VAT = Context.getAsVariableArrayType(Ty);
8241
8242 // We need all the dimension sizes except for the last dimension.
8243 assert((VAT || CAT || &Component == &*Components.begin()) &&
8244 "Should be either ConstantArray or VariableArray if not the "
8245 "first Component");
8246
8247 // Get element size if CurStrides is empty.
8248 if (CurStrides.empty()) {
8249 const Type *ElementType = nullptr;
8250 if (CAT)
8251 ElementType = CAT->getElementType().getTypePtr();
8252 else if (VAT)
8253 ElementType = VAT->getElementType().getTypePtr();
8254 else
8255 assert(&Component == &*Components.begin() &&
8256 "Only expect pointer (non CAT or VAT) when this is the "
8257 "first Component");
8258 // If ElementType is null, then it means the base is a pointer
8259 // (neither CAT nor VAT) and we'll attempt to get ElementType again
8260 // on the next iteration.
8261 if (ElementType) {
8262 // For the case that having pointer as base, we need to remove one
8263 // level of indirection.
8264 if (&Component != &*Components.begin())
8265 ElementType = ElementType->getPointeeOrArrayElementType();
8266 ElementTypeSize =
8267 Context.getTypeSizeInChars(ElementType).getQuantity();
8268 CurStrides.push_back(
8269 llvm::ConstantInt::get(CGF.Int64Ty, ElementTypeSize));
8270 }
8271 }
8272 // Record the dimension size for every dimension except the last, since
8273 // we do not need it.
8274 if (DimSizes.size() < Components.size() - 1) {
8275 if (CAT)
8276 DimSizes.push_back(llvm::ConstantInt::get(
8277 CGF.Int64Ty, CAT->getSize().getZExtValue()));
8278 else if (VAT)
8279 DimSizes.push_back(CGF.Builder.CreateIntCast(
8280 CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty,
8281 /*IsSigned=*/false));
8282 }
8283 }
8284
8285 // Skip the dummy dimension since we already have its information.
8286 auto DI = DimSizes.begin() + 1;
8287 // Running product of dimension sizes times the element size.
8288 llvm::Value *DimProd =
8289 llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize);
8290
8291 // Collect info for non-contiguous maps. Note that offset, count, and
8292 // stride are only meaningful for an array section, so we insert a null
8293 // for anything other than an array section.
8294 // Also, the lengths of the offset, count, and stride arrays are not the
8295 // same as those of pointers, base_pointers, sizes, or dims. Instead,
8296 // they match the number of non-contiguous declarations in the target
8297 // update to/from clause.
8298 for (const OMPClauseMappableExprCommon::MappableComponent &Component :
8299 Components) {
8300 const Expr *AssocExpr = Component.getAssociatedExpression();
8301
8302 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr)) {
8303 llvm::Value *Offset = CGF.Builder.CreateIntCast(
8304 CGF.EmitScalarExpr(AE->getIdx()), CGF.Int64Ty,
8305 /*isSigned=*/false);
8306 CurOffsets.push_back(Offset);
8307 CurCounts.push_back(llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/1));
8308 CurStrides.push_back(CurStrides.back());
8309 continue;
8310 }
8311
8312 const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
8313
8314 if (!OASE)
8315 continue;
8316
8317 // Offset
8318 const Expr *OffsetExpr = OASE->getLowerBound();
8319 llvm::Value *Offset = nullptr;
8320 if (!OffsetExpr) {
8321 // If offset is absent, then we just set it to zero.
8322 Offset = llvm::ConstantInt::get(CGF.Int64Ty, 0);
8323 } else {
8324 Offset = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(OffsetExpr),
8325 CGF.Int64Ty,
8326 /*isSigned=*/false);
8327 }
8328 CurOffsets.push_back(Offset);
8329
8330 // Count
8331 const Expr *CountExpr = OASE->getLength();
8332 llvm::Value *Count = nullptr;
8333 if (!CountExpr) {
8334 // In Clang, once a higher dimension is an array section, all the lower
8335 // dimensions are constructed as array sections too. However, for a case
8336 // like arr[0:2][2], Clang constructs the inner dimension as an array
8337 // section even though, per the spec, it is not in array-section form.
8338 if (!OASE->getColonLocFirst().isValid() &&
8339 !OASE->getColonLocSecond().isValid()) {
8340 Count = llvm::ConstantInt::get(CGF.Int64Ty, 1);
8341 } else {
8342 // OpenMP 5.0, 2.1.5 Array Sections, Description.
8343 // When the length is absent it defaults to ⌈(size −
8344 // lower-bound)/stride⌉, where size is the size of the array
8345 // dimension.
8346 const Expr *StrideExpr = OASE->getStride();
8347 llvm::Value *Stride =
8348 StrideExpr
8349 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
8350 CGF.Int64Ty, /*isSigned=*/false)
8351 : nullptr;
8352 if (Stride)
8353 Count = CGF.Builder.CreateUDiv(
8354 CGF.Builder.CreateNUWSub(*DI, Offset), Stride);
8355 else
8356 Count = CGF.Builder.CreateNUWSub(*DI, Offset);
8357 }
8358 } else {
8359 Count = CGF.EmitScalarExpr(CountExpr);
8360 }
8361 Count = CGF.Builder.CreateIntCast(Count, CGF.Int64Ty, /*isSigned=*/false);
8362 CurCounts.push_back(Count);
8363
8364 // Stride_n' = Stride_n * (D_0 * D_1 ... * D_n-1) * Unit size
8365 // Take `int arr[5][5][5]` and `arr[0:2:2][1:2:1][0:2:2]` as an example:
8366 // Offset Count Stride
8367 // D0 0 1 4 (int) <- dummy dimension
8368 // D1 0 2 8 (2 * (1) * 4)
8369 // D2 1 2 20 (1 * (1 * 5) * 4)
8370 // D3 0 2 200 (2 * (1 * 5 * 5) * 4)
8371 const Expr *StrideExpr = OASE->getStride();
8372 llvm::Value *Stride =
8373 StrideExpr
8374 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
8375 CGF.Int64Ty, /*isSigned=*/false)
8376 : nullptr;
8377 DimProd = CGF.Builder.CreateNUWMul(DimProd, *(DI - 1));
8378 if (Stride)
8379 CurStrides.push_back(CGF.Builder.CreateNUWMul(DimProd, Stride));
8380 else
8381 CurStrides.push_back(DimProd);
8382 if (DI != DimSizes.end())
8383 ++DI;
8384 }
8385
8386 CombinedInfo.NonContigInfo.Offsets.push_back(CurOffsets);
8387 CombinedInfo.NonContigInfo.Counts.push_back(CurCounts);
8388 CombinedInfo.NonContigInfo.Strides.push_back(CurStrides);
8389 }
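// Illustration (standalone sketch, editorial; not part of this file): the
// stride arithmetic above, reproduced for the int arr[5][5][5] example with
// the section arr[0:2:2][1:2:1][0:2:2]. All names below are hypothetical.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t UnitSize = sizeof(int); // 4 bytes; the dummy stride D0.
  // Dimension sizes in the order the walk above consumes them: the dummy
  // dimension first; the last array dimension is never needed.
  const uint64_t DimSizes[] = {1, 5, 5};
  // Declared strides, innermost dimension first (D1, D2, D3).
  const uint64_t SectionStrides[] = {2, 1, 2};
  uint64_t DimProd = UnitSize;
  for (int N = 0; N < 3; ++N) {
    // Stride_n' = Stride_n * (D_0 * D_1 ... * D_n-1) * unit size.
    DimProd *= DimSizes[N];
    uint64_t ByteStride = SectionStrides[N] * DimProd;
    std::printf("D%d stride = %llu\n", N + 1, (unsigned long long)ByteStride);
  }
  return 0; // Prints 8, 20, and 200, matching the table above.
}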
8390
8391 /// Return the adjusted map modifiers if the declaration a capture refers to
8392 /// appears in a first-private clause. This is expected to be used only with
8393 /// directives that start with 'target'.
8394 MappableExprsHandler::OpenMPOffloadMappingFlags
8395 getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
8396 assert(Cap.capturesVariable() && "Expected capture by reference only!");
8397
8398 // A first private variable captured by reference will use only the
8399 // 'private ptr' and 'map to' flag. Return the right flags if the captured
8400 // declaration is known as first-private in this handler.
8401 if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
8402 if (Cap.getCapturedVar()->getType()->isAnyPointerType())
8403 return MappableExprsHandler::OMP_MAP_TO |
8404 MappableExprsHandler::OMP_MAP_PTR_AND_OBJ;
8405 return MappableExprsHandler::OMP_MAP_PRIVATE |
8406 MappableExprsHandler::OMP_MAP_TO;
8407 }
8408 return MappableExprsHandler::OMP_MAP_TO |
8409 MappableExprsHandler::OMP_MAP_FROM;
8410 }
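// Illustration (standalone sketch, editorial; not part of this file): the
// decision above as a plain predicate. The flag values are stand-ins, not
// the real OpenMPOffloadMappingFlags encoding.
#include <cstdint>
#include <cstdio>

enum ToyFlags : uint64_t { TO = 1, FROM = 2, PRIVATE = 4, PTR_AND_OBJ = 8 };

uint64_t flagsForCapturedVar(bool IsFirstPrivate, bool IsAnyPointer) {
  if (IsFirstPrivate)
    return IsAnyPointer ? (TO | PTR_AND_OBJ) : (PRIVATE | TO);
  return TO | FROM; // Not firstprivate: default tofrom mapping.
}

int main() {
  // A firstprivate pointer is mapped 'to' together with its pointee object.
  std::printf("%llu\n", (unsigned long long)flagsForCapturedVar(true, true));
  return 0;
}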
8411
8412 static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
8413 // Shift the one-based position left by getFlagMemberOffset() bits.
8414 return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
8415 << getFlagMemberOffset());
8416 }
8417
8418 static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
8419 OpenMPOffloadMappingFlags MemberOfFlag) {
8420 // If the entry is PTR_AND_OBJ but has not been marked with the special
8421 // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
8422 // marked as MEMBER_OF.
8423 if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
8424 ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
8425 return;
8426
8427 // Reset the placeholder value to prepare the flag for the assignment of the
8428 // proper MEMBER_OF value.
8429 Flags &= ~OMP_MAP_MEMBER_OF;
8430 Flags |= MemberOfFlag;
8431 }
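// Illustration (standalone sketch, editorial; not part of this file): the
// MEMBER_OF encoding used by the two helpers above, assuming the field
// occupies the top 16 bits of the 64-bit flag word (offset 48); treat that
// offset as an assumption of this sketch.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned FlagMemberOffset = 48;
  const uint64_t MemberOfMask = 0xffffULL << FlagMemberOffset;
  auto getMemberOf = [&](unsigned Position) -> uint64_t {
    return ((uint64_t)Position + 1) << FlagMemberOffset; // One-based index.
  };
  // Position 0 (the first base pointer) encodes as 1 in the field.
  assert(getMemberOf(0) == (1ULL << FlagMemberOffset));
  // The all-ones value 0xFFFF is the placeholder that setCorrectMemberOfFlag
  // looks for before patching in the real parent index.
  assert(getMemberOf(0xfffe) == MemberOfMask);
  return 0;
}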
8432
8433 void getPlainLayout(const CXXRecordDecl *RD,
8434 llvm::SmallVectorImpl<const FieldDecl *> &Layout,
8435 bool AsBase) const {
8436 const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
8437
8438 llvm::StructType *St =
8439 AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType();
8440
8441 unsigned NumElements = St->getNumElements();
8442 llvm::SmallVector<
8443 llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
8444 RecordLayout(NumElements);
8445
8446 // Fill bases.
8447 for (const auto &I : RD->bases()) {
8448 if (I.isVirtual())
8449 continue;
8450 const auto *Base = I.getType()->getAsCXXRecordDecl();
8451 // Ignore empty bases.
8452 if (Base->isEmpty() || CGF.getContext()
8453 .getASTRecordLayout(Base)
8454 .getNonVirtualSize()
8455 .isZero())
8456 continue;
8457
8458 unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
8459 RecordLayout[FieldIndex] = Base;
8460 }
8461 // Fill in virtual bases.
8462 for (const auto &I : RD->vbases()) {
8463 const auto *Base = I.getType()->getAsCXXRecordDecl();
8464 // Ignore empty bases.
8465 if (Base->isEmpty())
8466 continue;
8467 unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
8468 if (RecordLayout[FieldIndex])
8469 continue;
8470 RecordLayout[FieldIndex] = Base;
8471 }
8472 // Fill in all the fields.
8473 assert(!RD->isUnion() && "Unexpected union.");
8474 for (const auto *Field : RD->fields()) {
8475 // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
8476 // will fill in later.)
8477 if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
8478 unsigned FieldIndex = RL.getLLVMFieldNo(Field);
8479 RecordLayout[FieldIndex] = Field;
8480 }
8481 }
8482 for (const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
8483 &Data : RecordLayout) {
8484 if (Data.isNull())
8485 continue;
8486 if (const auto *Base = Data.dyn_cast<const CXXRecordDecl *>())
8487 getPlainLayout(Base, Layout, /*AsBase=*/true);
8488 else
8489 Layout.push_back(Data.get<const FieldDecl *>());
8490 }
8491 }
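// Illustration (toy types, editorial; not the Clang API): the flattening
// getPlainLayout performs, simplified so that non-empty bases are emitted
// before the record's own fields; the real code above orders entries by
// their LLVM field index instead.
#include <cstdio>
#include <string>
#include <vector>

struct ToyRecord {
  std::vector<const ToyRecord *> Bases; // Non-empty bases in layout order.
  std::vector<std::string> Fields;      // Non-bitfield, non-zero-size fields.
};

void plainLayout(const ToyRecord &R, std::vector<std::string> &Out) {
  for (const ToyRecord *B : R.Bases)
    plainLayout(*B, Out); // Recurse into base subobjects first.
  for (const std::string &F : R.Fields)
    Out.push_back(F);
}

int main() {
  ToyRecord A{{}, {"A::a"}};
  ToyRecord B{{&A}, {"B::b", "B::c"}};
  std::vector<std::string> Layout;
  plainLayout(B, Layout); // Yields A::a, B::b, B::c.
  for (const std::string &F : Layout)
    std::printf("%s\n", F.c_str());
  return 0;
}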
8492
8493 /// Generate all the base pointers, section pointers, sizes, map types, and
8494 /// mappers for the extracted mappable expressions (all included in \a
8495 /// CombinedInfo). Also, for each item that relates with a device pointer, a
8496 /// pair of the relevant declaration and index where it occurs is appended to
8497 /// the device pointers info array.
8498 void generateAllInfoForClauses(
8499 ArrayRef<const OMPClause *> Clauses, MapCombinedInfoTy &CombinedInfo,
8500 const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
8501 llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
8502 // We have to process the component lists that relate with the same
8503 // declaration in a single chunk so that we can generate the map flags
8504 // correctly. Therefore, we organize all lists in a map.
8505 enum MapKind { Present, Allocs, Other, Total };
8506 llvm::MapVector<CanonicalDeclPtr<const Decl>,
8507 SmallVector<SmallVector<MapInfo, 8>, 4>>
8508 Info;
8509
8510 // Helper function to fill the information map for the different supported
8511 // clauses.
8512 auto &&InfoGen =
8513 [&Info, &SkipVarSet](
8514 const ValueDecl *D, MapKind Kind,
8515 OMPClauseMappableExprCommon::MappableExprComponentListRef L,
8516 OpenMPMapClauseKind MapType,
8517 ArrayRef<OpenMPMapModifierKind> MapModifiers,
8518 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
8519 bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper,
8520 const Expr *VarRef = nullptr, bool ForDeviceAddr = false) {
8521 if (SkipVarSet.contains(D))
8522 return;
8523 auto It = Info.find(D);
8524 if (It == Info.end())
8525 It = Info
8526 .insert(std::make_pair(
8527 D, SmallVector<SmallVector<MapInfo, 8>, 4>(Total)))
8528 .first;
8529 It->second[Kind].emplace_back(
8530 L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer,
8531 IsImplicit, Mapper, VarRef, ForDeviceAddr);
8532 };
8533
8534 for (const auto *Cl : Clauses) {
8535 const auto *C = dyn_cast<OMPMapClause>(Cl);
8536 if (!C)
8537 continue;
8538 MapKind Kind = Other;
8539 if (!C->getMapTypeModifiers().empty() &&
8540 llvm::any_of(C->getMapTypeModifiers(), [](OpenMPMapModifierKind K) {
8541 return K == OMPC_MAP_MODIFIER_present;
8542 }))
8543 Kind = Present;
8544 else if (C->getMapType() == OMPC_MAP_alloc)
8545 Kind = Allocs;
8546 const auto *EI = C->getVarRefs().begin();
8547 for (const auto L : C->component_lists()) {
8548 const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
8549 InfoGen(std::get<0>(L), Kind, std::get<1>(L), C->getMapType(),
8550 C->getMapTypeModifiers(), llvm::None,
8551 /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
8552 E);
8553 ++EI;
8554 }
8555 }
8556 for (const auto *Cl : Clauses) {
8557 const auto *C = dyn_cast<OMPToClause>(Cl);
8558 if (!C)
8559 continue;
8560 MapKind Kind = Other;
8561 if (!C->getMotionModifiers().empty() &&
8562 llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) {
8563 return K == OMPC_MOTION_MODIFIER_present;
8564 }))
8565 Kind = Present;
8566 const auto *EI = C->getVarRefs().begin();
8567 for (const auto L : C->component_lists()) {
8568 InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, llvm::None,
8569 C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
8570 C->isImplicit(), std::get<2>(L), *EI);
8571 ++EI;
8572 }
8573 }
8574 for (const auto *Cl : Clauses) {
8575 const auto *C = dyn_cast<OMPFromClause>(Cl);
8576 if (!C)
8577 continue;
8578 MapKind Kind = Other;
8579 if (!C->getMotionModifiers().empty() &&
8580 llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) {
8581 return K == OMPC_MOTION_MODIFIER_present;
8582 }))
8583 Kind = Present;
8584 const auto *EI = C->getVarRefs().begin();
8585 for (const auto L : C->component_lists()) {
8586 InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from, llvm::None,
8587 C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
8588 C->isImplicit(), std::get<2>(L), *EI);
8589 ++EI;
8590 }
8591 }
8592
8593 // Look at the use_device_ptr clause information and mark the existing map
8594 // entries as such. If there is no map information for an entry in the
8595 // use_device_ptr list, we create one with map type 'alloc' and zero size
8596 // section. It is the user's fault if that was not mapped before. If there is
8597 // no map information and the pointer is a struct member, then we defer the
8598 // emission of that entry until the whole struct has been processed.
8599 llvm::MapVector<CanonicalDeclPtr<const Decl>,
8600 SmallVector<DeferredDevicePtrEntryTy, 4>>
8601 DeferredInfo;
8602 MapCombinedInfoTy UseDevicePtrCombinedInfo;
8603
8604 for (const auto *Cl : Clauses) {
8605 const auto *C = dyn_cast<OMPUseDevicePtrClause>(Cl);
8606 if (!C)
8607 continue;
8608 for (const auto L : C->component_lists()) {
8609 OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
8610 std::get<1>(L);
8611 assert(!Components.empty() &&
8612 "Not expecting empty list of components!");
8613 const ValueDecl *VD = Components.back().getAssociatedDeclaration();
8614 VD = cast<ValueDecl>(VD->getCanonicalDecl());
8615 const Expr *IE = Components.back().getAssociatedExpression();
8616 // If the first component is a member expression, we have to look into
8617 // 'this', which maps to null in the map of map information. Otherwise
8618 // look directly for the information.
8619 auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
8620
8621 // We potentially have map information for this declaration already.
8622 // Look for the first set of components that refer to it.
8623 if (It != Info.end()) {
8624 bool Found = false;
8625 for (auto &Data : It->second) {
8626 auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
8627 return MI.Components.back().getAssociatedDeclaration() == VD;
8628 });
8629 // If we found a map entry, signal that the pointer has to be
8630 // returned and move on to the next declaration. Exclude cases where
8631 // the base pointer is mapped as array subscript, array section or
8632 // array shaping. The base address is passed as a pointer to base in
8633 // this case and cannot be used as a base for use_device_ptr list
8634 // item.
8635 if (CI != Data.end()) {
8636 auto PrevCI = std::next(CI->Components.rbegin());
8637 const auto *VarD = dyn_cast<VarDecl>(VD);
8638 if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
8639 isa<MemberExpr>(IE) ||
8640 !VD->getType().getNonReferenceType()->isPointerType() ||
8641 PrevCI == CI->Components.rend() ||
8642 isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
8643 VarD->hasLocalStorage()) {
8644 CI->ReturnDevicePointer = true;
8645 Found = true;
8646 break;
8647 }
8648 }
8649 }
8650 if (Found)
8651 continue;
8652 }
8653
8654 // We didn't find any match in our map information - generate a zero
8655 // size array section - if the pointer is a struct member we defer this
8656 // action until the whole struct has been processed.
8657 if (isa<MemberExpr>(IE)) {
8658 // Insert the pointer into Info to be processed by
8659 // generateInfoForComponentList. Because it is a member pointer
8660 // without a pointee, no entry will be generated for it, therefore
8661 // we need to generate one after the whole struct has been processed.
8662 // Nonetheless, generateInfoForComponentList must be called to take
8663 // the pointer into account for the calculation of the range of the
8664 // partial struct.
8665 InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, llvm::None,
8666 llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
8667 nullptr);
8668 DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
8669 } else {
8670 llvm::Value *Ptr =
8671 CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
8672 UseDevicePtrCombinedInfo.Exprs.push_back(VD);
8673 UseDevicePtrCombinedInfo.BasePointers.emplace_back(Ptr, VD);
8674 UseDevicePtrCombinedInfo.Pointers.push_back(Ptr);
8675 UseDevicePtrCombinedInfo.Sizes.push_back(
8676 llvm::Constant::getNullValue(CGF.Int64Ty));
8677 UseDevicePtrCombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
8678 UseDevicePtrCombinedInfo.Mappers.push_back(nullptr);
8679 }
8680 }
8681 }
8682
8683 // Look at the use_device_addr clause information and mark the existing map
8684 // entries as such. If there is no map information for an entry in the
8685 // use_device_addr list, we create one with map type 'alloc' and zero size
8686 // section. It is the user's fault if that was not mapped before. If there is
8687 // no map information and the pointer is a struct member, then we defer the
8688 // emission of that entry until the whole struct has been processed.
8689 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
8690 for (const auto *Cl : Clauses) {
8691 const auto *C = dyn_cast<OMPUseDeviceAddrClause>(Cl);
8692 if (!C)
8693 continue;
8694 for (const auto L : C->component_lists()) {
8695 assert(!std::get<1>(L).empty() &&
8696 "Not expecting empty list of components!");
8697 const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
8698 if (!Processed.insert(VD).second)
8699 continue;
8700 VD = cast<ValueDecl>(VD->getCanonicalDecl());
8701 const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
8702 // If the first component is a member expression, we have to look into
8703 // 'this', which maps to null in the map of map information. Otherwise
8704 // look directly for the information.
8705 auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
8706
8707 // We potentially have map information for this declaration already.
8708 // Look for the first set of components that refer to it.
8709 if (It != Info.end()) {
8710 bool Found = false;
8711 for (auto &Data : It->second) {
8712 auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
8713 return MI.Components.back().getAssociatedDeclaration() == VD;
8714 });
8715 // If we found a map entry, signal that the pointer has to be
8716 // returned and move on to the next declaration.
8717 if (CI != Data.end()) {
8718 CI->ReturnDevicePointer = true;
8719 Found = true;
8720 break;
8721 }
8722 }
8723 if (Found)
8724 continue;
8725 }
8726
8727 // We didn't find any match in our map information - generate a zero
8728 // size array section - if the pointer is a struct member we defer this
8729 // action until the whole struct has been processed.
8730 if (isa<MemberExpr>(IE)) {
8731 // Insert the pointer into Info to be processed by
8732 // generateInfoForComponentList. Because it is a member pointer
8733 // without a pointee, no entry will be generated for it, therefore
8734 // we need to generate one after the whole struct has been processed.
8735 // Nonetheless, generateInfoForComponentList must be called to take
8736 // the pointer into account for the calculation of the range of the
8737 // partial struct.
8738 InfoGen(nullptr, Other, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
8739 llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
8740 nullptr, nullptr, /*ForDeviceAddr=*/true);
8741 DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
8742 } else {
8743 llvm::Value *Ptr;
8744 if (IE->isGLValue())
8745 Ptr = CGF.EmitLValue(IE).getPointer(CGF);
8746 else
8747 Ptr = CGF.EmitScalarExpr(IE);
8748 CombinedInfo.Exprs.push_back(VD);
8749 CombinedInfo.BasePointers.emplace_back(Ptr, VD);
8750 CombinedInfo.Pointers.push_back(Ptr);
8751 CombinedInfo.Sizes.push_back(
8752 llvm::Constant::getNullValue(CGF.Int64Ty));
8753 CombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
8754 CombinedInfo.Mappers.push_back(nullptr);
8755 }
8756 }
8757 }
8758
8759 for (const auto &Data : Info) {
8760 StructRangeInfoTy PartialStruct;
8761 // Temporary generated information.
8762 MapCombinedInfoTy CurInfo;
8763 const Decl *D = Data.first;
8764 const ValueDecl *VD = cast_or_null<ValueDecl>(D);
8765 for (const auto &M : Data.second) {
8766 for (const MapInfo &L : M) {
8767 assert(!L.Components.empty() &&
8768 "Not expecting declaration with no component lists.");
8769
8770 // Remember the current base pointer index.
8771 unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
8772 CurInfo.NonContigInfo.IsNonContiguous =
8773 L.Components.back().isNonContiguous();
8774 generateInfoForComponentList(
8775 L.MapType, L.MapModifiers, L.MotionModifiers, L.Components,
8776 CurInfo, PartialStruct, /*IsFirstComponentList=*/false,
8777 L.IsImplicit, L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
8778
8779 // If this entry relates with a device pointer, set the relevant
8780 // declaration and add the 'return pointer' flag.
8781 if (L.ReturnDevicePointer) {
8782 assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
8783 "Unexpected number of mapped base pointers.");
8784
8785 const ValueDecl *RelevantVD =
8786 L.Components.back().getAssociatedDeclaration();
8787 assert(RelevantVD &&
8788 "No relevant declaration related with device pointer??");
8789
8790 CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
8791 RelevantVD);
8792 CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
8793 }
8794 }
8795 }
8796
8797 // Append any pending zero-length pointers which are struct members and
8798 // used with use_device_ptr or use_device_addr.
8799 auto CI = DeferredInfo.find(Data.first);
8800 if (CI != DeferredInfo.end()) {
8801 for (const DeferredDevicePtrEntryTy &L : CI->second) {
8802 llvm::Value *BasePtr;
8803 llvm::Value *Ptr;
8804 if (L.ForDeviceAddr) {
8805 if (L.IE->isGLValue())
8806 Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
8807 else
8808 Ptr = this->CGF.EmitScalarExpr(L.IE);
8809 BasePtr = Ptr;
8810 // Entry is RETURN_PARAM. Also, set the placeholder value
8811 // MEMBER_OF=FFFF so that the entry is later updated with the
8812 // correct value of MEMBER_OF.
8813 CurInfo.Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
8814 } else {
8815 BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
8816 Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
8817 L.IE->getExprLoc());
8818 // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the
8819 // placeholder value MEMBER_OF=FFFF so that the entry is later
8820 // updated with the correct value of MEMBER_OF.
8821 CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
8822 OMP_MAP_MEMBER_OF);
8823 }
8824 CurInfo.Exprs.push_back(L.VD);
8825 CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
8826 CurInfo.Pointers.push_back(Ptr);
8827 CurInfo.Sizes.push_back(
8828 llvm::Constant::getNullValue(this->CGF.Int64Ty));
8829 CurInfo.Mappers.push_back(nullptr);
8830 }
8831 }
8832 // If there is an entry in PartialStruct it means we have a struct with
8833 // individual members mapped. Emit an extra combined entry.
8834 if (PartialStruct.Base.isValid()) {
8835 CurInfo.NonContigInfo.Dims.push_back(0);
8836 emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
8837 }
8838
8839 // We need to append the results of this capture to what we already
8840 // have.
8841 CombinedInfo.append(CurInfo);
8842 }
8843 // Append data for use_device_ptr clauses.
8844 CombinedInfo.append(UseDevicePtrCombinedInfo);
8845 }
8846
8847public:
8848 MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
8849 : CurDir(&Dir), CGF(CGF) {
8850 // Extract firstprivate clause information.
8851 for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
8852 for (const auto *D : C->varlists())
8853 FirstPrivateDecls.try_emplace(
8854 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
8855 // Extract implicit firstprivates from uses_allocators clauses.
8856 for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
8857 for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
8858 OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
8859 if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
8860 FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
8861 /*Implicit=*/true);
8862 else if (const auto *VD = dyn_cast<VarDecl>(
8863 cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
8864 ->getDecl()))
8865 FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
8866 }
8867 }
8868 // Extract device pointer clause information.
8869 for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
8870 for (auto L : C->component_lists())
8871 DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
8872 }
8873
8874 /// Constructor for the declare mapper directive.
8875 MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
8876 : CurDir(&Dir), CGF(CGF) {}
8877
8878 /// Generate code for the combined entry if we have a partially mapped struct
8879 /// and take care of the mapping flags of the arguments corresponding to
8880 /// individual struct members.
8881 void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
8882 MapFlagsArrayTy &CurTypes,
8883 const StructRangeInfoTy &PartialStruct,
8884 const ValueDecl *VD = nullptr,
8885 bool NotTargetParams = true) const {
8886 if (CurTypes.size() == 1 &&
8887 ((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) &&
8888 !PartialStruct.IsArraySection)
8889 return;
8890 Address LBAddr = PartialStruct.LowestElem.second;
8891 Address HBAddr = PartialStruct.HighestElem.second;
8892 if (PartialStruct.HasCompleteRecord) {
8893 LBAddr = PartialStruct.LB;
8894 HBAddr = PartialStruct.LB;
8895 }
8896 CombinedInfo.Exprs.push_back(VD);
8897 // Base is the base of the struct
8898 CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
8899 // Pointer is the address of the lowest element
8900 llvm::Value *LB = LBAddr.getPointer();
8901 CombinedInfo.Pointers.push_back(LB);
8902 // There should not be a mapper for a combined entry.
8903 CombinedInfo.Mappers.push_back(nullptr);
8904 // Size is (addr of {highest+1} element) - (addr of lowest element)
8905 llvm::Value *HB = HBAddr.getPointer();
8906 llvm::Value *HAddr =
8907 CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, /*Idx0=*/1);
8908 llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
8909 llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
8910 llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
8911 llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
8912 /*isSigned=*/false);
8913 CombinedInfo.Sizes.push_back(Size);
8914 // The map type is always TARGET_PARAM when generating info for captures.
8915 CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
8916 : OMP_MAP_TARGET_PARAM);
8917 // If any element has the present modifier, then make sure the runtime
8918 // doesn't attempt to allocate the struct.
8919 if (CurTypes.end() !=
8920 llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
8921 return Type & OMP_MAP_PRESENT;
8922 }))
8923 CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
8924 // Remove TARGET_PARAM flag from the first element
8925 (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
8926
8927 // All other current entries will be MEMBER_OF the combined entry
8928 // (except for PTR_AND_OBJ entries which do not have a placeholder value
8929 // 0xFFFF in the MEMBER_OF field).
8930 OpenMPOffloadMappingFlags MemberOfFlag =
8931 getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
8932 for (auto &M : CurTypes)
8933 setCorrectMemberOfFlag(M, MemberOfFlag);
8934 }
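// Illustration (standalone sketch, editorial; not part of this file): the
// size computation above, i.e. (address one past the highest mapped member)
// minus (address of the lowest mapped member), on a plain host struct.
#include <cstdint>
#include <cstdio>

struct S { int a; double b; char c[8]; int d; };

int main() {
  S s;
  // Suppose only s.b and s.d were mapped individually; b is then the lowest
  // mapped element and d the highest.
  char *LB = reinterpret_cast<char *>(&s.b);
  char *HB = reinterpret_cast<char *>(&s.d + 1); // One past the highest.
  uint64_t Size = static_cast<uint64_t>(HB - LB);
  std::printf("combined entry covers %llu bytes\n",
              (unsigned long long)Size);
  return 0;
}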
8935
8936 /// Generate all the base pointers, section pointers, sizes, map types, and
8937 /// mappers for the extracted mappable expressions (all included in \a
8938 /// CombinedInfo). Also, for each item that relates with a device pointer, a
8939 /// pair of the relevant declaration and index where it occurs is appended to
8940 /// the device pointers info array.
8941 void generateAllInfo(
8942 MapCombinedInfoTy &CombinedInfo,
8943 const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
8944 llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
8945 assert(CurDir.is<const OMPExecutableDirective *>() &&
8946 "Expect an executable directive");
8947 const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
8948 generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, SkipVarSet);
8949 }
8950
8951 /// Generate all the base pointers, section pointers, sizes, map types, and
8952 /// mappers for the extracted map clauses of user-defined mapper (all included
8953 /// in \a CombinedInfo).
8954 void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
8955 assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
8956 "Expect a declare mapper directive");
8957 const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
8958 generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo);
8959 }
8960
8961 /// Emit capture info for lambdas for variables captured by reference.
8962 void generateInfoForLambdaCaptures(
8963 const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
8964 llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
8965 const auto *RD = VD->getType()
8966 .getCanonicalType()
8967 .getNonReferenceType()
8968 ->getAsCXXRecordDecl();
8969 if (!RD || !RD->isLambda())
8970 return;
8971 Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
8972 LValue VDLVal = CGF.MakeAddrLValue(
8973 VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
8974 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
8975 FieldDecl *ThisCapture = nullptr;
8976 RD->getCaptureFields(Captures, ThisCapture);
8977 if (ThisCapture) {
8978 LValue ThisLVal =
8979 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
8980 LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
8981 LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
8982 VDLVal.getPointer(CGF));
8983 CombinedInfo.Exprs.push_back(VD);
8984 CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
8985 CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
8986 CombinedInfo.Sizes.push_back(
8987 CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
8988 CGF.Int64Ty, /*isSigned=*/true));
8989 CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
8990 OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
8991 CombinedInfo.Mappers.push_back(nullptr);
8992 }
8993 for (const LambdaCapture &LC : RD->captures()) {
8994 if (!LC.capturesVariable())
8995 continue;
8996 const VarDecl *VD = LC.getCapturedVar();
8997 if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType())
8998 continue;
8999 auto It = Captures.find(VD);
9000 assert(It != Captures.end() && "Found lambda capture without field.");
9001 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
9002 if (LC.getCaptureKind() == LCK_ByRef) {
9003 LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
9004 LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
9005 VDLVal.getPointer(CGF));
9006 CombinedInfo.Exprs.push_back(VD);
9007 CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
9008 CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
9009 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
9010 CGF.getTypeSize(
9011 VD->getType().getCanonicalType().getNonReferenceType()),
9012 CGF.Int64Ty, /*isSigned=*/true));
9013 } else {
9014 RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
9015 LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
9016 VDLVal.getPointer(CGF));
9017 CombinedInfo.Exprs.push_back(VD);
9018 CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
9019 CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
9020 CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
9021 }
9022 CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
9023 OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
9024 CombinedInfo.Mappers.push_back(nullptr);
9025 }
9026 }
9027
9028 /// Set correct indices for lambdas captures.
9029 void adjustMemberOfForLambdaCaptures(
9030 const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
9031 MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
9032 MapFlagsArrayTy &Types) const {
9033 for (unsigned I = 0, E = Types.size(); I < E; ++I) {
9034 // Set correct member_of idx for all implicit lambda captures.
9035 if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
9036 OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
9037 continue;
9038 llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
9039 assert(BasePtr && "Unable to find base lambda address.");
9040 int TgtIdx = -1;
9041 for (unsigned J = I; J > 0; --J) {
9042 unsigned Idx = J - 1;
9043 if (Pointers[Idx] != BasePtr)
9044 continue;
9045 TgtIdx = Idx;
9046 break;
9047 }
9048 assert(TgtIdx != -1 && "Unable to find parent lambda.");
9049 // All other current entries will be MEMBER_OF the combined entry
9050 // (except for PTR_AND_OBJ entries which do not have a placeholder value
9051 // 0xFFFF in the MEMBER_OF field).
9052 OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
9053 setCorrectMemberOfFlag(Types[I], MemberOfFlag);
9054 }
9055 }
9056
9057 /// Generate the base pointers, section pointers, sizes, map types, and
9058 /// mappers associated to a given capture (all included in \a CombinedInfo).
9059 void generateInfoForCapture(const CapturedStmt::Capture *Cap,
9060 llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
9061 StructRangeInfoTy &PartialStruct) const {
9062 assert(!Cap->capturesVariableArrayType() &&
9063 "Not expecting to generate map info for a variable array type!");
9064
9065 // We need to know when we are generating information for the first component.
9066 const ValueDecl *VD = Cap->capturesThis()
9067 ? nullptr
9068 : Cap->getCapturedVar()->getCanonicalDecl();
9069
9070 // If this declaration appears in an is_device_ptr clause, we just have to
9071 // pass the pointer by value. If it is a reference to a declaration, we just
9072 // pass its value.
9073 if (DevPointersMap.count(VD)) {
9074 CombinedInfo.Exprs.push_back(VD);
9075 CombinedInfo.BasePointers.emplace_back(Arg, VD);
9076 CombinedInfo.Pointers.push_back(Arg);
9077 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
9078 CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
9079 /*isSigned=*/true));
9080 CombinedInfo.Types.push_back(
9081 (Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) |
9082 OMP_MAP_TARGET_PARAM);
9083 CombinedInfo.Mappers.push_back(nullptr);
9084 return;
9085 }
9086
9087 using MapData =
9088 std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
9089 OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool,
9090 const ValueDecl *, const Expr *>;
9091 SmallVector<MapData, 4> DeclComponentLists;
9092 assert(CurDir.is<const OMPExecutableDirective *>() &&
9093 "Expect an executable directive");
9094 const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
9095 for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
9096 const auto *EI = C->getVarRefs().begin();
9097 for (const auto L : C->decl_component_lists(VD)) {
9098 const ValueDecl *VDecl, *Mapper;
9099 // The Expression is not correct if the mapping is implicit
9100 const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
9101 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
9102 std::tie(VDecl, Components, Mapper) = L;
9103 assert(VDecl == VD && "We got information for the wrong declaration??");
9104 assert(!Components.empty() &&
9105 "Not expecting declaration with no component lists.");
9106 DeclComponentLists.emplace_back(Components, C->getMapType(),
9107 C->getMapTypeModifiers(),
9108 C->isImplicit(), Mapper, E);
9109 ++EI;
9110 }
9111 }
9112 llvm::stable_sort(DeclComponentLists, [](const MapData &LHS,
9113 const MapData &RHS) {
9114 ArrayRef<OpenMPMapModifierKind> MapModifiers = std::get<2>(LHS);
9115 OpenMPMapClauseKind MapType = std::get<1>(RHS);
9116 bool HasPresent = !MapModifiers.empty() &&
9117 llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) {
9118 return K == clang::OMPC_MAP_MODIFIER_present;
9119 });
9120 bool HasAllocs = MapType == OMPC_MAP_alloc;
9121 MapModifiers = std::get<2>(RHS);
9122 MapType = std::get<1>(LHS);
9123 bool HasPresentR =
9124 !MapModifiers.empty() &&
9125 llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) {
9126 return K == clang::OMPC_MAP_MODIFIER_present;
9127 });
9128 bool HasAllocsR = MapType == OMPC_MAP_alloc;
9129 return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR);
9130 });
9131
9132 // Find overlapping elements (including the offset from the base element).
9133 llvm::SmallDenseMap<
9134 const MapData *,
9135 llvm::SmallVector<
9136 OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>,
9137 4>
9138 OverlappedData;
9139 size_t Count = 0;
9140 for (const MapData &L : DeclComponentLists) {
9141 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
9142 OpenMPMapClauseKind MapType;
9143 ArrayRef<OpenMPMapModifierKind> MapModifiers;
9144 bool IsImplicit;
9145 const ValueDecl *Mapper;
9146 const Expr *VarRef;
9147 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
9148 L;
9149 ++Count;
9150 for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
9151 OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
9152 std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper,
9153 VarRef) = L1;
9154 auto CI = Components.rbegin();
9155 auto CE = Components.rend();
9156 auto SI = Components1.rbegin();
9157 auto SE = Components1.rend();
9158 for (; CI != CE && SI != SE; ++CI, ++SI) {
9159 if (CI->getAssociatedExpression()->getStmtClass() !=
9160 SI->getAssociatedExpression()->getStmtClass())
9161 break;
9162 // Are we dealing with different variables/fields?
9163 if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
9164 break;
9165 }
9166 // We found an overlap if, for at least one of the two lists, we
9167 // reached the head of its components list.
9168 if (CI == CE || SI == SE) {
9169 // Ignore it if it is the same component.
9170 if (CI == CE && SI == SE)
9171 continue;
9172 const auto It = (SI == SE) ? CI : SI;
9173 // If one component is a pointer and another one is a kind of
9174 // dereference of this pointer (array subscript, section, dereference,
9175 // etc.), it is not an overlap.
9176 // Likewise, if one component is a base and another component is a
9177 // dereferenced pointer member expression with the same base.
9178 if (!isa<MemberExpr>(It->getAssociatedExpression()) ||
9179 (std::prev(It)->getAssociatedDeclaration() &&
9180 std::prev(It)
9181 ->getAssociatedDeclaration()
9182 ->getType()
9183 ->isPointerType()) ||
9184 (It->getAssociatedDeclaration() &&
9185 It->getAssociatedDeclaration()->getType()->isPointerType() &&
9186 std::next(It) != CE && std::next(It) != SE))
9187 continue;
9188 const MapData &BaseData = CI == CE ? L : L1;
9189 OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
9190 SI == SE ? Components : Components1;
9191 auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData);
9192 OverlappedElements.getSecond().push_back(SubData);
9193 }
9194 }
9195 }
9196 // Sort the overlapped elements for each item.
9197 llvm::SmallVector<const FieldDecl *, 4> Layout;
9198 if (!OverlappedData.empty()) {
9199 const Type *BaseType = VD->getType().getCanonicalType().getTypePtr();
9200 const Type *OrigType = BaseType->getPointeeOrArrayElementType();
9201 while (BaseType != OrigType) {
9202 BaseType = OrigType->getCanonicalTypeInternal().getTypePtr();
9203 OrigType = BaseType->getPointeeOrArrayElementType();
9204 }
9205
9206 if (const auto *CRD = BaseType->getAsCXXRecordDecl())
9207 getPlainLayout(CRD, Layout, /*AsBase=*/false);
9208 else {
9209 const auto *RD = BaseType->getAsRecordDecl();
9210 Layout.append(RD->field_begin(), RD->field_end());
9211 }
9212 }
9213 for (auto &Pair : OverlappedData) {
9214 llvm::stable_sort(
9215 Pair.getSecond(),
9216 [&Layout](
9217 OMPClauseMappableExprCommon::MappableExprComponentListRef First,
9218 OMPClauseMappableExprCommon::MappableExprComponentListRef
9219 Second) {
9220 auto CI = First.rbegin();
9221 auto CE = First.rend();
9222 auto SI = Second.rbegin();
9223 auto SE = Second.rend();
9224 for (; CI != CE && SI != SE; ++CI, ++SI) {
9225 if (CI->getAssociatedExpression()->getStmtClass() !=
9226 SI->getAssociatedExpression()->getStmtClass())
9227 break;
9228 // Are we dealing with different variables/fields?
9229 if (CI->getAssociatedDeclaration() !=
9230 SI->getAssociatedDeclaration())
9231 break;
9232 }
9233
9234 // Lists contain the same elements.
9235 if (CI == CE && SI == SE)
9236 return false;
9237
9238 // A list with fewer elements is less than a list with more elements.
9239 if (CI == CE || SI == SE)
9240 return CI == CE;
9241
9242 const auto *FD1 = cast<FieldDecl>(CI->getAssociatedDeclaration());
9243 const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
9244 if (FD1->getParent() == FD2->getParent())
9245 return FD1->getFieldIndex() < FD2->getFieldIndex();
9246 const auto *It =
9247 llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
9248 return FD == FD1 || FD == FD2;
9249 });
9250 return *It == FD1;
9251 });
9252 }
9253
9254 // Associated with a capture, because the mapping flags depend on it.
9255 // First, go through all of the elements that have overlapped elements.
9256 bool IsFirstComponentList = true;
9257 for (const auto &Pair : OverlappedData) {
9258 const MapData &L = *Pair.getFirst();
9259 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
9260 OpenMPMapClauseKind MapType;
9261 ArrayRef<OpenMPMapModifierKind> MapModifiers;
9262 bool IsImplicit;
9263 const ValueDecl *Mapper;
9264 const Expr *VarRef;
9265 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
9266 L;
9267 ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
9268 OverlappedComponents = Pair.getSecond();
9269 generateInfoForComponentList(
9270 MapType, MapModifiers, llvm::None, Components, CombinedInfo,
9271 PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
9272 /*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents);
9273 IsFirstComponentList = false;
9274 }
9275 // Then go through the remaining elements without overlapped elements.
9276 for (const MapData &L : DeclComponentLists) {
9277 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
9278 OpenMPMapClauseKind MapType;
9279 ArrayRef<OpenMPMapModifierKind> MapModifiers;
9280 bool IsImplicit;
9281 const ValueDecl *Mapper;
9282 const Expr *VarRef;
9283 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
9284 L;
9285 auto It = OverlappedData.find(&L);
9286 if (It == OverlappedData.end())
9287 generateInfoForComponentList(MapType, MapModifiers, llvm::None,
9288 Components, CombinedInfo, PartialStruct,
9289 IsFirstComponentList, IsImplicit, Mapper,
9290 /*ForDeviceAddr=*/false, VD, VarRef);
9291 IsFirstComponentList = false;
9292 }
9293 }
9294
9295 /// Generate the default map information for a given capture \a CI,
9296 /// record field declaration \a RI and captured value \a CV.
9297 void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
9298 const FieldDecl &RI, llvm::Value *CV,
9299 MapCombinedInfoTy &CombinedInfo) const {
9300 bool IsImplicit = true;
9301 // Do the default mapping.
9302 if (CI.capturesThis()) {
9303 CombinedInfo.Exprs.push_back(nullptr);
9304 CombinedInfo.BasePointers.push_back(CV);
9305 CombinedInfo.Pointers.push_back(CV);
9306 const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
9307 CombinedInfo.Sizes.push_back(
9308 CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
9309 CGF.Int64Ty, /*isSigned=*/true));
9310 // Default map type.
9311 CombinedInfo.Types.push_back(OMP_MAP_TO | OMP_MAP_FROM);
9312 } else if (CI.capturesVariableByCopy()) {
9313 const VarDecl *VD = CI.getCapturedVar();
9314 CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
9315 CombinedInfo.BasePointers.push_back(CV);
9316 CombinedInfo.Pointers.push_back(CV);
9317 if (!RI.getType()->isAnyPointerType()) {
9318 // We have to signal to the runtime captures passed by value that are
9319 // not pointers.
9320 CombinedInfo.Types.push_back(OMP_MAP_LITERAL);
9321 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
9322 CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
9323 } else {
9324 // Pointers are implicitly mapped with a zero size and no flags
9325 // (other than first map that is added for all implicit maps).
9326 CombinedInfo.Types.push_back(OMP_MAP_NONE);
9327 CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
9328 }
9329 auto I = FirstPrivateDecls.find(VD);
9330 if (I != FirstPrivateDecls.end())
9331 IsImplicit = I->getSecond();
9332 } else {
9333 assert(CI.capturesVariable() && "Expected captured reference.");
9334 const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
9335 QualType ElementType = PtrTy->getPointeeType();
9336 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
9337 CGF.getTypeSize(ElementType), CGF.Int64Ty, /*isSigned=*/true));
9338 // The default map type for a scalar/complex type is 'to' because by
9339 // default the value doesn't have to be retrieved. For an aggregate
9340 // type, the default is 'tofrom'.
9341 CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
9342 const VarDecl *VD = CI.getCapturedVar();
9343 auto I = FirstPrivateDecls.find(VD);
9344 CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
9345 CombinedInfo.BasePointers.push_back(CV);
9346 if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
9347 Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
9348 CV, ElementType, CGF.getContext().getDeclAlign(VD),
9349 AlignmentSource::Decl));
9350 CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
9351 } else {
9352 CombinedInfo.Pointers.push_back(CV);
9353 }
9354 if (I != FirstPrivateDecls.end())
9355 IsImplicit = I->getSecond();
9356 }
9357 // Every default map produces a single argument which is a target parameter.
9358 CombinedInfo.Types.back() |= OMP_MAP_TARGET_PARAM;
9359
9360 // Add flag stating this is an implicit map.
9361 if (IsImplicit)
9362 CombinedInfo.Types.back() |= OMP_MAP_IMPLICIT;
9363
9364 // No user-defined mapper for default mapping.
9365 CombinedInfo.Mappers.push_back(nullptr);
9366 }
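// Editorial example (hedged summary of the branches above) for a target
// region that implicitly captures int x and int *p:
// - capturing 'this' maps the pointee object TO | FROM;
// - x, captured by copy and not a pointer, is passed as LITERAL with its
//   size;
// - p, captured by copy as a pointer, is passed with zero size and no
//   extra flags;
// and every entry then gets TARGET_PARAM, plus IMPLICIT for implicit maps.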
9367};
9368} // anonymous namespace
9369
9370static void emitNonContiguousDescriptor(
9371 CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
9372 CGOpenMPRuntime::TargetDataInfo &Info) {
9373 CodeGenModule &CGM = CGF.CGM;
9374 MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
9375 &NonContigInfo = CombinedInfo.NonContigInfo;
9376
9377 // Build an array of struct descriptor_dim and then assign it to
9378 // offload_args.
9379 //
9380 // struct descriptor_dim {
9381 // uint64_t offset;
9382 // uint64_t count;
9383 // uint64_t stride
9384 // };
9385 ASTContext &C = CGF.getContext();
9386 QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
9387 RecordDecl *RD;
9388 RD = C.buildImplicitRecord("descriptor_dim");
9389 RD->startDefinition();
9390 addFieldToRecordDecl(C, RD, Int64Ty);
9391 addFieldToRecordDecl(C, RD, Int64Ty);
9392 addFieldToRecordDecl(C, RD, Int64Ty);
9393 RD->completeDefinition();
9394 QualType DimTy = C.getRecordType(RD);
9395
9396 enum { OffsetFD = 0, CountFD, StrideFD };
9397 // We need two index variables here since the size of "Dims" is the same
9398 // as the size of Components; however, the sizes of offset, count, and
9399 // stride equal the number of base declarations that are non-contiguous.
9400 for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
9401 // Skip emitting IR if the dimension size is 1, since it cannot be
9402 // non-contiguous.
9403 if (NonContigInfo.Dims[I] == 1)
9404 continue;
9405 llvm::APInt Size(/*numBits=*/32, NonContigInfo.Dims[I]);
9406 QualType ArrayTy =
9407 C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
9408 Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
9409 for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
9410 unsigned RevIdx = EE - II - 1;
9411 LValue DimsLVal = CGF.MakeAddrLValue(
9412 CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
9413 // Offset
9414 LValue OffsetLVal = CGF.EmitLValueForField(
9415 DimsLVal, *std::next(RD->field_begin(), OffsetFD));
9416 CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
9417 // Count
9418 LValue CountLVal = CGF.EmitLValueForField(
9419 DimsLVal, *std::next(RD->field_begin(), CountFD));
9420 CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
9421 // Stride
9422 LValue StrideLVal = CGF.EmitLValueForField(
9423 DimsLVal, *std::next(RD->field_begin(), StrideFD));
9424 CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
9425 }
9426 // args[I] = &dims
9427 Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
9428 DimsAddr, CGM.Int8PtrTy);
9429 llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
9430 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
9431 Info.PointersArray, 0, I);
9432 Address PAddr(P, CGF.getPointerAlign());
9433 CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
9434 ++L;
9435 }
9436}
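// Illustration (standalone sketch, editorial; not part of this file): the
// host-side shape of the descriptor emitted above for the earlier
// int arr[5][5][5] example with arr[0:2:2][1:2:1][0:2:2]. Note the RevIdx
// reversal: dimensions are stored outermost first, with the dummy unit
// dimension last. The concrete values assume the stride table worked out
// earlier in this file.
#include <cstdint>

struct descriptor_dim { // Mirrors the implicit record built above.
  uint64_t offset;
  uint64_t count;
  uint64_t stride; // In bytes.
};

static const descriptor_dim dims[] = {
    {0, 2, 200}, // Outermost section [0:2:2].
    {1, 2, 20},  // Middle section [1:2:1].
    {0, 2, 8},   // Innermost section [0:2:2].
    {0, 1, 4},   // Dummy dimension: offset 0, count 1, element size.
};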
9437
9438/// Emit a string constant containing the names of the values mapped to the
9439/// offloading runtime library.
9440llvm::Constant *
9441emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
9442 MappableExprsHandler::MappingExprInfo &MapExprs) {
9443 llvm::Constant *SrcLocStr;
9444 if (!MapExprs.getMapDecl()) {
9445 SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
9446 } else {
9447 std::string ExprName = "";
9448 if (MapExprs.getMapExpr()) {
9449 PrintingPolicy P(CGF.getContext().getLangOpts());
9450 llvm::raw_string_ostream OS(ExprName);
9451 MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
9452 OS.flush();
9453 } else {
9454 ExprName = MapExprs.getMapDecl()->getNameAsString();
9455 }
9456
9457 SourceLocation Loc = MapExprs.getMapDecl()->getLocation();
9458 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
9459 const char *FileName = PLoc.getFilename();
9460 unsigned Line = PLoc.getLine();
9461 unsigned Column = PLoc.getColumn();
9462 SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FileName, ExprName.c_str(),
9463 Line, Column);
9464 }
9465 return SrcLocStr;
9466}
9467
9468/// Emit the arrays used to pass the captures and map information to the
9469/// offloading runtime library. If there is no map or capture information,
9470/// return nullptr by reference.
9471static void emitOffloadingArrays(
9472 CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
9473 CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
9474 bool IsNonContiguous = false) {
9475 CodeGenModule &CGM = CGF.CGM;
9476 ASTContext &Ctx = CGF.getContext();
9477
9478 // Reset the array information.
9479 Info.clearArrayInfo();
9480 Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
9481
9482 if (Info.NumberOfPtrs) {
9483 // Detect if we have any capture size requiring runtime evaluation of the
9484 // size so that a constant array could be eventually used.
9485 bool hasRuntimeEvaluationCaptureSize = false;
9486 for (llvm::Value *S : CombinedInfo.Sizes)
9487 if (!isa<llvm::Constant>(S)) {
9488 hasRuntimeEvaluationCaptureSize = true;
9489 break;
9490 }
9491
9492 llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
9493 QualType PointerArrayType = Ctx.getConstantArrayType(
9494 Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
9495 /*IndexTypeQuals=*/0);
9496
9497 Info.BasePointersArray =
9498 CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
9499 Info.PointersArray =
9500 CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
9501 Address MappersArray =
9502 CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
9503 Info.MappersArray = MappersArray.getPointer();
9504
9505 // If we don't have any VLA types or other types that require runtime
9506 // evaluation, we can use a constant array for the map sizes, otherwise we
9507 // need to fill up the arrays as we do for the pointers.
9508 QualType Int64Ty =
9509 Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
9510 if (hasRuntimeEvaluationCaptureSize) {
9511 QualType SizeArrayType = Ctx.getConstantArrayType(
9512 Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
9513 /*IndexTypeQuals=*/0);
9514 Info.SizesArray =
9515 CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
9516 } else {
9517 // We expect all the sizes to be constant, so we collect them to create
9518 // a constant array.
9519 SmallVector<llvm::Constant *, 16> ConstSizes;
9520 for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
9521 if (IsNonContiguous &&
9522 (CombinedInfo.Types[I] & MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
9523 ConstSizes.push_back(llvm::ConstantInt::get(
9524 CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
9525 } else {
9526 ConstSizes.push_back(cast<llvm::Constant>(CombinedInfo.Sizes[I]));
9527 }
9528 }
9529
9530 auto *SizesArrayInit = llvm::ConstantArray::get(
9531 llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
9532 std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
9533 auto *SizesArrayGbl = new llvm::GlobalVariable(
9534 CGM.getModule(), SizesArrayInit->getType(),
9535 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
9536 SizesArrayInit, Name);
9537 SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
9538 Info.SizesArray = SizesArrayGbl;
9539 }
9540
9541 // The map types are always constant so we don't need to generate code to
9542 // fill arrays. Instead, we create an array constant.
9543 SmallVector<uint64_t, 4> Mapping(CombinedInfo.Types.size(), 0);
9544 llvm::copy(CombinedInfo.Types, Mapping.begin());
9545 std::string MaptypesName =
9546 CGM.getOpenMPRuntime().getName({"offload_maptypes"});
9547 auto *MapTypesArrayGbl =
9548 OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
9549 Info.MapTypesArray = MapTypesArrayGbl;
9550
9551 // The map names array is only built if debug information is
9552 // requested.
9553 if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
9554 Info.MapNamesArray = llvm::Constant::getNullValue(
9555 llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
9556 } else {
9557 auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
9558 return emitMappingInformation(CGF, OMPBuilder, MapExpr);
9559 };
9560 SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
9561 llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
9562 std::string MapnamesName =
9563 CGM.getOpenMPRuntime().getName({"offload_mapnames"});
9564 auto *MapNamesArrayGbl =
9565 OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
9566 Info.MapNamesArray = MapNamesArrayGbl;
9567 }
9568
9569 // If there's a present map type modifier, it must not be applied to the end
9570 // of a region, so generate a separate map type array in that case.
9571 if (Info.separateBeginEndCalls()) {
9572 bool EndMapTypesDiffer = false;
9573 for (uint64_t &Type : Mapping) {
9574 if (Type & MappableExprsHandler::OMP_MAP_PRESENT) {
9575 Type &= ~MappableExprsHandler::OMP_MAP_PRESENT;
9576 EndMapTypesDiffer = true;
9577 }
9578 }
9579 if (EndMapTypesDiffer) {
9580 MapTypesArrayGbl =
9581 OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
9582 Info.MapTypesArrayEnd = MapTypesArrayGbl;
9583 }
9584 }
9585
9586 for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
9587 llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
9588 llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
9589 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
9590 Info.BasePointersArray, 0, I);
9591 BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
9592 BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
9593 Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
9594 CGF.Builder.CreateStore(BPVal, BPAddr);
9595
9596 if (Info.requiresDevicePointerInfo())
9597 if (const ValueDecl *DevVD =
9598 CombinedInfo.BasePointers[I].getDevicePtrDecl())
9599 Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
9600
9601 llvm::Value *PVal = CombinedInfo.Pointers[I];
9602 llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
9603 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
9604 Info.PointersArray, 0, I);
9605 P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
9606 P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
9607 Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
9608 CGF.Builder.CreateStore(PVal, PAddr);
9609
9610 if (hasRuntimeEvaluationCaptureSize) {
9611 llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
9612 llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
9613 Info.SizesArray,
9614 /*Idx0=*/0,
9615 /*Idx1=*/I);
9616 Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
9617 CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
9618 CGM.Int64Ty,
9619 /*isSigned=*/true),
9620 SAddr);
9621 }
9622
9623 // Fill up the mapper array.
9624 llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
9625 if (CombinedInfo.Mappers[I]) {
9626 MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
9627 cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
9628 MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
9629 Info.HasMapper = true;
9630 }
9631 Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
9632 CGF.Builder.CreateStore(MFunc, MAddr);
9633 }
9634 }
9635
9636 if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
9637 Info.NumberOfPtrs == 0)
9638 return;
9639
9640 emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
9641}
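// To illustrate what the code above produces (a hedged sketch, not verbatim
// compiler output; lengths and initializers depend on the actual map clauses):
//   %.offload_baseptrs = alloca [2 x i8*]
//   %.offload_ptrs     = alloca [2 x i8*]
//   %.offload_mappers  = alloca [2 x i8*]
//   @.offload_sizes    = private unnamed_addr constant [2 x i64] [...]
//   @.offload_maptypes = private unnamed_addr constant [2 x i64] [...]
// When any capture size needs runtime evaluation (e.g. a VLA), .offload_sizes
// becomes an alloca that is filled with stores in the per-pointer loop above.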
9642
9643namespace {
9644/// Additional arguments for emitOffloadingArraysArgument function.
9645struct ArgumentsOptions {
9646 bool ForEndCall = false;
9647 ArgumentsOptions() = default;
9648 ArgumentsOptions(bool ForEndCall) : ForEndCall(ForEndCall) {}
9649};
9650} // namespace
9651
9652/// Emit the arguments to be passed to the runtime library based on the
9653/// arrays of base pointers, pointers, sizes, map types, and mappers. If
9654/// ForEndCall, emit map types to be passed for the end of the region instead of
9655/// the beginning.
9656static void emitOffloadingArraysArgument(
9657 CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
9658 llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
9659 llvm::Value *&MapTypesArrayArg, llvm::Value *&MapNamesArrayArg,
9660 llvm::Value *&MappersArrayArg, CGOpenMPRuntime::TargetDataInfo &Info,
9661 const ArgumentsOptions &Options = ArgumentsOptions()) {
9662 assert((!Options.ForEndCall || Info.separateBeginEndCalls()) &&
9663 "expected region end call to runtime only when end call is separate");
9664 CodeGenModule &CGM = CGF.CGM;
9665 if (Info.NumberOfPtrs) {
9666 BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
9667 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
9668 Info.BasePointersArray,
9669 /*Idx0=*/0, /*Idx1=*/0);
9670 PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
9671 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
9672 Info.PointersArray,
9673 /*Idx0=*/0,
9674 /*Idx1=*/0);
9675 SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
9676 llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs), Info.SizesArray,
9677 /*Idx0=*/0, /*Idx1=*/0);
9678 MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
9679 llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
9680 Options.ForEndCall && Info.MapTypesArrayEnd ? Info.MapTypesArrayEnd
9681 : Info.MapTypesArray,
9682 /*Idx0=*/0,
9683 /*Idx1=*/0);
9684
9685 // Only emit the map names array argument if debug information is
9686 // requested.
9687 if (CGF.CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
9688 MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
9689 else
9690 MapNamesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
9691 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
9692 Info.MapNamesArray,
9693 /*Idx0=*/0,
9694 /*Idx1=*/0);
9695 // If there is no user-defined mapper, set the mapper array to nullptr to
9696 // avoid an unnecessary data privatization.
9697 if (!Info.HasMapper)
9698 MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
9699 else
9700 MappersArrayArg =
9701 CGF.Builder.CreatePointerCast(Info.MappersArray, CGM.VoidPtrPtrTy);
9702 } else {
9703 BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
9704 PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
9705 SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
9706 MapTypesArrayArg =
9707 llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
9708 MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
9709 MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
9710 }
9711}
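// For illustration (a sketch): with N mapped pointers, each argument is an
// in-bounds GEP to element 0 of the corresponding array, e.g.
//   getelementptr inbounds [N x i8*], [N x i8*]* %.offload_baseptrs, i32 0, i32 0
// whereas with nothing mapped every argument degenerates to a typed null
// pointer, as in the else branch above.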
9712
9713/// Check for inner distribute directive.
9714static const OMPExecutableDirective *
9715getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
9716 const auto *CS = D.getInnermostCapturedStmt();
9717 const auto *Body =
9718 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
9719 const Stmt *ChildStmt =
9720 CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
9721
9722 if (const auto *NestedDir =
9723 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
9724 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
9725 switch (D.getDirectiveKind()) {
9726 case OMPD_target:
9727 if (isOpenMPDistributeDirective(DKind))
9728 return NestedDir;
9729 if (DKind == OMPD_teams) {
9730 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
9731 /*IgnoreCaptured=*/true);
9732 if (!Body)
9733 return nullptr;
9734 ChildStmt = CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
9735 if (const auto *NND =
9736 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
9737 DKind = NND->getDirectiveKind();
9738 if (isOpenMPDistributeDirective(DKind))
9739 return NND;
9740 }
9741 }
9742 return nullptr;
9743 case OMPD_target_teams:
9744 if (isOpenMPDistributeDirective(DKind))
9745 return NestedDir;
9746 return nullptr;
9747 case OMPD_target_parallel:
9748 case OMPD_target_simd:
9749 case OMPD_target_parallel_for:
9750 case OMPD_target_parallel_for_simd:
9751 return nullptr;
9752 case OMPD_target_teams_distribute:
9753 case OMPD_target_teams_distribute_simd:
9754 case OMPD_target_teams_distribute_parallel_for:
9755 case OMPD_target_teams_distribute_parallel_for_simd:
9756 case OMPD_parallel:
9757 case OMPD_for:
9758 case OMPD_parallel_for:
9759 case OMPD_parallel_master:
9760 case OMPD_parallel_sections:
9761 case OMPD_for_simd:
9762 case OMPD_parallel_for_simd:
9763 case OMPD_cancel:
9764 case OMPD_cancellation_point:
9765 case OMPD_ordered:
9766 case OMPD_threadprivate:
9767 case OMPD_allocate:
9768 case OMPD_task:
9769 case OMPD_simd:
9770 case OMPD_tile:
9771 case OMPD_unroll:
9772 case OMPD_sections:
9773 case OMPD_section:
9774 case OMPD_single:
9775 case OMPD_master:
9776 case OMPD_critical:
9777 case OMPD_taskyield:
9778 case OMPD_barrier:
9779 case OMPD_taskwait:
9780 case OMPD_taskgroup:
9781 case OMPD_atomic:
9782 case OMPD_flush:
9783 case OMPD_depobj:
9784 case OMPD_scan:
9785 case OMPD_teams:
9786 case OMPD_target_data:
9787 case OMPD_target_exit_data:
9788 case OMPD_target_enter_data:
9789 case OMPD_distribute:
9790 case OMPD_distribute_simd:
9791 case OMPD_distribute_parallel_for:
9792 case OMPD_distribute_parallel_for_simd:
9793 case OMPD_teams_distribute:
9794 case OMPD_teams_distribute_simd:
9795 case OMPD_teams_distribute_parallel_for:
9796 case OMPD_teams_distribute_parallel_for_simd:
9797 case OMPD_target_update:
9798 case OMPD_declare_simd:
9799 case OMPD_declare_variant:
9800 case OMPD_begin_declare_variant:
9801 case OMPD_end_declare_variant:
9802 case OMPD_declare_target:
9803 case OMPD_end_declare_target:
9804 case OMPD_declare_reduction:
9805 case OMPD_declare_mapper:
9806 case OMPD_taskloop:
9807 case OMPD_taskloop_simd:
9808 case OMPD_master_taskloop:
9809 case OMPD_master_taskloop_simd:
9810 case OMPD_parallel_master_taskloop:
9811 case OMPD_parallel_master_taskloop_simd:
9812 case OMPD_requires:
9813 case OMPD_unknown:
9814 default:
9815 llvm_unreachable("Unexpected directive.");
9816 }
9817 }
9818
9819 return nullptr;
9820}
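// For example (illustrative OpenMP source): given
//   #pragma omp target
//   #pragma omp teams
//   #pragma omp distribute
//   for (int i = 0; i < n; ++i) ...
// the function above returns the nested 'distribute' directive, while for
// 'target parallel' and similar combinations it returns nullptr.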
9821
9822/// Emit the user-defined mapper function. The code generation follows the
9823/// pattern in the example below.
9824/// \code
9825/// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
9826/// void *base, void *begin,
9827/// int64_t size, int64_t type,
9828/// void *name = nullptr) {
9829/// // Allocate space for an array section first or add a base/begin for
9830/// // pointer dereference.
9831/// if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
9832/// !maptype.IsDelete)
9833/// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
9834/// size*sizeof(Ty), clearToFromMember(type));
9835/// // Map members.
9836/// for (unsigned i = 0; i < size; i++) {
9837/// // For each component specified by this mapper:
9838/// for (auto c : begin[i]->all_components) {
9839/// if (c.hasMapper())
9840/// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin, c.arg_size,
9841/// c.arg_type, c.arg_name);
9842/// else
9843/// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
9844/// c.arg_begin, c.arg_size, c.arg_type,
9845/// c.arg_name);
9846/// }
9847/// }
9848/// // Delete the array section.
9849/// if (size > 1 && maptype.IsDelete)
9850/// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
9851/// size*sizeof(Ty), clearToFromMember(type));
9852/// }
9853/// \endcode
9854void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
9855 CodeGenFunction *CGF) {
9856 if (UDMMap.count(D) > 0)
9857 return;
9858 ASTContext &C = CGM.getContext();
9859 QualType Ty = D->getType();
9860 QualType PtrTy = C.getPointerType(Ty).withRestrict();
9861 QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
9862 auto *MapperVarDecl =
9863 cast<VarDecl>(cast<DeclRefExpr>(D->getMapperVarRef())->getDecl());
9864 SourceLocation Loc = D->getLocation();
9865 CharUnits ElementSize = C.getTypeSizeInChars(Ty);
9866
9867 // Prepare mapper function arguments and attributes.
9868 ImplicitParamDecl HandleArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
9869 C.VoidPtrTy, ImplicitParamDecl::Other);
9870 ImplicitParamDecl BaseArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
9871 ImplicitParamDecl::Other);
9872 ImplicitParamDecl BeginArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
9873 C.VoidPtrTy, ImplicitParamDecl::Other);
9874 ImplicitParamDecl SizeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
9875 ImplicitParamDecl::Other);
9876 ImplicitParamDecl TypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
9877 ImplicitParamDecl::Other);
9878 ImplicitParamDecl NameArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
9879 ImplicitParamDecl::Other);
9880 FunctionArgList Args;
9881 Args.push_back(&HandleArg);
9882 Args.push_back(&BaseArg);
9883 Args.push_back(&BeginArg);
9884 Args.push_back(&SizeArg);
9885 Args.push_back(&TypeArg);
9886 Args.push_back(&NameArg);
9887 const CGFunctionInfo &FnInfo =
9888 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
9889 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
9890 SmallString<64> TyStr;
9891 llvm::raw_svector_ostream Out(TyStr);
9892 CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out);
9893 std::string Name = getName({"omp_mapper", TyStr, D->getName()});
9894 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
9895 Name, &CGM.getModule());
9896 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
9897 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
9898 // Start the mapper function code generation.
9899 CodeGenFunction MapperCGF(CGM);
9900 MapperCGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
9901 // Compute the starting and end addresses of array elements.
9902 llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
9903 MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
9904 C.getPointerType(Int64Ty), Loc);
9905 // Prepare common arguments for array initialization and deletion.
9906 llvm::Value *Handle = MapperCGF.EmitLoadOfScalar(
9907 MapperCGF.GetAddrOfLocalVar(&HandleArg),
9908 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
9909 llvm::Value *BaseIn = MapperCGF.EmitLoadOfScalar(
9910 MapperCGF.GetAddrOfLocalVar(&BaseArg),
9911 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
9912 llvm::Value *BeginIn = MapperCGF.EmitLoadOfScalar(
9913 MapperCGF.GetAddrOfLocalVar(&BeginArg),
9914 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
9915 // Convert the size in bytes into the number of array elements.
9916 Size = MapperCGF.Builder.CreateExactUDiv(
9917 Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
9918 llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
9919 BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
9920 llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(
9921 PtrBegin->getType()->getPointerElementType(), PtrBegin, Size);
9922 llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
9923 MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
9924 C.getPointerType(Int64Ty), Loc);
9925 llvm::Value *MapName = MapperCGF.EmitLoadOfScalar(
9926 MapperCGF.GetAddrOfLocalVar(&NameArg),
9927 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
9928
9929 // Emit array initialization if this is an array section and \p MapType
9930 // indicates that memory allocation is required.
9931 llvm::BasicBlock *HeadBB = MapperCGF.createBasicBlock("omp.arraymap.head");
9932 emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
9933 MapName, ElementSize, HeadBB, /*IsInit=*/true);
9934
9935 // Emit a for loop to iterate through SizeArg elements and map all of them.
9936
9937 // Emit the loop header block.
9938 MapperCGF.EmitBlock(HeadBB);
9939 llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.arraymap.body");
9940 llvm::BasicBlock *DoneBB = MapperCGF.createBasicBlock("omp.done");
9941 // Evaluate whether the initial condition is satisfied.
9942 llvm::Value *IsEmpty =
9943 MapperCGF.Builder.CreateICmpEQ(PtrBegin, PtrEnd, "omp.arraymap.isempty");
9944 MapperCGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
9945 llvm::BasicBlock *EntryBB = MapperCGF.Builder.GetInsertBlock();
9946
9947 // Emit the loop body block.
9948 MapperCGF.EmitBlock(BodyBB);
9949 llvm::BasicBlock *LastBB = BodyBB;
9950 llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
9951 PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
9952 PtrPHI->addIncoming(PtrBegin, EntryBB);
9953 Address PtrCurrent =
9954 Address(PtrPHI, MapperCGF.GetAddrOfLocalVar(&BeginArg)
9955 .getAlignment()
9956 .alignmentOfArrayElement(ElementSize));
9957 // Privatize the mapper's declared variable so it refers to the current array element.
9958 CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
9959 Scope.addPrivate(MapperVarDecl, [PtrCurrent]() { return PtrCurrent; });
9960 (void)Scope.Privatize();
9961
9962 // Get map clause information. Fill up the arrays with all mapped variables.
9963 MappableExprsHandler::MapCombinedInfoTy Info;
9964 MappableExprsHandler MEHandler(*D, MapperCGF);
9965 MEHandler.generateAllInfoForMapper(Info);
9966
9967 // Call the runtime API __tgt_mapper_num_components to get the number of
9968 // pre-existing components.
9969 llvm::Value *OffloadingArgs[] = {Handle};
9970 llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
9971 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
9972 OMPRTL___tgt_mapper_num_components),
9973 OffloadingArgs);
9974 llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
9975 PreviousSize,
9976 MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));
9977
9978 // Fill up the runtime mapper handle for all components.
9979 for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
9980 llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
9981 *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
9982 llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
9983 Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
9984 llvm::Value *CurSizeArg = Info.Sizes[I];
9985 llvm::Value *CurNameArg =
9986 (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
9987 ? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
9988 : emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
9989
9990 // Extract the MEMBER_OF field from the map type.
9991 llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
9992 llvm::Value *MemberMapType =
9993 MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);
9994
9995 // Combine the map type inherited from user-defined mapper with that
9996 // specified in the program. According to the OMP_MAP_TO and OMP_MAP_FROM
9997 // bits of the \a MapType, which is the input argument of the mapper
9998 // function, the following code will set the OMP_MAP_TO and OMP_MAP_FROM
9999 // bits of MemberMapType.
10000 // [OpenMP 5.0], 1.2.6. map-type decay.
10001 //        | alloc |  to   | from  | tofrom | release | delete
10002 // ----------------------------------------------------------
10003 // alloc  | alloc | alloc | alloc | alloc  | release | delete
10004 // to     | alloc |  to   | alloc |   to   | release | delete
10005 // from   | alloc | alloc | from  |  from  | release | delete
10006 // tofrom | alloc |  to   | from  | tofrom | release | delete
10007 llvm::Value *LeftToFrom = MapperCGF.Builder.CreateAnd(
10008 MapType,
10009 MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO |
10010 MappableExprsHandler::OMP_MAP_FROM));
10011 llvm::BasicBlock *AllocBB = MapperCGF.createBasicBlock("omp.type.alloc");
10012 llvm::BasicBlock *AllocElseBB =
10013 MapperCGF.createBasicBlock("omp.type.alloc.else");
10014 llvm::BasicBlock *ToBB = MapperCGF.createBasicBlock("omp.type.to");
10015 llvm::BasicBlock *ToElseBB = MapperCGF.createBasicBlock("omp.type.to.else");
10016 llvm::BasicBlock *FromBB = MapperCGF.createBasicBlock("omp.type.from");
10017 llvm::BasicBlock *EndBB = MapperCGF.createBasicBlock("omp.type.end");
10018 llvm::Value *IsAlloc = MapperCGF.Builder.CreateIsNull(LeftToFrom);
10019 MapperCGF.Builder.CreateCondBr(IsAlloc, AllocBB, AllocElseBB);
10020 // In case of alloc, clear OMP_MAP_TO and OMP_MAP_FROM.
10021 MapperCGF.EmitBlock(AllocBB);
10022 llvm::Value *AllocMapType = MapperCGF.Builder.CreateAnd(
10023 MemberMapType,
10024 MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
10025 MappableExprsHandler::OMP_MAP_FROM)));
10026 MapperCGF.Builder.CreateBr(EndBB);
10027 MapperCGF.EmitBlock(AllocElseBB);
10028 llvm::Value *IsTo = MapperCGF.Builder.CreateICmpEQ(
10029 LeftToFrom,
10030 MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO));
10031 MapperCGF.Builder.CreateCondBr(IsTo, ToBB, ToElseBB);
10032 // In case of to, clear OMP_MAP_FROM.
10033 MapperCGF.EmitBlock(ToBB);
10034 llvm::Value *ToMapType = MapperCGF.Builder.CreateAnd(
10035 MemberMapType,
10036 MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_FROM));
10037 MapperCGF.Builder.CreateBr(EndBB);
10038 MapperCGF.EmitBlock(ToElseBB);
10039 llvm::Value *IsFrom = MapperCGF.Builder.CreateICmpEQ(
10040 LeftToFrom,
10041 MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_FROM));
10042 MapperCGF.Builder.CreateCondBr(IsFrom, FromBB, EndBB);
10043 // In case of from, clear OMP_MAP_TO.
10044 MapperCGF.EmitBlock(FromBB);
10045 llvm::Value *FromMapType = MapperCGF.Builder.CreateAnd(
10046 MemberMapType,
10047 MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_TO));
10048 // In case of tofrom, do nothing.
10049 MapperCGF.EmitBlock(EndBB);
10050 LastBB = EndBB;
10051 llvm::PHINode *CurMapType =
10052 MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.maptype");
10053 CurMapType->addIncoming(AllocMapType, AllocBB);
10054 CurMapType->addIncoming(ToMapType, ToBB);
10055 CurMapType->addIncoming(FromMapType, FromBB);
10056 CurMapType->addIncoming(MemberMapType, ToElseBB);
10057
10058 llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
10059 CurSizeArg, CurMapType, CurNameArg};
10060 if (Info.Mappers[I]) {
10061 // Call the corresponding mapper function.
10062 llvm::Function *MapperFunc = getOrCreateUserDefinedMapperFunc(
10063 cast<OMPDeclareMapperDecl>(Info.Mappers[I]));
10064 assert(MapperFunc && "Expect a valid mapper function is available.");
10065 MapperCGF.EmitNounwindRuntimeCall(MapperFunc, OffloadingArgs);
10066 } else {
10067 // Call the runtime API __tgt_push_mapper_component to fill up the runtime
10068 // data structure.
10069 MapperCGF.EmitRuntimeCall(
10070 OMPBuilder.getOrCreateRuntimeFunction(
10071 CGM.getModule(), OMPRTL___tgt_push_mapper_component),
10072 OffloadingArgs);
10073 }
10074 }
10075
10076 // Update the pointer to point to the next element that needs to be mapped,
10077 // and check whether we have mapped all elements.
10078 llvm::Type *ElemTy = PtrPHI->getType()->getPointerElementType();
10079 llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
10080 ElemTy, PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
10081 PtrPHI->addIncoming(PtrNext, LastBB);
10082 llvm::Value *IsDone =
10083 MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
10084 llvm::BasicBlock *ExitBB = MapperCGF.createBasicBlock("omp.arraymap.exit");
10085 MapperCGF.Builder.CreateCondBr(IsDone, ExitBB, BodyBB);
10086
10087 MapperCGF.EmitBlock(ExitBB);
10088 // Emit array deletion if this is an array section and \p MapType indicates
10089 // that deletion is required.
10090 emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
10091 MapName, ElementSize, DoneBB, /*IsInit=*/false);
10092
10093 // Emit the function exit block.
10094 MapperCGF.EmitBlock(DoneBB, /*IsFinished=*/true);
10095 MapperCGF.FinishFunction();
10096 UDMMap.try_emplace(D, Fn);
10097 if (CGF) {
10098 auto &Decls = FunctionUDMMap.FindAndConstruct(CGF->CurFn);
10099 Decls.second.push_back(D);
10100 }
10101}
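// For illustration (OpenMP source; the names are illustrative): a mapper like
//   #pragma omp declare mapper(id : struct S s) map(to : s.a, s.b[0:s.n])
// is emitted by the code above as an internal function named roughly
//   .omp_mapper.<mangled S>.id
// which walks the mapped array section and pushes one component per item in
// the mapper's map clauses.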
10102
10103/// Emit the array initialization or deletion portion for user-defined mapper
10104/// code generation. First, it evaluates whether an array section is mapped and
10105/// whether the \a MapType instructs to delete this section. If \a IsInit is
10106/// true, and \a MapType indicates to not delete this array, array
10107/// initialization code is generated. If \a IsInit is false, and \a MapType
10108 /// indicates to delete this array, array deletion code is generated.
10109void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
10110 CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *Base,
10111 llvm::Value *Begin, llvm::Value *Size, llvm::Value *MapType,
10112 llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB,
10113 bool IsInit) {
10114 StringRef Prefix = IsInit ? ".init" : ".del";
10115
10116 // Evaluate if this is an array section.
10117 llvm::BasicBlock *BodyBB =
10118 MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
10119 llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGT(
10120 Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
10121 llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
10122 MapType,
10123 MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_DELETE));
10124 llvm::Value *DeleteCond;
10125 llvm::Value *Cond;
10126 if (IsInit) {
10127 // base != begin?
10128 llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateIsNotNull(
10129 MapperCGF.Builder.CreatePtrDiff(Base, Begin));
10130 // IsPtrAndObj?
10131 llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
10132 MapType,
10133 MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_PTR_AND_OBJ));
10134 PtrAndObjBit = MapperCGF.Builder.CreateIsNotNull(PtrAndObjBit);
10135 BaseIsBegin = MapperCGF.Builder.CreateAnd(BaseIsBegin, PtrAndObjBit);
10136 Cond = MapperCGF.Builder.CreateOr(IsArray, BaseIsBegin);
10137 DeleteCond = MapperCGF.Builder.CreateIsNull(
10138 DeleteBit, getName({"omp.array", Prefix, ".delete"}));
10139 } else {
10140 Cond = IsArray;
10141 DeleteCond = MapperCGF.Builder.CreateIsNotNull(
10142 DeleteBit, getName({"omp.array", Prefix, ".delete"}));
10143 }
10144 Cond = MapperCGF.Builder.CreateAnd(Cond, DeleteCond);
10145 MapperCGF.Builder.CreateCondBr(Cond, BodyBB, ExitBB);
10146
10147 MapperCGF.EmitBlock(BodyBB);
10148 // Get the array size by multiplying element size and element number (i.e., \p
10149 // Size).
10150 llvm::Value *ArraySize = MapperCGF.Builder.CreateNUWMul(
10151 Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
10152 // Remove OMP_MAP_TO and OMP_MAP_FROM from the map type, so that it achieves
10153 // memory allocation/deletion purpose only.
10154 llvm::Value *MapTypeArg = MapperCGF.Builder.CreateAnd(
10155 MapType,
10156 MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
10157 MappableExprsHandler::OMP_MAP_FROM)));
10158 MapTypeArg = MapperCGF.Builder.CreateOr(
10159 MapTypeArg,
10160 MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_IMPLICIT));
10161
10162 // Call the runtime API __tgt_push_mapper_component to fill up the runtime
10163 // data structure.
10164 llvm::Value *OffloadingArgs[] = {Handle, Base, Begin,
10165 ArraySize, MapTypeArg, MapName};
10166 MapperCGF.EmitRuntimeCall(
10167 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
10168 OMPRTL___tgt_push_mapper_component),
10169 OffloadingArgs);
10170}
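// In pseudocode, the guard built above is roughly (a sketch of the branches):
//   init: cond = (size > 1 || (base != begin && IsPtrAndObj)) && !DeleteBit
//   del:  cond = (size > 1) && DeleteBit
// and when it holds, __tgt_push_mapper_component receives size * sizeof(elem)
// with OMP_MAP_TO/OMP_MAP_FROM cleared and OMP_MAP_IMPLICIT set.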
10171
10172llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc(
10173 const OMPDeclareMapperDecl *D) {
10174 auto I = UDMMap.find(D);
10175 if (I != UDMMap.end())
10176 return I->second;
10177 emitUserDefinedMapper(D);
10178 return UDMMap.lookup(D);
10179}
10180
10181void CGOpenMPRuntime::emitTargetNumIterationsCall(
10182 CodeGenFunction &CGF, const OMPExecutableDirective &D,
10183 llvm::Value *DeviceID,
10184 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
10185 const OMPLoopDirective &D)>
10186 SizeEmitter) {
10187 OpenMPDirectiveKind Kind = D.getDirectiveKind();
10188 const OMPExecutableDirective *TD = &D;
10189 // Get nested teams distribute kind directive, if any.
10190 if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
10191 TD = getNestedDistributeDirective(CGM.getContext(), D);
10192 if (!TD)
10193 return;
10194 const auto *LD = cast<OMPLoopDirective>(TD);
10195 auto &&CodeGen = [LD, DeviceID, SizeEmitter, &D, this](CodeGenFunction &CGF,
10196 PrePostActionTy &) {
10197 if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) {
10198 llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
10199 llvm::Value *Args[] = {RTLoc, DeviceID, NumIterations};
10200 CGF.EmitRuntimeCall(
10201 OMPBuilder.getOrCreateRuntimeFunction(
10202 CGM.getModule(), OMPRTL___kmpc_push_target_tripcount_mapper),
10203 Args);
10204 }
10205 };
10206 emitInlinedDirective(CGF, OMPD_unknown, CodeGen);
10207}
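// The inlined region above reduces to one runtime call, roughly (a sketch):
//   __kmpc_push_target_tripcount_mapper(&loc, device_id, num_iterations);
// so the runtime knows the loop trip count before the target region launches.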
10208
10209void CGOpenMPRuntime::emitTargetCall(
10210 CodeGenFunction &CGF, const OMPExecutableDirective &D,
10211 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
10212 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
10213 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
10214 const OMPLoopDirective &D)>
10215 SizeEmitter) {
10216 if (!CGF.HaveInsertPoint())
10217 return;
10218
10219 assert(OutlinedFn && "Invalid outlined function!");
10220
10221 const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
10222 D.hasClausesOfKind<OMPNowaitClause>();
10223 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
10224 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
10225 auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
10226 PrePostActionTy &) {
10227 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
10228 };
10229 emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
10230
10231 CodeGenFunction::OMPTargetDataInfo InputInfo;
10232 llvm::Value *MapTypesArray = nullptr;
10233 llvm::Value *MapNamesArray = nullptr;
10234 // Fill up the pointer arrays and transfer execution to the device.
10235 auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
10236 &MapTypesArray, &MapNamesArray, &CS, RequiresOuterTask,
10237 &CapturedVars,
10238 SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
10239 if (Device.getInt() == OMPC_DEVICE_ancestor) {
10240 // Reverse offloading is not supported, so just execute on the host.
10241 if (RequiresOuterTask) {
10242 CapturedVars.clear();
10243 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
10244 }
10245 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
10246 return;
10247 }
10248
10249 // On top of the arrays that were filled up, the target offloading call
10250 // takes as arguments the device id as well as the host pointer. The host
10251 // pointer is used by the runtime library to identify the current target
10252 // region, so it only has to be unique and not necessarily point to
10253 // anything. It could be the pointer to the outlined function that
10254 // implements the target region, but we aren't using that so that the
10255 // compiler doesn't need to keep that, and could therefore inline the host
10256 // function if proven worthwhile during optimization.
10257
10258 // From this point on, we need to have an ID of the target region defined.
10259 assert(OutlinedFnID && "Invalid outlined function ID!");
10260
10261 // Emit device ID if any.
10262 llvm::Value *DeviceID;
10263 if (Device.getPointer()) {
10264 assert((Device.getInt() == OMPC_DEVICE_unknown ||
10265 Device.getInt() == OMPC_DEVICE_device_num) &&
10266 "Expected device_num modifier.");
10267 llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
10268 DeviceID =
10269 CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
10270 } else {
10271 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
10272 }
10273
10274 // Emit the number of elements in the offloading arrays.
10275 llvm::Value *PointerNum =
10276 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
10277
10278 // Return value of the runtime offloading call.
10279 llvm::Value *Return;
10280
10281 llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
10282 llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
10283
10284 // Source location for the ident struct
10285 llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
10286
10287 // Emit tripcount for the target loop-based directive.
10288 emitTargetNumIterationsCall(CGF, D, DeviceID, SizeEmitter);
10289
10290 bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
10291 // The target region is an outlined function launched by the runtime
10292 // via calls __tgt_target() or __tgt_target_teams().
10293 //
10294 // __tgt_target() launches a target region with one team and one thread,
10295 // executing a serial region. This master thread may in turn launch
10296 // more threads within its team upon encountering a parallel region,
10297 // however, no additional teams can be launched on the device.
10298 //
10299 // __tgt_target_teams() launches a target region with one or more teams,
10300 // each with one or more threads. This call is required for target
10301 // constructs such as:
10302 // 'target teams'
10303 // 'target' / 'teams'
10304 // 'target teams distribute parallel for'
10305 // 'target parallel'
10306 // and so on.
10307 //
10308 // Note that on the host and CPU targets, the runtime implementation of
10309 // these calls simply call the outlined function without forking threads.
10310 // The outlined functions themselves have runtime calls to
10311 // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
10312 // the compiler in emitTeamsCall() and emitParallelCall().
10313 //
10314 // In contrast, on the NVPTX target, the implementation of
10315 // __tgt_target_teams() launches a GPU kernel with the requested number
10316 // of teams and threads so no additional calls to the runtime are required.
10317 if (NumTeams) {
10318 // If we have NumTeams defined this means that we have an enclosed teams
10319 // region. Therefore we also expect to have NumThreads defined. These two
10320 // values should be defined in the presence of a teams directive,
10321 // regardless of having any clauses associated. If the user is using teams
10322 // but no clauses, these two values will be the default that should be
10323 // passed to the runtime library - a 32-bit integer with the value zero.
10324 assert(NumThreads && "Thread limit expression should be available along "
10325 "with number of teams.");
10326 SmallVector<llvm::Value *> OffloadingArgs = {
10327 RTLoc,
10328 DeviceID,
10329 OutlinedFnID,
10330 PointerNum,
10331 InputInfo.BasePointersArray.getPointer(),
10332 InputInfo.PointersArray.getPointer(),
10333 InputInfo.SizesArray.getPointer(),
10334 MapTypesArray,
10335 MapNamesArray,
10336 InputInfo.MappersArray.getPointer(),
10337 NumTeams,
10338 NumThreads};
10339 if (HasNowait) {
10340 // Add int32_t depNum = 0, void *depList = nullptr, int32_t
10341 // noAliasDepNum = 0, void *noAliasDepList = nullptr.
10342 OffloadingArgs.push_back(CGF.Builder.getInt32(0));
10343 OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
10344 OffloadingArgs.push_back(CGF.Builder.getInt32(0));
10345 OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
10346 }
10347 Return = CGF.EmitRuntimeCall(
10348 OMPBuilder.getOrCreateRuntimeFunction(
10349 CGM.getModule(), HasNowait
10350 ? OMPRTL___tgt_target_teams_nowait_mapper
10351 : OMPRTL___tgt_target_teams_mapper),
10352 OffloadingArgs);
10353 } else {
10354 SmallVector<llvm::Value *> OffloadingArgs = {
10355 RTLoc,
10356 DeviceID,
10357 OutlinedFnID,
10358 PointerNum,
10359 InputInfo.BasePointersArray.getPointer(),
10360 InputInfo.PointersArray.getPointer(),
10361 InputInfo.SizesArray.getPointer(),
10362 MapTypesArray,
10363 MapNamesArray,
10364 InputInfo.MappersArray.getPointer()};
10365 if (HasNowait) {
10366 // Add int32_t depNum = 0, void *depList = nullptr, int32_t
10367 // noAliasDepNum = 0, void *noAliasDepList = nullptr.
10368 OffloadingArgs.push_back(CGF.Builder.getInt32(0));
10369 OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
10370 OffloadingArgs.push_back(CGF.Builder.getInt32(0));
10371 OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
10372 }
10373 Return = CGF.EmitRuntimeCall(
10374 OMPBuilder.getOrCreateRuntimeFunction(
10375 CGM.getModule(), HasNowait ? OMPRTL___tgt_target_nowait_mapper
10376 : OMPRTL___tgt_target_mapper),
10377 OffloadingArgs);
10378 }
10379
10380 // Check the error code and execute the host version if required.
10381 llvm::BasicBlock *OffloadFailedBlock =
10382 CGF.createBasicBlock("omp_offload.failed");
10383 llvm::BasicBlock *OffloadContBlock =
10384 CGF.createBasicBlock("omp_offload.cont");
10385 llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
10386 CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
10387
10388 CGF.EmitBlock(OffloadFailedBlock);
10389 if (RequiresOuterTask) {
10390 CapturedVars.clear();
10391 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
10392 }
10393 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
10394 CGF.EmitBranch(OffloadContBlock);
10395
10396 CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
10397 };
10398
10399 // Notify that the host version must be executed.
10400 auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
10401 RequiresOuterTask](CodeGenFunction &CGF,
10402 PrePostActionTy &) {
10403 if (RequiresOuterTask) {
10404 CapturedVars.clear();
10405 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
10406 }
10407 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
10408 };
10409
10410 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
10411 &MapNamesArray, &CapturedVars, RequiresOuterTask,
10412 &CS](CodeGenFunction &CGF, PrePostActionTy &) {
10413 // Fill up the arrays with all the captured variables.
10414 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
10415
10416 // Get mappable expression information.
10417 MappableExprsHandler MEHandler(D, CGF);
10418 llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
10419 llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
10420
10421 auto RI = CS.getCapturedRecordDecl()->field_begin();
10422 auto *CV = CapturedVars.begin();
10423 for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
10424 CE = CS.capture_end();
10425 CI != CE; ++CI, ++RI, ++CV) {
10426 MappableExprsHandler::MapCombinedInfoTy CurInfo;
10427 MappableExprsHandler::StructRangeInfoTy PartialStruct;
10428
10429 // VLA sizes are passed to the outlined region by copy and do not have map
10430 // information associated.
10431 if (CI->capturesVariableArrayType()) {
10432 CurInfo.Exprs.push_back(nullptr);
10433 CurInfo.BasePointers.push_back(*CV);
10434 CurInfo.Pointers.push_back(*CV);
10435 CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
10436 CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
10437 // Copy to the device as an argument. No need to retrieve it.
10438 CurInfo.Types.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
10439 MappableExprsHandler::OMP_MAP_TARGET_PARAM |
10440 MappableExprsHandler::OMP_MAP_IMPLICIT);
10441 CurInfo.Mappers.push_back(nullptr);
10442 } else {
10443 // If we have any information in the map clause, we use it, otherwise we
10444 // just do a default mapping.
10445 MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
10446 if (!CI->capturesThis())
10447 MappedVarSet.insert(CI->getCapturedVar());
10448 else
10449 MappedVarSet.insert(nullptr);
10450 if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
10451 MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
10452 // Generate correct mapping for variables captured by reference in
10453 // lambdas.
10454 if (CI->capturesVariable())
10455 MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
10456 CurInfo, LambdaPointers);
10457 }
10458 // We expect to have at least an element of information for this capture.
10459 assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
10460 "Non-existing map pointer for capture!");
10461 assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
10462 CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
10463 CurInfo.BasePointers.size() == CurInfo.Types.size() &&
10464 CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
10465 "Inconsistent map information sizes!");
10466
10467 // If there is an entry in PartialStruct it means we have a struct with
10468 // individual members mapped. Emit an extra combined entry.
10469 if (PartialStruct.Base.isValid()) {
10470 CombinedInfo.append(PartialStruct.PreliminaryMapData);
10471 MEHandler.emitCombinedEntry(
10472 CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
10473 !PartialStruct.PreliminaryMapData.BasePointers.empty());
10474 }
10475
10476 // We need to append the results of this capture to what we already have.
10477 CombinedInfo.append(CurInfo);
10478 }
10479 // Adjust MEMBER_OF flags for the lambdas captures.
10480 MEHandler.adjustMemberOfForLambdaCaptures(
10481 LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
10482 CombinedInfo.Types);
10483 // Map any list items in a map clause that were not captures because they
10484 // weren't referenced within the construct.
10485 MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
10486
10487 TargetDataInfo Info;
10488 // Fill up the arrays and create the arguments.
10489 emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
10490 emitOffloadingArraysArgument(
10491 CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
10492 Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
10493 {/*ForEndCall=*/false});
10494
10495 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
10496 InputInfo.BasePointersArray =
10497 Address(Info.BasePointersArray, CGM.getPointerAlign());
10498 InputInfo.PointersArray =
10499 Address(Info.PointersArray, CGM.getPointerAlign());
10500 InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
10501 InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
10502 MapTypesArray = Info.MapTypesArray;
10503 MapNamesArray = Info.MapNamesArray;
10504 if (RequiresOuterTask)
10505 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
10506 else
10507 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
10508 };
10509
10510 auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
10511 CodeGenFunction &CGF, PrePostActionTy &) {
10512 if (RequiresOuterTask) {
10513 CodeGenFunction::OMPTargetDataInfo InputInfo;
10514 CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
10515 } else {
10516 emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
10517 }
10518 };
10519
10520 // If we have a target function ID it means that we need to support
10521 // offloading; otherwise, just execute on the host. We need to execute on the
10522 // host regardless of the condition in the if clause if, e.g., the user does
10523 // not specify any target triples.
10524 if (OutlinedFnID) {
10525 if (IfCond) {
10526 emitIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
10527 } else {
10528 RegionCodeGenTy ThenRCG(TargetThenGen);
10529 ThenRCG(CGF);
10530 }
10531 } else {
10532 RegionCodeGenTy ElseRCG(TargetElseGen);
10533 ElseRCG(CGF);
10534 }
10535}
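// Overall, the code emitted by emitTargetCall has this shape (a sketch):
//   <fill .offload_* arrays>
//   %ret = __tgt_target_mapper(...)        ; or __tgt_target_teams_mapper(...)
//   if (%ret != 0) {                       ; omp_offload.failed
//     <call the host fallback (outlined function)>
//   }                                      ; omp_offload.cont
// with the whole block guarded by the if clause when one is present.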
10536
10537void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
10538 StringRef ParentName) {
10539 if (!S)
10540 return;
10541
10542 // Codegen OMP target directives that offload compute to the device.
10543 bool RequiresDeviceCodegen =
10544 isa<OMPExecutableDirective>(S) &&
10545 isOpenMPTargetExecutionDirective(
10546 cast<OMPExecutableDirective>(S)->getDirectiveKind());
10547
10548 if (RequiresDeviceCodegen) {
10549 const auto &E = *cast<OMPExecutableDirective>(S);
10550 unsigned DeviceID;
10551 unsigned FileID;
10552 unsigned Line;
10553 getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
10554 FileID, Line);
10555
10556 // Is this a target region that should not be emitted as an entry point? If
10557 // so just signal we are done with this target region.
10558 if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
10559 ParentName, Line))
10560 return;
10561
10562 switch (E.getDirectiveKind()) {
10563 case OMPD_target:
10564 CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
10565 cast<OMPTargetDirective>(E));
10566 break;
10567 case OMPD_target_parallel:
10568 CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
10569 CGM, ParentName, cast<OMPTargetParallelDirective>(E));
10570 break;
10571 case OMPD_target_teams:
10572 CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
10573 CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
10574 break;
10575 case OMPD_target_teams_distribute:
10576 CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
10577 CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
10578 break;
10579 case OMPD_target_teams_distribute_simd:
10580 CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
10581 CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
10582 break;
10583 case OMPD_target_parallel_for:
10584 CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
10585 CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
10586 break;
10587 case OMPD_target_parallel_for_simd:
10588 CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
10589 CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
10590 break;
10591 case OMPD_target_simd:
10592 CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
10593 CGM, ParentName, cast<OMPTargetSimdDirective>(E));
10594 break;
10595 case OMPD_target_teams_distribute_parallel_for:
10596 CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
10597 CGM, ParentName,
10598 cast<OMPTargetTeamsDistributeParallelForDirective>(E));
10599 break;
10600 case OMPD_target_teams_distribute_parallel_for_simd:
10601 CodeGenFunction::
10602 EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
10603 CGM, ParentName,
10604 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
10605 break;
10606 case OMPD_parallel:
10607 case OMPD_for:
10608 case OMPD_parallel_for:
10609 case OMPD_parallel_master:
10610 case OMPD_parallel_sections:
10611 case OMPD_for_simd:
10612 case OMPD_parallel_for_simd:
10613 case OMPD_cancel:
10614 case OMPD_cancellation_point:
10615 case OMPD_ordered:
10616 case OMPD_threadprivate:
10617 case OMPD_allocate:
10618 case OMPD_task:
10619 case OMPD_simd:
10620 case OMPD_tile:
10621 case OMPD_unroll:
10622 case OMPD_sections:
10623 case OMPD_section:
10624 case OMPD_single:
10625 case OMPD_master:
10626 case OMPD_critical:
10627 case OMPD_taskyield:
10628 case OMPD_barrier:
10629 case OMPD_taskwait:
10630 case OMPD_taskgroup:
10631 case OMPD_atomic:
10632 case OMPD_flush:
10633 case OMPD_depobj:
10634 case OMPD_scan:
10635 case OMPD_teams:
10636 case OMPD_target_data:
10637 case OMPD_target_exit_data:
10638 case OMPD_target_enter_data:
10639 case OMPD_distribute:
10640 case OMPD_distribute_simd:
10641 case OMPD_distribute_parallel_for:
10642 case OMPD_distribute_parallel_for_simd:
10643 case OMPD_teams_distribute:
10644 case OMPD_teams_distribute_simd:
10645 case OMPD_teams_distribute_parallel_for:
10646 case OMPD_teams_distribute_parallel_for_simd:
10647 case OMPD_target_update:
10648 case OMPD_declare_simd:
10649 case OMPD_declare_variant:
10650 case OMPD_begin_declare_variant:
10651 case OMPD_end_declare_variant:
10652 case OMPD_declare_target:
10653 case OMPD_end_declare_target:
10654 case OMPD_declare_reduction:
10655 case OMPD_declare_mapper:
10656 case OMPD_taskloop:
10657 case OMPD_taskloop_simd:
10658 case OMPD_master_taskloop:
10659 case OMPD_master_taskloop_simd:
10660 case OMPD_parallel_master_taskloop:
10661 case OMPD_parallel_master_taskloop_simd:
10662 case OMPD_requires:
10663 case OMPD_unknown:
10664 default:
10665 llvm_unreachable("Unknown target directive for OpenMP device codegen.");
10666 }
10667 return;
10668 }
10669
10670 if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
10671 if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
10672 return;
10673
10674 scanForTargetRegionsFunctions(E->getRawStmt(), ParentName);
10675 return;
10676 }
10677
10678 // If this is a lambda function, look into its body.
10679 if (const auto *L = dyn_cast<LambdaExpr>(S))
10680 S = L->getBody();
10681
10682 // Keep looking for target regions recursively.
10683 for (const Stmt *II : S->children())
10684 scanForTargetRegionsFunctions(II, ParentName);
10685}
10686
10687static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
10688 Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
10689 OMPDeclareTargetDeclAttr::getDeviceType(VD);
10690 if (!DevTy)
10691 return false;
10692 // Do not emit device_type(nohost) functions for the host.
10693 if (!IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
10694 return true;
10695 // Do not emit device_type(host) functions for the device.
10696 if (IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_Host)
10697 return true;
10698 return false;
10699}
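// For example (illustrative OpenMP source):
//   #pragma omp declare target device_type(nohost)
//   void gpu_only();
//   #pragma omp end declare target
// makes isAssumedToBeNotEmitted() return true during host codegen, so the
// function is skipped on the host.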
10700
10701bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
10702 // If emitting code for the host, we do not process FD here. Instead we do
10703 // the normal code generation.
10704 if (!CGM.getLangOpts().OpenMPIsDevice) {
10705 if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
10706 if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
10707 CGM.getLangOpts().OpenMPIsDevice))
10708 return true;
10709 return false;
10710 }
10711
10712 const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
10713 // Try to detect target regions in the function.
10714 if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
10715 StringRef Name = CGM.getMangledName(GD);
10716 scanForTargetRegionsFunctions(FD->getBody(), Name);
10717 if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
10718 CGM.getLangOpts().OpenMPIsDevice))
10719 return true;
10720 }
10721
10722 // Do not emit the function if it is not marked as declare target.
10723 return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
10724 AlreadyEmittedTargetDecls.count(VD) == 0;
10725}
10726
10727bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
10728 if (isAssumedToBeNotEmitted(cast<ValueDecl>(GD.getDecl()),
10729 CGM.getLangOpts().OpenMPIsDevice))
10730 return true;
10731
10732 if (!CGM.getLangOpts().OpenMPIsDevice)
10733 return false;
10734
10735 // Check if there are Ctors/Dtors in this declaration and look for target
10736 // regions in it. We use the complete variant to produce the kernel name
10737 // mangling.
10738 QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
10739 if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
10740 for (const CXXConstructorDecl *Ctor : RD->ctors()) {
10741 StringRef ParentName =
10742 CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
10743 scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
10744 }
10745 if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
10746 StringRef ParentName =
10747 CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
10748 scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
10749 }
10750 }
10751
10752 // Do not emit the variable if it is not marked as declare target.
10753 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
10754 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
10755 cast<VarDecl>(GD.getDecl()));
10756 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
10757 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
10758 HasRequiresUnifiedSharedMemory)) {
10759 DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
10760 return true;
10761 }
10762 return false;
10763}
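// For example (illustrative OpenMP source):
//   int g;
//   #pragma omp declare target link(g)
// routes 'g' into DeferredGlobalVariables above, deferring its emission to
// emitDeferredTargetDecls() further below.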
10764
10765void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
10766 llvm::Constant *Addr) {
10767 if (CGM.getLangOpts().OMPTargetTriples.empty() &&
10768 !CGM.getLangOpts().OpenMPIsDevice)
10769 return;
10770
10771 // If we have host/nohost variables, they do not need to be registered.
10772 Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
10773 OMPDeclareTargetDeclAttr::getDeviceType(VD);
10774 if (DevTy && DevTy.getValue() != OMPDeclareTargetDeclAttr::DT_Any)
10775 return;
10776
10777 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
10778 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
10779 if (!Res) {
10780 if (CGM.getLangOpts().OpenMPIsDevice) {
10781 // Register non-target variables being emitted in device code (debug info
10782 // may cause this).
10783 StringRef VarName = CGM.getMangledName(VD);
10784 EmittedNonTargetVariables.try_emplace(VarName, Addr);
10785 }
10786 return;
10787 }
10788 // Register declare target variables.
10789 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
10790 StringRef VarName;
10791 CharUnits VarSize;
10792 llvm::GlobalValue::LinkageTypes Linkage;
10793
10794 if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
10795 !HasRequiresUnifiedSharedMemory) {
10796 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
10797 VarName = CGM.getMangledName(VD);
10798 if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
10799 VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
10800 assert(!VarSize.isZero() && "Expected non-zero size of the variable");
10801 } else {
10802 VarSize = CharUnits::Zero();
10803 }
10804 Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
10805 // Temp solution to prevent optimizations of the internal variables.
10806 if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
10807 // Do not create a "ref-variable" if the original is not also available
10808 // on the host.
10809 if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
10810 return;
10811 std::string RefName = getName({VarName, "ref"});
10812 if (!CGM.GetGlobalValue(RefName)) {
10813 llvm::Constant *AddrRef =
10814 getOrCreateInternalVariable(Addr->getType(), RefName);
10815 auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
10816 GVAddrRef->setConstant(/*Val=*/true);
10817 GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
10818 GVAddrRef->setInitializer(Addr);
10819 CGM.addCompilerUsedGlobal(GVAddrRef);
10820 }
10821 }
10822 } else {
10823 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
10824 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
10825 HasRequiresUnifiedSharedMemory)) &&
10826 "Declare target attribute must link or to with unified memory.");
10827 if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
10828 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
10829 else
10830 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
10831
10832 if (CGM.getLangOpts().OpenMPIsDevice) {
10833 VarName = Addr->getName();
10834 Addr = nullptr;
10835 } else {
10836 VarName = getAddrOfDeclareTargetVar(VD).getName();
10837 Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
10838 }
10839 VarSize = CGM.getPointerSize();
10840 Linkage = llvm::GlobalValue::WeakAnyLinkage;
10841 }
10842
10843 OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
10844 VarName, Addr, VarSize, Flags, Linkage);
10845}
10846
10847bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
10848 if (isa<FunctionDecl>(GD.getDecl()) ||
10849 isa<OMPDeclareReductionDecl>(GD.getDecl()))
10850 return emitTargetFunctions(GD);
10851
10852 return emitTargetGlobalVariable(GD);
10853}
10854
10855void CGOpenMPRuntime::emitDeferredTargetDecls() const {
10856 for (const VarDecl *VD : DeferredGlobalVariables) {
10857 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
10858 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
10859 if (!Res)
10860 continue;
10861 if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
10862 !HasRequiresUnifiedSharedMemory) {
10863 CGM.EmitGlobal(VD);
10864 } else {
10865 assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||((void)0)
10866 (*Res == OMPDeclareTargetDeclAttr::MT_To &&((void)0)
10867 HasRequiresUnifiedSharedMemory)) &&((void)0)
10868 "Expected link clause or to clause with unified memory.")((void)0);
10869 (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
10870 }
10871 }
10872}
10873
10874void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
10875 CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
10876 assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&((void)0)
10877 " Expected target-based directive.")((void)0);
10878}
10879
10880void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
10881 for (const OMPClause *Clause : D->clauselists()) {
10882 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
10883 HasRequiresUnifiedSharedMemory = true;
10884 } else if (const auto *AC =
10885 dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
10886 switch (AC->getAtomicDefaultMemOrderKind()) {
10887 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
10888 RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
10889 break;
10890 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
10891 RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
10892 break;
10893 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
10894 RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
10895 break;
10896 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
10897 break;
10898 }
10899 }
10900 }
10901}
10902
10903llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
10904 return RequiresAtomicOrdering;
10905}
10906
10907bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
10908 LangAS &AS) {
10909 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
10910 return false;
10911 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
10912 switch(A->getAllocatorType()) {
10913 case OMPAllocateDeclAttr::OMPNullMemAlloc:
10914 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
10915 // Not supported, fallback to the default mem space.
10916 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
10917 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
10918 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
10919 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
10920 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
10921 case OMPAllocateDeclAttr::OMPConstMemAlloc:
10922 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
10923 AS = LangAS::Default;
10924 return true;
10925 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
10926 llvm_unreachable("Expected predefined allocator for the variables with the "__builtin_unreachable()
10927 "static storage.")__builtin_unreachable();
10928 }
10929 return false;
10930}
10931
10932bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
10933 return HasRequiresUnifiedSharedMemory;
10934}
10935
10936CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
10937 CodeGenModule &CGM)
10938 : CGM(CGM) {
10939 if (CGM.getLangOpts().OpenMPIsDevice) {
10940 SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
10941 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
10942 }
10943}
10944
10945CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
10946 if (CGM.getLangOpts().OpenMPIsDevice)
10947 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
10948}
10949
10950bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
10951 if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
10952 return true;
10953
10954 const auto *D = cast<FunctionDecl>(GD.getDecl());
10955 // Do not emit the function if it is marked as declare target, as it was
10956 // already emitted.
10957 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
10958 if (D->hasBody() && AlreadyEmittedTargetDecls.count(D) == 0) {
10959 if (auto *F = dyn_cast_or_null<llvm::Function>(
10960 CGM.GetGlobalValue(CGM.getMangledName(GD))))
10961 return !F->isDeclaration();
10962 return false;
10963 }
10964 return true;
10965 }
10966
10967 return !AlreadyEmittedTargetDecls.insert(D).second;
10968}
10969
10970llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
10971 // If we don't have entries or if we are emitting code for the device, we
10972 // don't need to do anything.
10973 if (CGM.getLangOpts().OMPTargetTriples.empty() ||
10974 CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
10975 (OffloadEntriesInfoManager.empty() &&
10976 !HasEmittedDeclareTargetRegion &&
10977 !HasEmittedTargetRegion))
10978 return nullptr;
10979
10980 // Create and register the function that handles the requires directives.
10981 ASTContext &C = CGM.getContext();
10982
10983 llvm::Function *RequiresRegFn;
10984 {
10985 CodeGenFunction CGF(CGM);
10986 const auto &FI = CGM.getTypes().arrangeNullaryFunction();
10987 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
10988 std::string ReqName = getName({"omp_offloading", "requires_reg"});
10989 RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
10990 CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
10991 OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
10992 // TODO: check for other requires clauses.
10993 // The requires directive takes effect only when a target region is
10994 // present in the compilation unit. Otherwise it is ignored and not
10995 // passed to the runtime. This keeps the runtime from raising an error
10996 // for mismatched requires clauses across compilation units that don't
10997 // contain at least one target region.
10998 assert((HasEmittedTargetRegion ||((void)0)
10999 HasEmittedDeclareTargetRegion ||((void)0)
11000 !OffloadEntriesInfoManager.empty()) &&((void)0)
11001 "Target or declare target region expected.")((void)0);
11002 if (HasRequiresUnifiedSharedMemory)
11003 Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
11004 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
11005 CGM.getModule(), OMPRTL___tgt_register_requires),
11006 llvm::ConstantInt::get(CGM.Int64Ty, Flags));
11007 CGF.FinishFunction();
11008 }
11009 return RequiresRegFn;
11010}
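Conceptually, the registration function built above behaves like the following hand-written C++ (an editor's sketch assuming only the unified_shared_memory requirement; the helper name is illustrative):

    extern "C" void __tgt_register_requires(int64_t Flags);
    // Registered as a global initializer on the host.
    static void omp_offloading_requires_reg() {
      __tgt_register_requires(OMP_REQ_UNIFIED_SHARED_MEMORY);
    }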
11011
11012void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
11013 const OMPExecutableDirective &D,
11014 SourceLocation Loc,
11015 llvm::Function *OutlinedFn,
11016 ArrayRef<llvm::Value *> CapturedVars) {
11017 if (!CGF.HaveInsertPoint())
11018 return;
11019
11020 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
11021 CodeGenFunction::RunCleanupsScope Scope(CGF);
11022
11023 // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
11024 llvm::Value *Args[] = {
11025 RTLoc,
11026 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
11027 CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
11028 llvm::SmallVector<llvm::Value *, 16> RealArgs;
11029 RealArgs.append(std::begin(Args), std::end(Args));
11030 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
11031
11032 llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
11033 CGM.getModule(), OMPRTL___kmpc_fork_teams);
11034 CGF.EmitRuntimeCall(RTLFn, RealArgs);
11035}
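For a teams region capturing two variables, the call assembled above amounts to (an editor's approximation, not the emitted IR):

    // __kmpc_fork_teams(loc, argc, microtask, var1, ..., varn)
    __kmpc_fork_teams(&Loc, /*argc=*/2, (kmpc_micro)OutlinedFn, &Var1, &Var2);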
11036
11037void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
11038 const Expr *NumTeams,
11039 const Expr *ThreadLimit,
11040 SourceLocation Loc) {
11041 if (!CGF.HaveInsertPoint())
11042 return;
11043
11044 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
11045
11046 llvm::Value *NumTeamsVal =
11047 NumTeams
11048 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
11049 CGF.CGM.Int32Ty, /* isSigned = */ true)
11050 : CGF.Builder.getInt32(0);
11051
11052 llvm::Value *ThreadLimitVal =
11053 ThreadLimit
11054 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
11055 CGF.CGM.Int32Ty, /* isSigned = */ true)
11056 : CGF.Builder.getInt32(0);
11057
11058 // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams, thread_limit)
11059 llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
11060 ThreadLimitVal};
11061 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
11062 CGM.getModule(), OMPRTL___kmpc_push_num_teams),
11063 PushNumTeamsArgs);
11064}
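For example, '#pragma omp teams num_teams(4) thread_limit(8)' lowers to roughly (sketch):

    __kmpc_push_num_teams(&Loc, GlobalTid, /*num_teams=*/4, /*thread_limit=*/8);

while an absent clause is passed as 0, matching the null checks above.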
11065
11066void CGOpenMPRuntime::emitTargetDataCalls(
11067 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
11068 const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
11069 if (!CGF.HaveInsertPoint())
11070 return;
11071
11072 // Action used to replace the default codegen action and turn privatization
11073 // off.
11074 PrePostActionTy NoPrivAction;
11075
11076 // Generate the code for the opening of the data environment. Capture all the
11077 // arguments of the runtime call by reference because they are used in the
11078 // closing of the region.
11079 auto &&BeginThenGen = [this, &D, Device, &Info,
11080 &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
11081 // Fill up the arrays with all the mapped variables.
11082 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
11083
11084 // Get map clause information.
11085 MappableExprsHandler MEHandler(D, CGF);
11086 MEHandler.generateAllInfo(CombinedInfo);
11087
11088 // Fill up the arrays and create the arguments.
11089 emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
11090 /*IsNonContiguous=*/true);
11091
11092 llvm::Value *BasePointersArrayArg = nullptr;
11093 llvm::Value *PointersArrayArg = nullptr;
11094 llvm::Value *SizesArrayArg = nullptr;
11095 llvm::Value *MapTypesArrayArg = nullptr;
11096 llvm::Value *MapNamesArrayArg = nullptr;
11097 llvm::Value *MappersArrayArg = nullptr;
11098 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
11099 SizesArrayArg, MapTypesArrayArg,
11100 MapNamesArrayArg, MappersArrayArg, Info);
11101
11102 // Emit device ID if any.
11103 llvm::Value *DeviceID = nullptr;
11104 if (Device) {
11105 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
11106 CGF.Int64Ty, /*isSigned=*/true);
11107 } else {
11108 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
11109 }
11110
11111 // Emit the number of elements in the offloading arrays.
11112 llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
11113 //
11114 // Source location for the ident struct
11115 llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
11116
11117 llvm::Value *OffloadingArgs[] = {RTLoc,
11118 DeviceID,
11119 PointerNum,
11120 BasePointersArrayArg,
11121 PointersArrayArg,
11122 SizesArrayArg,
11123 MapTypesArrayArg,
11124 MapNamesArrayArg,
11125 MappersArrayArg};
11126 CGF.EmitRuntimeCall(
11127 OMPBuilder.getOrCreateRuntimeFunction(
11128 CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
11129 OffloadingArgs);
11130
11131 // If device pointer privatization is required, emit the body of the region
11132 // here. It will have to be duplicated: with and without privatization.
11133 if (!Info.CaptureDeviceAddrMap.empty())
11134 CodeGen(CGF);
11135 };
11136
11137 // Generate code for the closing of the data region.
11138 auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
11139 PrePostActionTy &) {
11140 assert(Info.isValid() && "Invalid data environment closing arguments.")((void)0);
11141
11142 llvm::Value *BasePointersArrayArg = nullptr;
11143 llvm::Value *PointersArrayArg = nullptr;
11144 llvm::Value *SizesArrayArg = nullptr;
11145 llvm::Value *MapTypesArrayArg = nullptr;
11146 llvm::Value *MapNamesArrayArg = nullptr;
11147 llvm::Value *MappersArrayArg = nullptr;
11148 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
11149 SizesArrayArg, MapTypesArrayArg,
11150 MapNamesArrayArg, MappersArrayArg, Info,
11151 {/*ForEndCall=*/true});
11152
11153 // Emit device ID if any.
11154 llvm::Value *DeviceID = nullptr;
11155 if (Device) {
11156 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
11157 CGF.Int64Ty, /*isSigned=*/true);
11158 } else {
11159 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
11160 }
11161
11162 // Emit the number of elements in the offloading arrays.
11163 llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
11164
11165 // Source location for the ident struct
11166 llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
11167
11168 llvm::Value *OffloadingArgs[] = {RTLoc,
11169 DeviceID,
11170 PointerNum,
11171 BasePointersArrayArg,
11172 PointersArrayArg,
11173 SizesArrayArg,
11174 MapTypesArrayArg,
11175 MapNamesArrayArg,
11176 MappersArrayArg};
11177 CGF.EmitRuntimeCall(
11178 OMPBuilder.getOrCreateRuntimeFunction(
11179 CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
11180 OffloadingArgs);
11181 };
11182
11183 // If we need device pointer privatization, we need to emit the body of the
11184 // region with no privatization in the 'else' branch of the conditional.
11185 // Otherwise, we don't have to do anything.
11186 auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
11187 PrePostActionTy &) {
11188 if (!Info.CaptureDeviceAddrMap.empty()) {
11189 CodeGen.setAction(NoPrivAction);
11190 CodeGen(CGF);
11191 }
11192 };
11193
11194 // We don't have to do anything to close the region if the if clause evaluates
11195 // to false.
11196 auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
11197
11198 if (IfCond) {
11199 emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
11200 } else {
11201 RegionCodeGenTy RCG(BeginThenGen);
11202 RCG(CGF);
11203 }
11204
11205 // If we don't require privatization of device pointers, we emit the body in
11206 // between the runtime calls. This avoids duplicating the body code.
11207 if (Info.CaptureDeviceAddrMap.empty()) {
11208 CodeGen.setAction(NoPrivAction);
11209 CodeGen(CGF);
11210 }
11211
11212 if (IfCond) {
11213 emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
11214 } else {
11215 RegionCodeGenTy RCG(EndThenGen);
11216 RCG(CGF);
11217 }
11218}
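The begin/end thunks above bracket the region body with a matched pair of mapper calls; for '#pragma omp target data map(tofrom: a)' the result is approximately (an editor's sketch, not the emitted IR):

    __tgt_target_data_begin_mapper(&Loc, DeviceId, /*arg_num=*/1, ArgsBase,
                                   Args, ArgSizes, ArgTypes, ArgNames, ArgMappers);
    // ... region body, emitted once or twice depending on privatization ...
    __tgt_target_data_end_mapper(&Loc, DeviceId, /*arg_num=*/1, ArgsBase,
                                 Args, ArgSizes, ArgTypes, ArgNames, ArgMappers);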
11219
11220void CGOpenMPRuntime::emitTargetDataStandAloneCall(
11221 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
11222 const Expr *Device) {
11223 if (!CGF.HaveInsertPoint())
11224 return;
11225
11226 assert((isa<OMPTargetEnterDataDirective>(D) ||((void)0)
11227 isa<OMPTargetExitDataDirective>(D) ||((void)0)
11228 isa<OMPTargetUpdateDirective>(D)) &&((void)0)
11229 "Expecting either target enter, exit data, or update directives.")((void)0);
11230
11231 CodeGenFunction::OMPTargetDataInfo InputInfo;
11232 llvm::Value *MapTypesArray = nullptr;
11233 llvm::Value *MapNamesArray = nullptr;
11234 // Generate the code for the opening of the data environment.
11235 auto &&ThenGen = [this, &D, Device, &InputInfo, &MapTypesArray,
11236 &MapNamesArray](CodeGenFunction &CGF, PrePostActionTy &) {
11237 // Emit device ID if any.
11238 llvm::Value *DeviceID = nullptr;
11239 if (Device) {
11240 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
11241 CGF.Int64Ty, /*isSigned=*/true);
11242 } else {
11243 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
11244 }
11245
11246 // Emit the number of elements in the offloading arrays.
11247 llvm::Constant *PointerNum =
11248 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
11249
11250 // Source location for the ident struct
11251 llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
11252
11253 llvm::Value *OffloadingArgs[] = {RTLoc,
11254 DeviceID,
11255 PointerNum,
11256 InputInfo.BasePointersArray.getPointer(),
11257 InputInfo.PointersArray.getPointer(),
11258 InputInfo.SizesArray.getPointer(),
11259 MapTypesArray,
11260 MapNamesArray,
11261 InputInfo.MappersArray.getPointer()};
11262
11263 // Select the right runtime function call for each standalone
11264 // directive.
11265 const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
11266 RuntimeFunction RTLFn;
11267 switch (D.getDirectiveKind()) {
11268 case OMPD_target_enter_data:
11269 RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
11270 : OMPRTL___tgt_target_data_begin_mapper;
11271 break;
11272 case OMPD_target_exit_data:
11273 RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
11274 : OMPRTL___tgt_target_data_end_mapper;
11275 break;
11276 case OMPD_target_update:
11277 RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
11278 : OMPRTL___tgt_target_data_update_mapper;
11279 break;
11280 case OMPD_parallel:
11281 case OMPD_for:
11282 case OMPD_parallel_for:
11283 case OMPD_parallel_master:
11284 case OMPD_parallel_sections:
11285 case OMPD_for_simd:
11286 case OMPD_parallel_for_simd:
11287 case OMPD_cancel:
11288 case OMPD_cancellation_point:
11289 case OMPD_ordered:
11290 case OMPD_threadprivate:
11291 case OMPD_allocate:
11292 case OMPD_task:
11293 case OMPD_simd:
11294 case OMPD_tile:
11295 case OMPD_unroll:
11296 case OMPD_sections:
11297 case OMPD_section:
11298 case OMPD_single:
11299 case OMPD_master:
11300 case OMPD_critical:
11301 case OMPD_taskyield:
11302 case OMPD_barrier:
11303 case OMPD_taskwait:
11304 case OMPD_taskgroup:
11305 case OMPD_atomic:
11306 case OMPD_flush:
11307 case OMPD_depobj:
11308 case OMPD_scan:
11309 case OMPD_teams:
11310 case OMPD_target_data:
11311 case OMPD_distribute:
11312 case OMPD_distribute_simd:
11313 case OMPD_distribute_parallel_for:
11314 case OMPD_distribute_parallel_for_simd:
11315 case OMPD_teams_distribute:
11316 case OMPD_teams_distribute_simd:
11317 case OMPD_teams_distribute_parallel_for:
11318 case OMPD_teams_distribute_parallel_for_simd:
11319 case OMPD_declare_simd:
11320 case OMPD_declare_variant:
11321 case OMPD_begin_declare_variant:
11322 case OMPD_end_declare_variant:
11323 case OMPD_declare_target:
11324 case OMPD_end_declare_target:
11325 case OMPD_declare_reduction:
11326 case OMPD_declare_mapper:
11327 case OMPD_taskloop:
11328 case OMPD_taskloop_simd:
11329 case OMPD_master_taskloop:
11330 case OMPD_master_taskloop_simd:
11331 case OMPD_parallel_master_taskloop:
11332 case OMPD_parallel_master_taskloop_simd:
11333 case OMPD_target:
11334 case OMPD_target_simd:
11335 case OMPD_target_teams_distribute:
11336 case OMPD_target_teams_distribute_simd:
11337 case OMPD_target_teams_distribute_parallel_for:
11338 case OMPD_target_teams_distribute_parallel_for_simd:
11339 case OMPD_target_teams:
11340 case OMPD_target_parallel:
11341 case OMPD_target_parallel_for:
11342 case OMPD_target_parallel_for_simd:
11343 case OMPD_requires:
11344 case OMPD_unknown:
11345 default:
11346 llvm_unreachable("Unexpected standalone target data directive.")__builtin_unreachable();
11347 break;
11348 }
11349 CGF.EmitRuntimeCall(
11350 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
11351 OffloadingArgs);
11352 };
11353
11354 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
11355 &MapNamesArray](CodeGenFunction &CGF,
11356 PrePostActionTy &) {
11357 // Fill up the arrays with all the mapped variables.
11358 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
11359
11360 // Get map clause information.
11361 MappableExprsHandler MEHandler(D, CGF);
11362 MEHandler.generateAllInfo(CombinedInfo);
11363
11364 TargetDataInfo Info;
11365 // Fill up the arrays and create the arguments.
11366 emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
11367 /*IsNonContiguous=*/true);
11368 bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
11369 D.hasClausesOfKind<OMPNowaitClause>();
11370 emitOffloadingArraysArgument(
11371 CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
11372 Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
11373 {/*ForEndTask=*/false});
11374 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
11375 InputInfo.BasePointersArray =
11376 Address(Info.BasePointersArray, CGM.getPointerAlign());
11377 InputInfo.PointersArray =
11378 Address(Info.PointersArray, CGM.getPointerAlign());
11379 InputInfo.SizesArray =
11380 Address(Info.SizesArray, CGM.getPointerAlign());
11381 InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
11382 MapTypesArray = Info.MapTypesArray;
11383 MapNamesArray = Info.MapNamesArray;
11384 if (RequiresOuterTask)
11385 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
11386 else
11387 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
11388 };
11389
11390 if (IfCond) {
11391 emitIfClause(CGF, IfCond, TargetThenGen,
11392 [](CodeGenFunction &CGF, PrePostActionTy &) {});
11393 } else {
11394 RegionCodeGenTy ThenRCG(TargetThenGen);
11395 ThenRCG(CGF);
11396 }
11397}
11398
11399namespace {
11400 /// Kind of parameter in a function with 'declare simd' directive.
11401 enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
11402 /// Attribute set of the parameter.
11403 struct ParamAttrTy {
11404 ParamKindTy Kind = Vector;
11405 llvm::APSInt StrideOrArg;
11406 llvm::APSInt Alignment;
11407 };
11408} // namespace
11409
11410static unsigned evaluateCDTSize(const FunctionDecl *FD,
11411 ArrayRef<ParamAttrTy> ParamAttrs) {
11412 // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
11413 // If the OpenMP clause "simdlen" is used, the VLEN is the value of the
11414 // argument of that clause. The VLEN value must be a power of 2.
11415 // Otherwise, the notion of the function's "characteristic data type" (CDT)
11416 // is used to compute the vector length.
11417 // CDT is defined in the following order:
11418 // a) For a non-void function, the CDT is the return type.
11419 // b) If the function has any non-uniform, non-linear parameters, then the
11420 // CDT is the type of the first such parameter.
11421 // c) If the CDT determined by a) or b) above is a struct, union, or class
11422 // type that is passed by value (except for the type that maps to the
11423 // built-in complex data type), the characteristic data type is int.
11424 // d) If none of the above three cases applies, the CDT is int.
11425 // The VLEN is then determined from the CDT and the size of the vector
11426 // register of the ISA for which the current vector version is generated. The
11427 // VLEN is computed using the formula below:
11428 // VLEN = sizeof(vector_register) / sizeof(CDT),
11429 // where the vector register size is specified in section 3.2.1, "Registers
11430 // and the Stack Frame", of the original AMD64 ABI document.
11431 QualType RetType = FD->getReturnType();
11432 if (RetType.isNull())
Step 21: Calling 'QualType::isNull'
Step 27: Returning from 'QualType::isNull'
Step 28: Taking true branch
11433 return 0;
Step 29: Returning zero
11434 ASTContext &C = FD->getASTContext();
11435 QualType CDT;
11436 if (!RetType.isNull() && !RetType->isVoidType()) {
11437 CDT = RetType;
11438 } else {
11439 unsigned Offset = 0;
11440 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
11441 if (ParamAttrs[Offset].Kind == Vector)
11442 CDT = C.getPointerType(C.getRecordType(MD->getParent()));
11443 ++Offset;
11444 }
11445 if (CDT.isNull()) {
11446 for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
11447 if (ParamAttrs[I + Offset].Kind == Vector) {
11448 CDT = FD->getParamDecl(I)->getType();
11449 break;
11450 }
11451 }
11452 }
11453 }
11454 if (CDT.isNull())
11455 CDT = C.IntTy;
11456 CDT = CDT->getCanonicalTypeUnqualified();
11457 if (CDT->isRecordType() || CDT->isUnionType())
11458 CDT = C.IntTy;
11459 return C.getTypeSize(CDT);
11460}
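A worked example of the formula above (an editor's illustration): for '#pragma omp declare simd' on 'double f(double x)' with no simdlen, the CDT is the return type double (64 bits), so the 128-bit SSE ISA yields VLEN = 128 / 64 = 2 and the variant is mangled '_ZGVbN2v_f'. The hypothetical helper below just restates the computation that emitX86DeclareSimdFunction performs inline:

    unsigned vlenFor(unsigned VecRegSizeBits, unsigned CDTSizeBits) {
      // VLEN = sizeof(vector_register) / sizeof(CDT); a CDT size of 0,
      // as on the analyzer's path above, would divide by zero here too.
      return VecRegSizeBits / CDTSizeBits; // vlenFor(128, 64) == 2
    }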
11461
11462static void
11463emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
11464 const llvm::APSInt &VLENVal,
11465 ArrayRef<ParamAttrTy> ParamAttrs,
11466 OMPDeclareSimdDeclAttr::BranchStateTy State) {
11467 struct ISADataTy {
11468 char ISA;
11469 unsigned VecRegSize;
11470 };
11471 ISADataTy ISAData[] = {
11472 {
11473 'b', 128
11474 }, // SSE
11475 {
11476 'c', 256
11477 }, // AVX
11478 {
11479 'd', 256
11480 }, // AVX2
11481 {
11482 'e', 512
11483 }, // AVX512
11484 };
11485 llvm::SmallVector<char, 2> Masked;
11486 switch (State) {
Step 12: Control jumps to 'case BS_Inbranch:' at line 11494
11487 case OMPDeclareSimdDeclAttr::BS_Undefined:
11488 Masked.push_back('N');
11489 Masked.push_back('M');
11490 break;
11491 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
11492 Masked.push_back('N');
11493 break;
11494 case OMPDeclareSimdDeclAttr::BS_Inbranch:
11495 Masked.push_back('M');
11496 break;
Step 13: Execution continues on line 11498
11497 }
11498 for (char Mask : Masked) {
Step 14: Assuming '__begin1' is not equal to '__end1'
11499 for (const ISADataTy &Data : ISAData) {
11500 SmallString<256> Buffer;
11501 llvm::raw_svector_ostream Out(Buffer);
11502 Out << "_ZGV" << Data.ISA << Mask;
11503 if (!VLENVal) {
Step 15: Calling 'APInt::operator!'
Step 18: Returning from 'APInt::operator!'
Step 19: Taking true branch
11504 unsigned NumElts = evaluateCDTSize(FD, ParamAttrs);
Step 20: Calling 'evaluateCDTSize'
Step 30: Returning from 'evaluateCDTSize'
Step 31: 'NumElts' initialized to 0
11505 assert(NumElts && "Non-zero simdlen/cdtsize expected")((void)0);
11506 Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts);
Step 32: Division by zero
11507 } else {
11508 Out << VLENVal;
11509 }
11510 for (const ParamAttrTy &ParamAttr : ParamAttrs) {
11511 switch (ParamAttr.Kind){
11512 case LinearWithVarStride:
11513 Out << 's' << ParamAttr.StrideOrArg;
11514 break;
11515 case Linear:
11516 Out << 'l';
11517 if (ParamAttr.StrideOrArg != 1)
11518 Out << ParamAttr.StrideOrArg;
11519 break;
11520 case Uniform:
11521 Out << 'u';
11522 break;
11523 case Vector:
11524 Out << 'v';
11525 break;
11526 }
11527 if (!!ParamAttr.Alignment)
11528 Out << 'a' << ParamAttr.Alignment;
11529 }
11530 Out << '_' << Fn->getName();
11531 Fn->addFnAttr(Out.str());
11532 }
11533 }
11534}
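This function contains the reported defect. The build defines NDEBUG, so the assert at line 11505 expands to '((void)0)' and checks nothing; when evaluateCDTSize() returns 0 (step 31, null return type), 'Data.VecRegSize / NumElts' at line 11506 divides by zero (step 32). A minimal guard, assuming that skipping variant emission is acceptable recovery when no CDT size can be computed, might look like:

    unsigned NumElts = evaluateCDTSize(FD, ParamAttrs);
    if (NumElts == 0)
      return; // bail out instead of dividing by zero with asserts compiled out
    Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts);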
11535
11536// These are the functions needed to mangle the names of the
11537// vector functions generated by the compiler, according to the rules
11538// defined in the "Vector Function ABI specifications for AArch64",
11539// available at
11540// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
11541
11542/// Maps To Vector (MTV), as defined in 3.1.1 of the AAVFABI.
11543///
11544/// TODO: Need to implement the behavior for reference marked with a
11545/// var or no linear modifiers (1.b in the section). For this, we
11546/// need to extend ParamKindTy to support the linear modifiers.
11547static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
11548 QT = QT.getCanonicalType();
11549
11550 if (QT->isVoidType())
11551 return false;
11552
11553 if (Kind == ParamKindTy::Uniform)
11554 return false;
11555
11556 if (Kind == ParamKindTy::Linear)
11557 return false;
11558
11559 // TODO: Handle linear references with modifiers
11560
11561 if (Kind == ParamKindTy::LinearWithVarStride)
11562 return false;
11563
11564 return true;
11565}
11566
11567/// Pass By Value (PBV), as defined in 3.1.2 of the AAVFABI.
11568static bool getAArch64PBV(QualType QT, ASTContext &C) {
11569 QT = QT.getCanonicalType();
11570 unsigned Size = C.getTypeSize(QT);
11571
11572 // Only scalars and complex types at most 16 bytes wide set PBV to true.
11573 if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128)
11574 return false;
11575
11576 if (QT->isFloatingType())
11577 return true;
11578
11579 if (QT->isIntegerType())
11580 return true;
11581
11582 if (QT->isPointerType())
11583 return true;
11584
11585 // TODO: Add support for complex types (section 3.1.2, item 2).
11586
11587 return false;
11588}
11589
11590/// Computes the lane size (LS) of a return type or of an input parameter,
11591/// as defined by `LS(P)` in 3.2.1 of the AAVFABI.
11592/// TODO: Add support for references, section 3.2.1, item 1.
11593static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
11594 if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
11595 QualType PTy = QT.getCanonicalType()->getPointeeType();
11596 if (getAArch64PBV(PTy, C))
11597 return C.getTypeSize(PTy);
11598 }
11599 if (getAArch64PBV(QT, C))
11600 return C.getTypeSize(QT);
11601
11602 return C.getTypeSize(C.getUIntPtrType());
11603}
11604
11605// Get Narrowest Data Size (NDS) and Widest Data Size (WDS) from the
11606// signature of the scalar function, as defined in 3.2.2 of the
11607// AAVFABI.
11608static std::tuple<unsigned, unsigned, bool>
11609getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
11610 QualType RetType = FD->getReturnType().getCanonicalType();
11611
11612 ASTContext &C = FD->getASTContext();
11613
11614 bool OutputBecomesInput = false;
11615
11616 llvm::SmallVector<unsigned, 8> Sizes;
11617 if (!RetType->isVoidType()) {
11618 Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C));
11619 if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {}))
11620 OutputBecomesInput = true;
11621 }
11622 for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
11623 QualType QT = FD->getParamDecl(I)->getType().getCanonicalType();
11624 Sizes.push_back(getAArch64LS(QT, ParamAttrs[I].Kind, C));
11625 }
11626
11627 assert(!Sizes.empty() && "Unable to determine NDS and WDS.")((void)0);
11628 // The LS of a function parameter / return value can only be a power
11629 // of 2, starting from 8 bits, up to 128.
11630 assert(std::all_of(Sizes.begin(), Sizes.end(),((void)0)
11631 [](unsigned Size) {((void)0)
11632 return Size == 8 || Size == 16 || Size == 32 ||((void)0)
11633 Size == 64 || Size == 128;((void)0)
11634 }) &&((void)0)
11635 "Invalid size")((void)0);
11636
11637 return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)),
11638 *std::max_element(std::begin(Sizes), std::end(Sizes)),
11639 OutputBecomesInput);
11640}
11641
11642/// Mangle the parameter part of the vector function name according to
11643/// their OpenMP classification. The mangling function is defined in
11644/// section 3.5 of the AAVFABI.
11645static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
11646 SmallString<256> Buffer;
11647 llvm::raw_svector_ostream Out(Buffer);
11648 for (const auto &ParamAttr : ParamAttrs) {
11649 switch (ParamAttr.Kind) {
11650 case LinearWithVarStride:
11651 Out << "ls" << ParamAttr.StrideOrArg;
11652 break;
11653 case Linear:
11654 Out << 'l';
11655 // Don't print the step value if it is not present or if it is
11656 // equal to 1.
11657 if (ParamAttr.StrideOrArg != 1)
11658 Out << ParamAttr.StrideOrArg;
11659 break;
11660 case Uniform:
11661 Out << 'u';
11662 break;
11663 case Vector:
11664 Out << 'v';
11665 break;
11666 }
11667
11668 if (!!ParamAttr.Alignment)
11669 Out << 'a' << ParamAttr.Alignment;
11670 }
11671
11672 return std::string(Out.str());
11673}
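For instance (an editor's example following the switch above), parameters classified as linear with step 2, uniform, and vector with 16-byte alignment mangle to the sequence "l2uva16".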
11674
11675// Function used to add the attribute. The parameter `VLEN` is
11676// templated to allow the use of "x" when targeting scalable functions
11677// for SVE.
11678template <typename T>
11679static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix,
11680 char ISA, StringRef ParSeq,
11681 StringRef MangledName, bool OutputBecomesInput,
11682 llvm::Function *Fn) {
11683 SmallString<256> Buffer;
11684 llvm::raw_svector_ostream Out(Buffer);
11685 Out << Prefix << ISA << LMask << VLEN;
11686 if (OutputBecomesInput)
11687 Out << "v";
11688 Out << ParSeq << "_" << MangledName;
11689 Fn->addFnAttr(Out.str());
11690}
11691
11692// Helper function to generate the Advanced SIMD names depending on
11693// the value of the NDS when simdlen is not present.
11694static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask,
11695 StringRef Prefix, char ISA,
11696 StringRef ParSeq, StringRef MangledName,
11697 bool OutputBecomesInput,
11698 llvm::Function *Fn) {
11699 switch (NDS) {
11700 case 8:
11701 addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
11702 OutputBecomesInput, Fn);
11703 addAArch64VectorName(16, Mask, Prefix, ISA, ParSeq, MangledName,
11704 OutputBecomesInput, Fn);
11705 break;
11706 case 16:
11707 addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
11708 OutputBecomesInput, Fn);
11709 addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
11710 OutputBecomesInput, Fn);
11711 break;
11712 case 32:
11713 addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
11714 OutputBecomesInput, Fn);
11715 addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
11716 OutputBecomesInput, Fn);
11717 break;
11718 case 64:
11719 case 128:
11720 addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
11721 OutputBecomesInput, Fn);
11722 break;
11723 default:
11724 llvm_unreachable("Scalar type is too wide.")__builtin_unreachable();
11725 }
11726}
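For example (illustrative), NDS = 32 with mask "N" and a single vector parameter adds the attributes "_ZGVnN2v_<name>" and "_ZGVnN4v_<name>", i.e. the 64-bit and 128-bit Advanced SIMD variants.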
11727
11728/// Emit vector function attributes for AArch64, as defined in the AAVFABI.
11729static void emitAArch64DeclareSimdFunction(
11730 CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN,
11731 ArrayRef<ParamAttrTy> ParamAttrs,
11732 OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName,
11733 char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) {
11734
11735 // Get basic data for building the vector signature.
11736 const auto Data = getNDSWDS(FD, ParamAttrs);
11737 const unsigned NDS = std::get<0>(Data);
11738 const unsigned WDS = std::get<1>(Data);
11739 const bool OutputBecomesInput = std::get<2>(Data);
11740
11741 // Check the values provided via `simdlen` by the user.
11742 // 1. A `simdlen(1)` doesn't produce vector signatures,
11743 if (UserVLEN == 1) {
11744 unsigned DiagID = CGM.getDiags().getCustomDiagID(
11745 DiagnosticsEngine::Warning,
11746 "The clause simdlen(1) has no effect when targeting aarch64.");
11747 CGM.getDiags().Report(SLoc, DiagID);
11748 return;
11749 }
11750
11751 // 2. Section 3.3.1, item 1: user input must be a power of 2 for
11752 // Advanced SIMD output.
11753 if (ISA == 'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) {
11754 unsigned DiagID = CGM.getDiags().getCustomDiagID(
11755 DiagnosticsEngine::Warning, "The value specified in simdlen must be a "
11756 "power of 2 when targeting Advanced SIMD.");
11757 CGM.getDiags().Report(SLoc, DiagID);
11758 return;
11759 }
11760
11761 // 3. Section 3.4.1: SVE fixed-length vectors must obey the architectural
11762 // limits.
11763 if (ISA == 's' && UserVLEN != 0) {
11764 if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) {
11765 unsigned DiagID = CGM.getDiags().getCustomDiagID(
11766 DiagnosticsEngine::Warning, "The clause simdlen must fit the %0-bit "
11767 "lanes in the architectural constraints "
11768 "for SVE (min is 128-bit, max is "
11769 "2048-bit, by steps of 128-bit)");
11770 CGM.getDiags().Report(SLoc, DiagID) << WDS;
11771 return;
11772 }
11773 }
11774
11775 // Sort out parameter sequence.
11776 const std::string ParSeq = mangleVectorParameters(ParamAttrs);
11777 StringRef Prefix = "_ZGV";
11778 // Generate simdlen from user input (if any).
11779 if (UserVLEN) {
11780 if (ISA == 's') {
11781 // SVE generates only a masked function.
11782 addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
11783 OutputBecomesInput, Fn);
11784 } else {
11785 assert(ISA == 'n' && "Expected ISA either 's' or 'n'.")((void)0);
11786 // Advanced SIMD generates one or two functions, depending on
11787 // the `[not]inbranch` clause.
11788 switch (State) {
11789 case OMPDeclareSimdDeclAttr::BS_Undefined:
11790 addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
11791 OutputBecomesInput, Fn);
11792 addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
11793 OutputBecomesInput, Fn);
11794 break;
11795 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
11796 addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
11797 OutputBecomesInput, Fn);
11798 break;
11799 case OMPDeclareSimdDeclAttr::BS_Inbranch:
11800 addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
11801 OutputBecomesInput, Fn);
11802 break;
11803 }
11804 }
11805 } else {
11806 // If no user simdlen is provided, follow the AAVFABI rules for
11807 // generating the vector length.
11808 if (ISA == 's') {
11809 // SVE, section 3.4.1, item 1.
11810 addAArch64VectorName("x", "M", Prefix, ISA, ParSeq, MangledName,
11811 OutputBecomesInput, Fn);
11812 } else {
11813 assert(ISA == 'n' && "Expected ISA either 's' or 'n'.")((void)0);
11814 // Advanced SIMD, Section 3.3.1 of the AAVFABI, generates one or
11815 // two vector names depending on the use of the clause
11816 // `[not]inbranch`.
11817 switch (State) {
11818 case OMPDeclareSimdDeclAttr::BS_Undefined:
11819 addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
11820 OutputBecomesInput, Fn);
11821 addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
11822 OutputBecomesInput, Fn);
11823 break;
11824 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
11825 addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
11826 OutputBecomesInput, Fn);
11827 break;
11828 case OMPDeclareSimdDeclAttr::BS_Inbranch:
11829 addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
11830 OutputBecomesInput, Fn);
11831 break;
11832 }
11833 }
11834 }
11835}
11836
11837void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
11838 llvm::Function *Fn) {
11839 ASTContext &C = CGM.getContext();
11840 FD = FD->getMostRecentDecl();
11841 // Map params to their positions in function decl.
11842 llvm::DenseMap<const Decl *, unsigned> ParamPositions;
11843 if (isa<CXXMethodDecl>(FD))
Step 1: Assuming 'FD' is not a 'CXXMethodDecl'
Step 2: Taking false branch
11844 ParamPositions.try_emplace(FD, 0);
11845 unsigned ParamPos = ParamPositions.size();
11846 for (const ParmVarDecl *P : FD->parameters()) {
Step 3: Assuming '__begin1' is equal to '__end1'
11847 ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
11848 ++ParamPos;
11849 }
11850 while (FD) {
Step 4: Loop condition is true. Entering loop body
11851 for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
11852 llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
11853 // Mark uniform parameters.
11854 for (const Expr *E : Attr->uniforms()) {
Step 5: Assuming '__begin3' is equal to '__end3'
11855 E = E->IgnoreParenImpCasts();
11856 unsigned Pos;
11857 if (isa<CXXThisExpr>(E)) {
11858 Pos = ParamPositions[FD];
11859 } else {
11860 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
11861 ->getCanonicalDecl();
11862 Pos = ParamPositions[PVD];
11863 }
11864 ParamAttrs[Pos].Kind = Uniform;
11865 }
11866 // Get alignment info.
11867 auto NI = Attr->alignments_begin();
11868 for (const Expr *E : Attr->aligneds()) {
Step 6: Assuming '__begin3' is equal to '__end3'
11869 E = E->IgnoreParenImpCasts();
11870 unsigned Pos;
11871 QualType ParmTy;
11872 if (isa<CXXThisExpr>(E)) {
11873 Pos = ParamPositions[FD];
11874 ParmTy = E->getType();
11875 } else {
11876 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
11877 ->getCanonicalDecl();
11878 Pos = ParamPositions[PVD];
11879 ParmTy = PVD->getType();
11880 }
11881 ParamAttrs[Pos].Alignment =
11882 (*NI)
11883 ? (*NI)->EvaluateKnownConstInt(C)
11884 : llvm::APSInt::getUnsigned(
11885 C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
11886 .getQuantity());
11887 ++NI;
11888 }
11889 // Mark linear parameters.
11890 auto SI = Attr->steps_begin();
11891 auto MI = Attr->modifiers_begin();
11892 for (const Expr *E : Attr->linears()) {
Step 7: Assuming '__begin3' is equal to '__end3'
11893 E = E->IgnoreParenImpCasts();
11894 unsigned Pos;
11895 // Rescaling factor needed to compute the linear parameter
11896 // value in the mangled name.
11897 unsigned PtrRescalingFactor = 1;
11898 if (isa<CXXThisExpr>(E)) {
11899 Pos = ParamPositions[FD];
11900 } else {
11901 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
11902 ->getCanonicalDecl();
11903 Pos = ParamPositions[PVD];
11904 if (auto *P = dyn_cast<PointerType>(PVD->getType()))
11905 PtrRescalingFactor = CGM.getContext()
11906 .getTypeSizeInChars(P->getPointeeType())
11907 .getQuantity();
11908 }
11909 ParamAttrTy &ParamAttr = ParamAttrs[Pos];
11910 ParamAttr.Kind = Linear;
11911 // Assuming a stride of 1, for `linear` without modifiers.
11912 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
11913 if (*SI) {
11914 Expr::EvalResult Result;
11915 if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
11916 if (const auto *DRE =
11917 cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
11918 if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
11919 ParamAttr.Kind = LinearWithVarStride;
11920 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
11921 ParamPositions[StridePVD->getCanonicalDecl()]);
11922 }
11923 }
11924 } else {
11925 ParamAttr.StrideOrArg = Result.Val.getInt();
11926 }
11927 }
11928 // If we are using a linear clause on a pointer, we need to
11929 // rescale the value of linear_step with the byte size of the
11930 // pointee type.
11931 if (Linear == ParamAttr.Kind)
11932 ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
11933 ++SI;
11934 ++MI;
11935 }
11936 llvm::APSInt VLENVal;
11937 SourceLocation ExprLoc;
11938 const Expr *VLENExpr = Attr->getSimdlen();
11939 if (VLENExpr) {
Step 8: Assuming 'VLENExpr' is null
Step 9: Taking false branch
11940 VLENVal = VLENExpr->EvaluateKnownConstInt(C);
11941 ExprLoc = VLENExpr->getExprLoc();
11942 }
11943 OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
11944 if (CGM.getTriple().isX86()) {
Step 10: Taking true branch
11945 emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
Step 11: Calling 'emitX86DeclareSimdFunction'
11946 } else if (CGM.getTriple().getArch() == llvm::Triple::aarch64) {
11947 unsigned VLEN = VLENVal.getExtValue();
11948 StringRef MangledName = Fn->getName();
11949 if (CGM.getTarget().hasFeature("sve"))
11950 emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
11951 MangledName, 's', 128, Fn, ExprLoc);
11952 if (CGM.getTarget().hasFeature("neon"))
11953 emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
11954 MangledName, 'n', 128, Fn, ExprLoc);
11955 }
11956 }
11957 FD = FD->getPreviousDecl();
11958 }
11959}
11960
11961namespace {
11962/// Cleanup action for doacross support.
11963class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
11964public:
11965 static const int DoacrossFinArgs = 2;
11966
11967private:
11968 llvm::FunctionCallee RTLFn;
11969 llvm::Value *Args[DoacrossFinArgs];
11970
11971public:
11972 DoacrossCleanupTy(llvm::FunctionCallee RTLFn,
11973 ArrayRef<llvm::Value *> CallArgs)
11974 : RTLFn(RTLFn) {
11975 assert(CallArgs.size() == DoacrossFinArgs)((void)0);
11976 std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
11977 }
11978 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
11979 if (!CGF.HaveInsertPoint())
11980 return;
11981 CGF.EmitRuntimeCall(RTLFn, Args);
11982 }
11983};
11984} // namespace
11985
11986void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
11987 const OMPLoopDirective &D,
11988 ArrayRef<Expr *> NumIterations) {
11989 if (!CGF.HaveInsertPoint())
11990 return;
11991
11992 ASTContext &C = CGM.getContext();
11993 QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
11994 RecordDecl *RD;
11995 if (KmpDimTy.isNull()) {
11996 // Build struct kmp_dim { // loop bounds info cast to kmp_int64
11997 // kmp_int64 lo; // lower
11998 // kmp_int64 up; // upper
11999 // kmp_int64 st; // stride
12000 // };
12001 RD = C.buildImplicitRecord("kmp_dim");
12002 RD->startDefinition();
12003 addFieldToRecordDecl(C, RD, Int64Ty);
12004 addFieldToRecordDecl(C, RD, Int64Ty);
12005 addFieldToRecordDecl(C, RD, Int64Ty);
12006 RD->completeDefinition();
12007 KmpDimTy = C.getRecordType(RD);
12008 } else {
12009 RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
12010 }
12011 llvm::APInt Size(/*numBits=*/32, NumIterations.size());
12012 QualType ArrayTy =
12013 C.getConstantArrayType(KmpDimTy, Size, nullptr, ArrayType::Normal, 0);
12014
12015 Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
12016 CGF.EmitNullInitialization(DimsAddr, ArrayTy);
12017 enum { LowerFD = 0, UpperFD, StrideFD };
12018 // Fill dims with data.
12019 for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
12020 LValue DimsLVal = CGF.MakeAddrLValue(
12021 CGF.Builder.CreateConstArrayGEP(DimsAddr, I), KmpDimTy);
12022 // dims.upper = num_iterations;
12023 LValue UpperLVal = CGF.EmitLValueForField(
12024 DimsLVal, *std::next(RD->field_begin(), UpperFD));
12025 llvm::Value *NumIterVal = CGF.EmitScalarConversion(
12026 CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
12027 Int64Ty, NumIterations[I]->getExprLoc());
12028 CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
12029 // dims.stride = 1;
12030 LValue StrideLVal = CGF.EmitLValueForField(
12031 DimsLVal, *std::next(RD->field_begin(), StrideFD));
12032 CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
12033 StrideLVal);
12034 }
12035
12036 // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
12037 // kmp_int32 num_dims, struct kmp_dim * dims);
12038 llvm::Value *Args[] = {
12039 emitUpdateLocation(CGF, D.getBeginLoc()),
12040 getThreadID(CGF, D.getBeginLoc()),
12041 llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
12042 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
12043 CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
12044 CGM.VoidPtrTy)};
12045
12046 llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
12047 CGM.getModule(), OMPRTL___kmpc_doacross_init);
12048 CGF.EmitRuntimeCall(RTLFn, Args);
12049 llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
12050 emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
12051 llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
12052 CGM.getModule(), OMPRTL___kmpc_doacross_fini);
12053 CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
12054 llvm::makeArrayRef(FiniArgs));
12055}
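Put together, for a loop with '#pragma omp for ordered(1)' the code emitted above behaves like this hand-written approximation (not the actual IR; N is the iteration count):

    struct kmp_dim { int64_t lo, up, st; };
    kmp_dim Dims[1] = {{/*lo=*/0, /*up=*/N, /*st=*/1}};
    __kmpc_doacross_init(&Loc, Gtid, /*num_dims=*/1, Dims);
    // ... loop body with doacross post/wait calls ...
    __kmpc_doacross_fini(&Loc, Gtid); // pushed as a cleanup above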
12056
12057void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
12058 const OMPDependClause *C) {
12059 QualType Int64Ty =
12060 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
12061 llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
12062 QualType ArrayTy = CGM.getContext().getConstantArrayType(
12063 Int64Ty, Size, nullptr, ArrayType::Normal, 0);
12064 Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
12065 for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
12066 const Expr *CounterVal = C->getLoopData(I);
12067 assert(CounterVal)((void)0);
12068 llvm::Value *CntVal = CGF.EmitScalarConversion(
12069 CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
12070 CounterVal->getExprLoc());
12071 CGF.EmitStoreOfScalar(CntVal, CGF.Builder.CreateConstArrayGEP(CntAddr, I),
12072 /*Volatile=*/false, Int64Ty);
12073 }
12074 llvm::Value *Args[] = {
12075 emitUpdateLocation(CGF, C->getBeginLoc()),
12076 getThreadID(CGF, C->getBeginLoc()),
12077 CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
12078 llvm::FunctionCallee RTLFn;
12079 if (C->getDependencyKind() == OMPC_DEPEND_source) {
12080 RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
12081 OMPRTL___kmpc_doacross_post);
12082 } else {
12083 assert(C->getDependencyKind() == OMPC_DEPEND_sink)((void)0);
12084 RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
12085 OMPRTL___kmpc_doacross_wait);
12086 }
12087 CGF.EmitRuntimeCall(RTLFn, Args);
12088}
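For a one-dimensional doacross loop, 'ordered depend(source)' and 'ordered depend(sink : i-1)' therefore emit approximately (sketch):

    int64_t Vec[1] = {I}; // loop counter converted to kmp_int64
    __kmpc_doacross_post(&Loc, Gtid, Vec);  // depend(source)
    int64_t Sink[1] = {I - 1};
    __kmpc_doacross_wait(&Loc, Gtid, Sink); // depend(sink : i-1)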
12089
12090void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
12091 llvm::FunctionCallee Callee,
12092 ArrayRef<llvm::Value *> Args) const {
12093 assert(Loc.isValid() && "Outlined function call location must be valid.")((void)0);
12094 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
12095
12096 if (auto *Fn = dyn_cast<llvm::Function>(Callee.getCallee())) {
12097 if (Fn->doesNotThrow()) {
12098 CGF.EmitNounwindRuntimeCall(Fn, Args);
12099 return;
12100 }
12101 }
12102 CGF.EmitRuntimeCall(Callee, Args);
12103}
12104
12105void CGOpenMPRuntime::emitOutlinedFunctionCall(
12106 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
12107 ArrayRef<llvm::Value *> Args) const {
12108 emitCall(CGF, Loc, OutlinedFn, Args);
12109}
12110
12111void CGOpenMPRuntime::emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {
12112 if (const auto *FD = dyn_cast<FunctionDecl>(D))
12113 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD))
12114 HasEmittedDeclareTargetRegion = true;
12115}
12116
12117Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
12118 const VarDecl *NativeParam,
12119 const VarDecl *TargetParam) const {
12120 return CGF.GetAddrOfLocalVar(NativeParam);
12121}
12122
12123Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
12124 const VarDecl *VD) {
12125 if (!VD)
12126 return Address::invalid();
12127 Address UntiedAddr = Address::invalid();
12128 Address UntiedRealAddr = Address::invalid();
12129 auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
12130 if (It != FunctionToUntiedTaskStackMap.end()) {
12131 const UntiedLocalVarsAddressesMap &UntiedData =
12132 UntiedLocalVarsStack[It->second];
12133 auto I = UntiedData.find(VD);
12134 if (I != UntiedData.end()) {
12135 UntiedAddr = I->second.first;
12136 UntiedRealAddr = I->second.second;
12137 }
12138 }
12139 const VarDecl *CVD = VD->getCanonicalDecl();
12140 if (CVD->hasAttr<OMPAllocateDeclAttr>()) {
12141 // Use the default allocation.
12142 if (!isAllocatableDecl(VD))
12143 return UntiedAddr;
12144 llvm::Value *Size;
12145 CharUnits Align = CGM.getContext().getDeclAlign(CVD);
12146 if (CVD->getType()->isVariablyModifiedType()) {
12147 Size = CGF.getTypeSize(CVD->getType());
12148 // Align the size: ((size + align - 1) / align) * align
12149 Size = CGF.Builder.CreateNUWAdd(
12150 Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
12151 Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
12152 Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
12153 } else {
12154 CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
12155 Size = CGM.getSize(Sz.alignTo(Align));
12156 }
12157 llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
12158 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
12159 assert(AA->getAllocator() &&((void)0)
12160 "Expected allocator expression for non-default allocator.")((void)0);
12161 llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
12162 // According to the standard, the original allocator type is an enum
12163 // (integer). Convert to pointer type, if required.
12164 Allocator = CGF.EmitScalarConversion(
12165 Allocator, AA->getAllocator()->getType(), CGF.getContext().VoidPtrTy,
12166 AA->getAllocator()->getExprLoc());
12167 llvm::Value *Args[] = {ThreadID, Size, Allocator};
12168
12169 llvm::Value *Addr =
12170 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
12171 CGM.getModule(), OMPRTL___kmpc_alloc),
12172 Args, getName({CVD->getName(), ".void.addr"}));
12173 llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
12174 CGM.getModule(), OMPRTL___kmpc_free);
12175 QualType Ty = CGM.getContext().getPointerType(CVD->getType());
12176 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
12177 Addr, CGF.ConvertTypeForMem(Ty), getName({CVD->getName(), ".addr"}));
12178 if (UntiedAddr.isValid())
12179 CGF.EmitStoreOfScalar(Addr, UntiedAddr, /*Volatile=*/false, Ty);
12180
12181 // Cleanup action for allocate support.
12182 class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
12183 llvm::FunctionCallee RTLFn;
12184 SourceLocation::UIntTy LocEncoding;
12185 Address Addr;
12186 const Expr *Allocator;
12187
12188 public:
12189 OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
12190 SourceLocation::UIntTy LocEncoding, Address Addr,
12191 const Expr *Allocator)
12192 : RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
12193 Allocator(Allocator) {}
12194 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
12195 if (!CGF.HaveInsertPoint())
12196 return;
12197 llvm::Value *Args[3];
12198 Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
12199 CGF, SourceLocation::getFromRawEncoding(LocEncoding));
12200 Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
12201 Addr.getPointer(), CGF.VoidPtrTy);
12202 llvm::Value *AllocVal = CGF.EmitScalarExpr(Allocator);
12203 // According to the standard, the original allocator type is an enum
12204 // (integer). Convert to pointer type, if required.
12205 AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
12206 CGF.getContext().VoidPtrTy,
12207 Allocator->getExprLoc());
12208 Args[2] = AllocVal;
12209
12210 CGF.EmitRuntimeCall(RTLFn, Args);
12211 }
12212 };
12213 Address VDAddr =
12214 UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
12215 CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
12216 NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
12217 VDAddr, AA->getAllocator());
12218 if (UntiedRealAddr.isValid())
12219 if (auto *Region =
12220 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
12221 Region->emitUntiedSwitch(CGF);
12222 return VDAddr;
12223 }
12224 return UntiedAddr;
12225}
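The size computation in the variably-modified branch is the standard round-up-to-a-multiple trick; e.g. size 10 with alignment 8 gives ((10 + 8 - 1) / 8) * 8 = 16. As a scalar sketch (editor's illustration):

    uint64_t alignedSize(uint64_t Size, uint64_t Align) {
      return ((Size + Align - 1) / Align) * Align; // alignedSize(10, 8) == 16
    }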
12226
12227bool CGOpenMPRuntime::isLocalVarInUntiedTask(CodeGenFunction &CGF,
12228 const VarDecl *VD) const {
12229 auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
12230 if (It == FunctionToUntiedTaskStackMap.end())
12231 return false;
12232 return UntiedLocalVarsStack[It->second].count(VD) > 0;
12233}
12234
12235CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
12236 CodeGenModule &CGM, const OMPLoopDirective &S)
12237 : CGM(CGM), NeedToPush(S.hasClausesOfKind<OMPNontemporalClause>()) {
12238 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.")((void)0);
12239 if (!NeedToPush)
12240 return;
12241 NontemporalDeclsSet &DS =
12242 CGM.getOpenMPRuntime().NontemporalDeclsStack.emplace_back();
12243 for (const auto *C : S.getClausesOfKind<OMPNontemporalClause>()) {
12244 for (const Stmt *Ref : C->private_refs()) {
12245 const auto *SimpleRefExpr = cast<Expr>(Ref)->IgnoreParenImpCasts();
12246 const ValueDecl *VD;
12247 if (const auto *DRE = dyn_cast<DeclRefExpr>(SimpleRefExpr)) {
12248 VD = DRE->getDecl();
12249 } else {
12250 const auto *ME = cast<MemberExpr>(SimpleRefExpr);
12251 assert((ME->isImplicitCXXThis() ||((void)0)
12252 isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) &&((void)0)
12253 "Expected member of current class.")((void)0);
12254 VD = ME->getMemberDecl();
12255 }
12256 DS.insert(VD);
12257 }
12258 }
12259}
12260
12261CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() {
12262 if (!NeedToPush)
12263 return;
12264 CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back();
12265}
12266
12267CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII(
12268 CodeGenFunction &CGF,
12269 const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
12270 std::pair<Address, Address>> &LocalVars)
12271 : CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
12272 if (!NeedToPush)
12273 return;
12274 CGM.getOpenMPRuntime().FunctionToUntiedTaskStackMap.try_emplace(
12275 CGF.CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size());
12276 CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars);
12277}
12278
12279CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::~UntiedTaskLocalDeclsRAII() {
12280 if (!NeedToPush)
12281 return;
12282 CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back();
12283}
12284
12285bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
12286 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.")((void)0);
12287
12288 return llvm::any_of(
12289 CGM.getOpenMPRuntime().NontemporalDeclsStack,
12290 [VD](const NontemporalDeclsSet &Set) { return Set.count(VD) > 0; });
12291}
12292
12293void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
12294 const OMPExecutableDirective &S,
12295 llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled)
12296 const {
12297 llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
12298 // Vars in target/task regions must be excluded completely.
12299 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) ||
12300 isOpenMPTaskingDirective(S.getDirectiveKind())) {
12301 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
12302 getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind());
12303 const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
12304 for (const CapturedStmt::Capture &Cap : CS->captures()) {
12305 if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
12306 NeedToCheckForLPCs.insert(Cap.getCapturedVar());
12307 }
12308 }
12309 // Exclude vars in private clauses.
12310 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
12311 for (const Expr *Ref : C->varlists()) {
12312 if (!Ref->getType()->isScalarType())
12313 continue;
12314 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12315 if (!DRE)
12316 continue;
12317 NeedToCheckForLPCs.insert(DRE->getDecl());
12318 }
12319 }
12320 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
12321 for (const Expr *Ref : C->varlists()) {
12322 if (!Ref->getType()->isScalarType())
12323 continue;
12324 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12325 if (!DRE)
12326 continue;
12327 NeedToCheckForLPCs.insert(DRE->getDecl());
12328 }
12329 }
12330 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
12331 for (const Expr *Ref : C->varlists()) {
12332 if (!Ref->getType()->isScalarType())
12333 continue;
12334 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12335 if (!DRE)
12336 continue;
12337 NeedToCheckForLPCs.insert(DRE->getDecl());
12338 }
12339 }
12340 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
12341 for (const Expr *Ref : C->varlists()) {
12342 if (!Ref->getType()->isScalarType())
12343 continue;
12344 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12345 if (!DRE)
12346 continue;
12347 NeedToCheckForLPCs.insert(DRE->getDecl());
12348 }
12349 }
12350 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
12351 for (const Expr *Ref : C->varlists()) {
12352 if (!Ref->getType()->isScalarType())
12353 continue;
12354 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12355 if (!DRE)
12356 continue;
12357 NeedToCheckForLPCs.insert(DRE->getDecl());
12358 }
12359 }
12360 for (const Decl *VD : NeedToCheckForLPCs) {
12361 for (const LastprivateConditionalData &Data :
12362 llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
12363 if (Data.DeclToUniqueName.count(VD) > 0) {
12364 if (!Data.Disabled)
12365 NeedToAddForLPCsAsDisabled.insert(VD);
12366 break;
12367 }
12368 }
12369 }
12370}
12371
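The five clause loops in tryToDisableInnerAnalysis above are structurally identical. A hypothetical helper (a refactoring sketch, not present in this file) could express the shared shape once, instantiated per clause kind:

// Hypothetical; ClauseT is one of the OMP*Clause types iterated above and
// Out corresponds to NeedToCheckForLPCs.
template <typename ClauseT>
static void collectScalarDeclRefs(
    const OMPExecutableDirective &S,
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> &Out) {
  for (const auto *C : S.getClausesOfKind<ClauseT>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()))
        Out.insert(DRE->getDecl());
    }
  }
}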
12372CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
12373 CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal)
12374 : CGM(CGF.CGM),
12375 Action((CGM.getLangOpts().OpenMP >= 50 &&
12376 llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
12377 [](const OMPLastprivateClause *C) {
12378 return C->getKind() ==
12379 OMPC_LASTPRIVATE_conditional;
12380 }))
12381 ? ActionToDo::PushAsLastprivateConditional
12382 : ActionToDo::DoNotPush) {
12383 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
12384 if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
12385 return;
12386 assert(Action == ActionToDo::PushAsLastprivateConditional &&
12387 "Expected a push action.");
12388 LastprivateConditionalData &Data =
12389 CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
12390 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
12391 if (C->getKind() != OMPC_LASTPRIVATE_conditional)
12392 continue;
12393
12394 for (const Expr *Ref : C->varlists()) {
12395 Data.DeclToUniqueName.insert(std::make_pair(
12396 cast<DeclRefExpr>(Ref->IgnoreParenImpCasts())->getDecl(),
12397 SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref))));
12398 }
12399 }
12400 Data.IVLVal = IVLVal;
12401 Data.Fn = CGF.CurFn;
12402}
12403
12404CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
12405 CodeGenFunction &CGF, const OMPExecutableDirective &S)
12406 : CGM(CGF.CGM), Action(ActionToDo::DoNotPush) {
12407 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
12408 if (CGM.getLangOpts().OpenMP < 50)
12409 return;
12410 llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
12411 tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
12412 if (!NeedToAddForLPCsAsDisabled.empty()) {
12413 Action = ActionToDo::DisableLastprivateConditional;
12414 LastprivateConditionalData &Data =
12415 CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
12416 for (const Decl *VD : NeedToAddForLPCsAsDisabled)
12417 Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>()));
12418 Data.Fn = CGF.CurFn;
12419 Data.Disabled = true;
12420 }
12421}
12422
12423CGOpenMPRuntime::LastprivateConditionalRAII
12424CGOpenMPRuntime::LastprivateConditionalRAII::disable(
12425 CodeGenFunction &CGF, const OMPExecutableDirective &S) {
12426 return LastprivateConditionalRAII(CGF, S);
12427}
12428
12429CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() {
12430 if (CGM.getLangOpts().OpenMP < 50)
12431 return;
12432 if (Action == ActionToDo::DisableLastprivateConditional) {
12433 assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
12434 "Expected list of disabled private vars.");
12435 CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
12436 }
12437 if (Action == ActionToDo::PushAsLastprivateConditional) {
12438 assert(
12439 !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
12440 "Expected list of lastprivate conditional vars.");
12441 CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
12442 }
12443}
12444
12445Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
12446 const VarDecl *VD) {
12447 ASTContext &C = CGM.getContext();
12448 auto I = LastprivateConditionalToTypes.find(CGF.CurFn);
12449 if (I == LastprivateConditionalToTypes.end())
12450 I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first;
12451 QualType NewType;
12452 const FieldDecl *VDField;
12453 const FieldDecl *FiredField;
12454 LValue BaseLVal;
12455 auto VI = I->getSecond().find(VD);
12456 if (VI == I->getSecond().end()) {
12457 RecordDecl *RD = C.buildImplicitRecord("lastprivate.conditional");
12458 RD->startDefinition();
12459 VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
12460 FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
12461 RD->completeDefinition();
12462 NewType = C.getRecordType(RD);
12463 Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
12464 BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
12465 I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
12466 } else {
12467 NewType = std::get<0>(VI->getSecond());
12468 VDField = std::get<1>(VI->getSecond());
12469 FiredField = std::get<2>(VI->getSecond());
12470 BaseLVal = std::get<3>(VI->getSecond());
12471 }
12472 LValue FiredLVal =
12473 CGF.EmitLValueForField(BaseLVal, FiredField);
12474 CGF.EmitStoreOfScalar(
12475 llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
12476 FiredLVal);
12477 return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
12478}
12479
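emitLastprivateConditionalInit above pairs the private copy with a "Fired" flag in one implicit record. A rough source-level picture of the layout it models for a lastprivate conditional 'int a' (an illustration, not generated code):

// Illustrative only: the "lastprivate.conditional" record built above.
struct LastprivateConditional {
  int Value;  // VDField: the private copy of the variable
  char Fired; // FiredField: set once the variable is actually assigned
};
// The function zero-initializes Fired and returns the address of Value,
// which then serves as the private copy inside the region.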
12480namespace {
12481/// Checks if the lastprivate conditional variable is referenced in LHS.
12482class LastprivateConditionalRefChecker final
12483 : public ConstStmtVisitor<LastprivateConditionalRefChecker, bool> {
12484 ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM;
12485 const Expr *FoundE = nullptr;
12486 const Decl *FoundD = nullptr;
12487 StringRef UniqueDeclName;
12488 LValue IVLVal;
12489 llvm::Function *FoundFn = nullptr;
12490 SourceLocation Loc;
12491
12492public:
12493 bool VisitDeclRefExpr(const DeclRefExpr *E) {
12494 for (const CGOpenMPRuntime::LastprivateConditionalData &D :
12495 llvm::reverse(LPM)) {
12496 auto It = D.DeclToUniqueName.find(E->getDecl());
12497 if (It == D.DeclToUniqueName.end())
12498 continue;
12499 if (D.Disabled)
12500 return false;
12501 FoundE = E;
12502 FoundD = E->getDecl()->getCanonicalDecl();
12503 UniqueDeclName = It->second;
12504 IVLVal = D.IVLVal;
12505 FoundFn = D.Fn;
12506 break;
12507 }
12508 return FoundE == E;
12509 }
12510 bool VisitMemberExpr(const MemberExpr *E) {
12511 if (!CodeGenFunction::IsWrappedCXXThis(E->getBase()))
12512 return false;
12513 for (const CGOpenMPRuntime::LastprivateConditionalData &D :
12514 llvm::reverse(LPM)) {
12515 auto It = D.DeclToUniqueName.find(E->getMemberDecl());
12516 if (It == D.DeclToUniqueName.end())
12517 continue;
12518 if (D.Disabled)
12519 return false;
12520 FoundE = E;
12521 FoundD = E->getMemberDecl()->getCanonicalDecl();
12522 UniqueDeclName = It->second;
12523 IVLVal = D.IVLVal;
12524 FoundFn = D.Fn;
12525 break;
12526 }
12527 return FoundE == E;
12528 }
12529 bool VisitStmt(const Stmt *S) {
12530 for (const Stmt *Child : S->children()) {
12531 if (!Child)
12532 continue;
12533 if (const auto *E = dyn_cast<Expr>(Child))
12534 if (!E->isGLValue())
12535 continue;
12536 if (Visit(Child))
12537 return true;
12538 }
12539 return false;
12540 }
12541 explicit LastprivateConditionalRefChecker(
12542 ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
12543 : LPM(LPM) {}
12544 std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
12545 getFoundData() const {
12546 return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
12547 }
12548};
12549} // namespace
12550
12551void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
12552 LValue IVLVal,
12553 StringRef UniqueDeclName,
12554 LValue LVal,
12555 SourceLocation Loc) {
12556 // Last updated loop counter for the lastprivate conditional var.
12557 // int<xx> last_iv = 0;
12558 llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
12559 llvm::Constant *LastIV =
12560 getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"}));
12561 cast<llvm::GlobalVariable>(LastIV)->setAlignment(
12562 IVLVal.getAlignment().getAsAlign());
12563 LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
12564
12565 // Last value of the lastprivate conditional.
12566 // decltype(priv_a) last_a;
12567 llvm::Constant *Last = getOrCreateInternalVariable(
12568 CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
12569 cast<llvm::GlobalVariable>(Last)->setAlignment(
12570 LVal.getAlignment().getAsAlign());
12571 LValue LastLVal =
12572 CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment());
12573
12574 // Global loop counter. Required to handle inner parallel-for regions.
12575 // iv
12576 llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc);
12577
12578 // #pragma omp critical(a)
12579 // if (last_iv <= iv) {
12580 // last_iv = iv;
12581 // last_a = priv_a;
12582 // }
12583 auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
12584 Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
12585 Action.Enter(CGF);
12586 llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc);
12587 // (last_iv <= iv) ? Check if the variable is updated and store new
12588 // value in global var.
12589 llvm::Value *CmpRes;
12590 if (IVLVal.getType()->isSignedIntegerType()) {
12591 CmpRes = CGF.Builder.CreateICmpSLE(LastIVVal, IVVal);
12592 } else {
12593 assert(IVLVal.getType()->isUnsignedIntegerType() &&
12594 "Loop iteration variable must be integer.");
12595 CmpRes = CGF.Builder.CreateICmpULE(LastIVVal, IVVal);
12596 }
12597 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lp_cond_then");
12598 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("lp_cond_exit");
12599 CGF.Builder.CreateCondBr(CmpRes, ThenBB, ExitBB);
12600 // {
12601 CGF.EmitBlock(ThenBB);
12602
12603 // last_iv = iv;
12604 CGF.EmitStoreOfScalar(IVVal, LastIVLVal);
12605
12606 // last_a = priv_a;
12607 switch (CGF.getEvaluationKind(LVal.getType())) {
12608 case TEK_Scalar: {
12609 llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc);
12610 CGF.EmitStoreOfScalar(PrivVal, LastLVal);
12611 break;
12612 }
12613 case TEK_Complex: {
12614 CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc);
12615 CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false);
12616 break;
12617 }
12618 case TEK_Aggregate:
12619 llvm_unreachable(
12620 "Aggregates are not supported in lastprivate conditional.");
12621 }
12622 // }
12623 CGF.EmitBranch(ExitBB);
12624 // There is no need to emit line number for unconditional branch.
12625 (void)ApplyDebugLocation::CreateEmpty(CGF);
12626 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
12627 };
12628
12629 if (CGM.getLangOpts().OpenMPSimd) {
12630 // Do not emit as a critical region as no parallel region could be emitted.
12631 RegionCodeGenTy ThenRCG(CodeGen);
12632 ThenRCG(CGF);
12633 } else {
12634 emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc);
12635 }
12636}
12637
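At the source level, this update sequence implements the OpenMP 5.0 lastprivate(conditional:) semantics: after the loop, the original variable holds the value from the last iteration that actually assigned it. A user-level illustration (assumes a compiler with OpenMP 5.0 support):

#include <cstdio>

int main() {
  int a = -1;
#pragma omp parallel for lastprivate(conditional : a)
  for (int i = 0; i < 100; ++i) {
    if (i % 10 == 0)
      a = i; // last conditional assignment wins: i == 90
  }
  std::printf("%d\n", a); // prints 90
  return 0;
}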
12638void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
12639 const Expr *LHS) {
12640 if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
12641 return;
12642 LastprivateConditionalRefChecker Checker(LastprivateConditionalStack);
12643 if (!Checker.Visit(LHS))
12644 return;
12645 const Expr *FoundE;
12646 const Decl *FoundD;
12647 StringRef UniqueDeclName;
12648 LValue IVLVal;
12649 llvm::Function *FoundFn;
12650 std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
12651 Checker.getFoundData();
12652 if (FoundFn != CGF.CurFn) {
12653 // Special codegen for inner parallel regions.
12654 // ((struct.lastprivate.conditional*)&priv_a)->Fired = 1;
12655 auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD);
12656 assert(It != LastprivateConditionalToTypes[FoundFn].end() &&
12657 "Lastprivate conditional is not found in outer region.");
12658 QualType StructTy = std::get<0>(It->getSecond());
12659 const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
12660 LValue PrivLVal = CGF.EmitLValue(FoundE);
12661 Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
12662 PrivLVal.getAddress(CGF),
12663 CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)));
12664 LValue BaseLVal =
12665 CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
12666 LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
12667 CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get(
12668 CGF.ConvertTypeForMem(FiredDecl->getType()), 1)),
12669 FiredLVal, llvm::AtomicOrdering::Unordered,
12670 /*IsVolatile=*/true, /*isInit=*/false);
12671 return;
12672 }
12673
12674 // Private address of the lastprivate conditional in the current context.
12675 // priv_a
12676 LValue LVal = CGF.EmitLValue(FoundE);
12677 emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal,
12678 FoundE->getExprLoc());
12679}
12680
12681void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
12682 CodeGenFunction &CGF, const OMPExecutableDirective &D,
12683 const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls) {
12684 if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
12685 return;
12686 auto Range = llvm::reverse(LastprivateConditionalStack);
12687 auto It = llvm::find_if(
12688 Range, [](const LastprivateConditionalData &D) { return !D.Disabled; });
12689 if (It == Range.end() || It->Fn != CGF.CurFn)
12690 return;
12691 auto LPCI = LastprivateConditionalToTypes.find(It->Fn);
12692 assert(LPCI != LastprivateConditionalToTypes.end() &&
12693 "Lastprivates must be registered already.");
12694 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
12695 getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
12696 const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
12697 for (const auto &Pair : It->DeclToUniqueName) {
12698 const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
12699 if (!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0)
12700 continue;
12701 auto I = LPCI->getSecond().find(Pair.first);
12702 assert(I != LPCI->getSecond().end() &&
12703 "Lastprivate must be registered already.");
12704 // bool Cmp = priv_a.Fired != 0;
12705 LValue BaseLVal = std::get<3>(I->getSecond());
12706 LValue FiredLVal =
12707 CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond()));
12708 llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc());
12709 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res);
12710 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then");
12711 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done");
12712 // if (Cmp) {
12713 CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
12714 CGF.EmitBlock(ThenBB);
12715 Address Addr = CGF.GetAddrOfLocalVar(VD);
12716 LValue LVal;
12717 if (VD->getType()->isReferenceType())
12718 LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
12719 AlignmentSource::Decl);
12720 else
12721 LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(),
12722 AlignmentSource::Decl);
12723 emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal,
12724 D.getBeginLoc());
12725 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
12726 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
12727 // }
12728 }
12729}
12730
12731void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
12732 CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD,
12733 SourceLocation Loc) {
12734 if (CGF.getLangOpts().OpenMP < 50)
12735 return;
12736 auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD);
12737 assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() &&
12738 "Unknown lastprivate conditional variable.");
12739 StringRef UniqueName = It->second;
12740 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName);
12741 // The variable was not updated in the region - exit.
12742 if (!GV)
12743 return;
12744 LValue LPLVal = CGF.MakeAddrLValue(
12745 GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment());
12746 llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
12747 CGF.EmitStoreOfScalar(Res, PrivLVal);
12748}
12749
12750llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
12751 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
12752 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
12753 llvm_unreachable("Not supported in SIMD-only mode");
12754}
12755
12756llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
12757 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
12758 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
12759 llvm_unreachable("Not supported in SIMD-only mode");
12760}
12761
12762llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
12763 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
12764 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
12765 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
12766 bool Tied, unsigned &NumberOfParts) {
12767 llvm_unreachable("Not supported in SIMD-only mode");
12768}
12769
12770void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
12771 SourceLocation Loc,
12772 llvm::Function *OutlinedFn,
12773 ArrayRef<llvm::Value *> CapturedVars,
12774 const Expr *IfCond) {
12775 llvm_unreachable("Not supported in SIMD-only mode");
12776}
12777
12778void CGOpenMPSIMDRuntime::emitCriticalRegion(
12779 CodeGenFunction &CGF, StringRef CriticalName,
12780 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
12781 const Expr *Hint) {
12782 llvm_unreachable("Not supported in SIMD-only mode");
12783}
12784
12785void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
12786 const RegionCodeGenTy &MasterOpGen,
12787 SourceLocation Loc) {
12788 llvm_unreachable("Not supported in SIMD-only mode");
12789}
12790
12791void CGOpenMPSIMDRuntime::emitMaskedRegion(CodeGenFunction &CGF,
12792 const RegionCodeGenTy &MasterOpGen,
12793 SourceLocation Loc,
12794 const Expr *Filter) {
12795 llvm_unreachable("Not supported in SIMD-only mode");
12796}
12797
12798void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
12799 SourceLocation Loc) {
12800 llvm_unreachable("Not supported in SIMD-only mode");
12801}
12802
12803void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
12804 CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
12805 SourceLocation Loc) {
12806 llvm_unreachable("Not supported in SIMD-only mode");
12807}
12808
12809void CGOpenMPSIMDRuntime::emitSingleRegion(
12810 CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
12811 SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
12812 ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
12813 ArrayRef<const Expr *> AssignmentOps) {
12814 llvm_unreachable("Not supported in SIMD-only mode");
12815}
12816
12817void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
12818 const RegionCodeGenTy &OrderedOpGen,
12819 SourceLocation Loc,
12820 bool IsThreads) {
12821 llvm_unreachable("Not supported in SIMD-only mode");
12822}
12823
12824void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
12825 SourceLocation Loc,
12826 OpenMPDirectiveKind Kind,
12827 bool EmitChecks,
12828 bool ForceSimpleCall) {
12829 llvm_unreachable("Not supported in SIMD-only mode");
12830}
12831
12832void CGOpenMPSIMDRuntime::emitForDispatchInit(
12833 CodeGenFunction &CGF, SourceLocation Loc,
12834 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
12835 bool Ordered, const DispatchRTInput &DispatchValues) {
12836 llvm_unreachable("Not supported in SIMD-only mode");
12837}
12838
12839void CGOpenMPSIMDRuntime::emitForStaticInit(
12840 CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
12841 const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
12842 llvm_unreachable("Not supported in SIMD-only mode");
12843}
12844
12845void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
12846 CodeGenFunction &CGF, SourceLocation Loc,
12847 OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
12848 llvm_unreachable("Not supported in SIMD-only mode");
12849}
12850
12851void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
12852 SourceLocation Loc,
12853 unsigned IVSize,
12854 bool IVSigned) {
12855 llvm_unreachable("Not supported in SIMD-only mode");
12856}
12857
12858void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
12859 SourceLocation Loc,
12860 OpenMPDirectiveKind DKind) {
12861 llvm_unreachable("Not supported in SIMD-only mode");
12862}
12863
12864llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
12865 SourceLocation Loc,
12866 unsigned IVSize, bool IVSigned,
12867 Address IL, Address LB,
12868 Address UB, Address ST) {
12869 llvm_unreachable("Not supported in SIMD-only mode");
12870}
12871
12872void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
12873 llvm::Value *NumThreads,
12874 SourceLocation Loc) {
12875 llvm_unreachable("Not supported in SIMD-only mode");
12876}
12877
12878void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
12879 ProcBindKind ProcBind,
12880 SourceLocation Loc) {
12881 llvm_unreachable("Not supported in SIMD-only mode");
12882}
12883
12884Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
12885 const VarDecl *VD,
12886 Address VDAddr,
12887 SourceLocation Loc) {
12888 llvm_unreachable("Not supported in SIMD-only mode");
12889}
12890
12891llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
12892 const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
12893 CodeGenFunction *CGF) {
12894 llvm_unreachable("Not supported in SIMD-only mode");
12895}
12896
12897Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
12898 CodeGenFunction &CGF, QualType VarType, StringRef Name) {
12899 llvm_unreachable("Not supported in SIMD-only mode");
12900}
12901
12902void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
12903 ArrayRef<const Expr *> Vars,
12904 SourceLocation Loc,
12905 llvm::AtomicOrdering AO) {
12906 llvm_unreachable("Not supported in SIMD-only mode");
12907}
12908
12909void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
12910 const OMPExecutableDirective &D,
12911 llvm::Function *TaskFunction,
12912 QualType SharedsTy, Address Shareds,
12913 const Expr *IfCond,
12914 const OMPTaskDataTy &Data) {
12915 llvm_unreachable("Not supported in SIMD-only mode");
12916}
12917
12918void CGOpenMPSIMDRuntime::emitTaskLoopCall(
12919 CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
12920 llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds,
12921 const Expr *IfCond, const OMPTaskDataTy &Data) {
12922 llvm_unreachable("Not supported in SIMD-only mode");
12923}
12924
12925void CGOpenMPSIMDRuntime::emitReduction(
12926 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
12927 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
12928 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
12929 assert(Options.SimpleReduction && "Only simple reduction is expected.");
12930 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
12931 ReductionOps, Options);
12932}
12933
12934llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
12935 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
12936 ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
12937 llvm_unreachable("Not supported in SIMD-only mode");
12938}
12939
12940void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
12941 SourceLocation Loc,
12942 bool IsWorksharingReduction) {
12943 llvm_unreachable("Not supported in SIMD-only mode");
12944}
12945
12946void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
12947 SourceLocation Loc,
12948 ReductionCodeGen &RCG,
12949 unsigned N) {
12950 llvm_unreachable("Not supported in SIMD-only mode");
12951}
12952
12953Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
12954 SourceLocation Loc,
12955 llvm::Value *ReductionsPtr,
12956 LValue SharedLVal) {
12957 llvm_unreachable("Not supported in SIMD-only mode");
12958}
12959
12960void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
12961 SourceLocation Loc) {
12962 llvm_unreachable("Not supported in SIMD-only mode");
12963}
12964
12965void CGOpenMPSIMDRuntime::emitCancellationPointCall(
12966 CodeGenFunction &CGF, SourceLocation Loc,
12967 OpenMPDirectiveKind CancelRegion) {
12968 llvm_unreachable("Not supported in SIMD-only mode");
12969}
12970
12971void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
12972 SourceLocation Loc, const Expr *IfCond,
12973 OpenMPDirectiveKind CancelRegion) {
12974 llvm_unreachable("Not supported in SIMD-only mode");
12975}
12976
12977void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
12978 const OMPExecutableDirective &D, StringRef ParentName,
12979 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
12980 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
12981 llvm_unreachable("Not supported in SIMD-only mode");
12982}
12983
12984void CGOpenMPSIMDRuntime::emitTargetCall(
12985 CodeGenFunction &CGF, const OMPExecutableDirective &D,
12986 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
12987 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
12988 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
12989 const OMPLoopDirective &D)>
12990 SizeEmitter) {
12991 llvm_unreachable("Not supported in SIMD-only mode");
12992}
12993
12994bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
12995 llvm_unreachable("Not supported in SIMD-only mode");
12996}
12997
12998bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
12999 llvm_unreachable("Not supported in SIMD-only mode");
13000}
13001
13002bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
13003 return false;
13004}
13005
13006void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
13007 const OMPExecutableDirective &D,
13008 SourceLocation Loc,
13009 llvm::Function *OutlinedFn,
13010 ArrayRef<llvm::Value *> CapturedVars) {
13011 llvm_unreachable("Not supported in SIMD-only mode");
13012}
13013
13014void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
13015 const Expr *NumTeams,
13016 const Expr *ThreadLimit,
13017 SourceLocation Loc) {
13018 llvm_unreachable("Not supported in SIMD-only mode");
13019}
13020
13021void CGOpenMPSIMDRuntime::emitTargetDataCalls(
13022 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
13023 const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
13024 llvm_unreachable("Not supported in SIMD-only mode");
13025}
13026
13027void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
13028 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
13029 const Expr *Device) {
13030 llvm_unreachable("Not supported in SIMD-only mode");
13031}
13032
13033void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
13034 const OMPLoopDirective &D,
13035 ArrayRef<Expr *> NumIterations) {
13036 llvm_unreachable("Not supported in SIMD-only mode");
13037}
13038
13039void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
13040 const OMPDependClause *C) {
13041 llvm_unreachable("Not supported in SIMD-only mode");
13042}
13043
13044const VarDecl *
13045CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
13046 const VarDecl *NativeParam) const {
13047 llvm_unreachable("Not supported in SIMD-only mode");
13048}
13049
13050Address
13051CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
13052 const VarDecl *NativeParam,
13053 const VarDecl *TargetParam) const {
13054 llvm_unreachable("Not supported in SIMD-only mode");
13055}

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include/llvm/ADT/APInt.h

1//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements a class to represent arbitrary precision
11/// integral constant values and operations on them.
12///
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ADT_APINT_H
16#define LLVM_ADT_APINT_H
17
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MathExtras.h"
20#include <cassert>
21#include <climits>
22#include <cstring>
23#include <utility>
24
25namespace llvm {
26class FoldingSetNodeID;
27class StringRef;
28class hash_code;
29class raw_ostream;
30
31template <typename T> class SmallVectorImpl;
32template <typename T> class ArrayRef;
33template <typename T> class Optional;
34template <typename T> struct DenseMapInfo;
35
36class APInt;
37
38inline APInt operator-(APInt);
39
40//===----------------------------------------------------------------------===//
41// APInt Class
42//===----------------------------------------------------------------------===//
43
44/// Class for arbitrary precision integers.
45///
46 /// APInt is a functional replacement for common-case unsigned integer types like
47 /// "unsigned", "unsigned long" or "uint64_t", but it also allows non-byte-width
48 /// integer sizes and large integer value types such as 3-bit, 15-bit, or more
49 /// than 64 bits of precision. APInt provides a variety of arithmetic operators
50/// and methods to manipulate integer values of any bit-width. It supports both
51/// the typical integer arithmetic and comparison operations as well as bitwise
52/// manipulation.
53///
54/// The class has several invariants worth noting:
55/// * All bit, byte, and word positions are zero-based.
56/// * Once the bit width is set, it doesn't change except by the Truncate,
57/// SignExtend, or ZeroExtend operations.
58/// * All binary operators must be on APInt instances of the same bit width.
59/// Attempting to use these operators on instances with different bit
60/// widths will yield an assertion.
61/// * The value is stored canonically as an unsigned value. For operations
62/// where it makes a difference, there are both signed and unsigned variants
63/// of the operation. For example, sdiv and udiv. However, because the bit
64/// widths must be the same, operations such as Mul and Add produce the same
65/// results regardless of whether the values are interpreted as signed or
66/// not.
67/// * In general, the class tries to follow the style of computation that LLVM
68/// uses in its IR. This simplifies its use for LLVM.
69///
70 class LLVM_NODISCARD APInt {
71public:
72 typedef uint64_t WordType;
73
74 /// This enum is used to hold the constants we needed for APInt.
75 enum : unsigned {
76 /// Byte size of a word.
77 APINT_WORD_SIZE = sizeof(WordType),
78 /// Bits in a word.
79 APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
80 };
81
82 enum class Rounding {
83 DOWN,
84 TOWARD_ZERO,
85 UP,
86 };
87
88 static constexpr WordType WORDTYPE_MAX = ~WordType(0);
89
90private:
91 /// This union is used to store the integer value. When the
92 /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
93 union {
94 uint64_t VAL; ///< Used to store the <= 64 bits integer value.
95 uint64_t *pVal; ///< Used to store the >64 bits integer value.
96 } U;
97
98 unsigned BitWidth; ///< The number of bits in this APInt.
99
100 friend struct DenseMapInfo<APInt>;
101
102 friend class APSInt;
103
104 /// Fast internal constructor
105 ///
106 /// This constructor is used only internally for speed of construction of
107 /// temporaries. It is unsafe for general use so it is not public.
108 APInt(uint64_t *val, unsigned bits) : BitWidth(bits) {
109 U.pVal = val;
110 }
111
112 /// Determine which word a bit is in.
113 ///
114 /// \returns the word position for the specified bit position.
115 static unsigned whichWord(unsigned bitPosition) {
116 return bitPosition / APINT_BITS_PER_WORD;
117 }
118
119 /// Determine which bit in a word a bit is in.
120 ///
121 /// \returns the bit position in a word for the specified bit position
122 /// in the APInt.
123 static unsigned whichBit(unsigned bitPosition) {
124 return bitPosition % APINT_BITS_PER_WORD;
125 }
126
127 /// Get a single bit mask.
128 ///
129 /// \returns a uint64_t with only bit at "whichBit(bitPosition)" set
130 /// This method generates and returns a uint64_t (word) mask for a single
131 /// bit at a specific bit position. This is used to mask the bit in the
132 /// corresponding word.
133 static uint64_t maskBit(unsigned bitPosition) {
134 return 1ULL << whichBit(bitPosition);
135 }
136
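A worked example of the word/bit arithmetic above, assuming the usual 64-bit word size:

// Bit position 70 of a multi-word APInt lives in word 1, at bit 6:
static_assert(70 / 64 == 1, "whichWord(70) == 1");
static_assert(70 % 64 == 6, "whichBit(70) == 6");
// maskBit(70) therefore yields 1ULL << 6 == 0x40 within word 1.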
137 /// Clear unused high order bits
138 ///
139 /// This method is used internally to clear the top "N" bits in the high order
140 /// word that are not used by the APInt. This is needed after the most
141 /// significant word is assigned a value to ensure that those bits are
142 /// zero'd out.
143 APInt &clearUnusedBits() {
144 // Compute how many bits are used in the final word
145 unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1;
146
147 // Mask out the high bits.
148 uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits);
149 if (isSingleWord())
150 U.VAL &= mask;
151 else
152 U.pVal[getNumWords() - 1] &= mask;
153 return *this;
154 }
155
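A worked example of clearUnusedBits for a 5-bit value stored in a single 64-bit word, written as plain C++ that mirrors the arithmetic above:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t WordMax = ~uint64_t(0);
  unsigned BitWidth = 5;
  unsigned WordBits = ((BitWidth - 1) % 64) + 1; // 5
  uint64_t mask = WordMax >> (64 - WordBits);    // 0x1F
  uint64_t val = 0xFF;                           // stale bits above bit 4
  assert((val & mask) == 0x1F);                  // high bits cleared
  return 0;
}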
156 /// Get the word corresponding to a bit position
157 /// \returns the corresponding word for the specified bit position.
158 uint64_t getWord(unsigned bitPosition) const {
159 return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)];
160 }
161
162 /// Utility method to change the bit width of this APInt to new bit width,
163 /// allocating and/or deallocating as necessary. There is no guarantee on the
164 /// value of any bits upon return. Caller should populate the bits after.
165 void reallocate(unsigned NewBitWidth);
166
167 /// Convert a char array into an APInt
168 ///
169 /// \param radix 2, 8, 10, 16, or 36
170 /// Converts a string into a number. The string must be non-empty
171 /// and well-formed as a number of the given base. The bit-width
172 /// must be sufficient to hold the result.
173 ///
174 /// This is used by the constructors that take string arguments.
175 ///
176 /// StringRef::getAsInteger is superficially similar but (1) does
177 /// not assume that the string is well-formed and (2) grows the
178 /// result to hold the input.
179 void fromString(unsigned numBits, StringRef str, uint8_t radix);
180
181 /// An internal division function for dividing APInts.
182 ///
183 /// This is used by the toString method to divide by the radix. It simply
184 /// provides a more convenient form of divide for internal use since KnuthDiv
185 /// has specific constraints on its inputs. If those constraints are not met
186 /// then it provides a simpler form of divide.
187 static void divide(const WordType *LHS, unsigned lhsWords,
188 const WordType *RHS, unsigned rhsWords, WordType *Quotient,
189 WordType *Remainder);
190
191 /// out-of-line slow case for inline constructor
192 void initSlowCase(uint64_t val, bool isSigned);
193
194 /// shared code between two array constructors
195 void initFromArray(ArrayRef<uint64_t> array);
196
197 /// out-of-line slow case for inline copy constructor
198 void initSlowCase(const APInt &that);
199
200 /// out-of-line slow case for shl
201 void shlSlowCase(unsigned ShiftAmt);
202
203 /// out-of-line slow case for lshr.
204 void lshrSlowCase(unsigned ShiftAmt);
205
206 /// out-of-line slow case for ashr.
207 void ashrSlowCase(unsigned ShiftAmt);
208
209 /// out-of-line slow case for operator=
210 void AssignSlowCase(const APInt &RHS);
211
212 /// out-of-line slow case for operator==
213 bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY;
214
215 /// out-of-line slow case for countLeadingZeros
216 unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
217
218 /// out-of-line slow case for countLeadingOnes.
219 unsigned countLeadingOnesSlowCase() const LLVM_READONLY;
220
221 /// out-of-line slow case for countTrailingZeros.
222 unsigned countTrailingZerosSlowCase() const LLVM_READONLY;
223
224 /// out-of-line slow case for countTrailingOnes
225 unsigned countTrailingOnesSlowCase() const LLVM_READONLY;
226
227 /// out-of-line slow case for countPopulation
228 unsigned countPopulationSlowCase() const LLVM_READONLY;
229
230 /// out-of-line slow case for intersects.
231 bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;
232
233 /// out-of-line slow case for isSubsetOf.
234 bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;
235
236 /// out-of-line slow case for setBits.
237 void setBitsSlowCase(unsigned loBit, unsigned hiBit);
238
239 /// out-of-line slow case for flipAllBits.
240 void flipAllBitsSlowCase();
241
242 /// out-of-line slow case for operator&=.
243 void AndAssignSlowCase(const APInt& RHS);
244
245 /// out-of-line slow case for operator|=.
246 void OrAssignSlowCase(const APInt& RHS);
247
248 /// out-of-line slow case for operator^=.
249 void XorAssignSlowCase(const APInt& RHS);
250
251 /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal
252 /// to, or greater than RHS.
253 int compare(const APInt &RHS) const LLVM_READONLY;
254
255 /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal
256 /// to, or greater than RHS.
257 int compareSigned(const APInt &RHS) const LLVM_READONLY;
258
259public:
260 /// \name Constructors
261 /// @{
262
263 /// Create a new APInt of numBits width, initialized as val.
264 ///
265 /// If isSigned is true then val is treated as if it were a signed value
266 /// (i.e. as an int64_t) and the appropriate sign extension to the bit width
267 /// will be done. Otherwise, no sign extension occurs (high order bits beyond
268 /// the range of val are zero filled).
269 ///
270 /// \param numBits the bit width of the constructed APInt
271 /// \param val the initial value of the APInt
272 /// \param isSigned how to treat signedness of val
273 APInt(unsigned numBits, uint64_t val, bool isSigned = false)
274 : BitWidth(numBits) {
275 assert(BitWidth && "bitwidth too small");
276 if (isSingleWord()) {
277 U.VAL = val;
278 clearUnusedBits();
279 } else {
280 initSlowCase(val, isSigned);
281 }
282 }
283
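A usage sketch for this constructor; the behavior follows directly from the doc comment above:

#include "llvm/ADT/APInt.h"
using llvm::APInt;

void ctorExamples() {
  APInt a(8, 255);      // unsigned: 0xFF
  APInt b(8, -1, true); // signed, sign-extended to 8 bits: also 0xFF
  // a == b holds: both are the all-ones 8-bit value.
}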
284 /// Construct an APInt of numBits width, initialized as bigVal[].
285 ///
286 /// Note that bigVal.size() can be smaller or larger than the corresponding
287 /// bit width but any extraneous bits will be dropped.
288 ///
289 /// \param numBits the bit width of the constructed APInt
290 /// \param bigVal a sequence of words to form the initial value of the APInt
291 APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);
292
293 /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
294 /// deprecated because this constructor is prone to ambiguity with the
295 /// APInt(unsigned, uint64_t, bool) constructor.
296 ///
297 /// If this overload is ever deleted, care should be taken to prevent calls
298 /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
299 /// constructor.
300 APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
301
302 /// Construct an APInt from a string representation.
303 ///
304 /// This constructor interprets the string \p str in the given radix. The
305 /// interpretation stops when the first character that is not suitable for the
306 /// radix is encountered, or the end of the string. Acceptable radix values
307 /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
308 /// string to require more bits than numBits.
309 ///
310 /// \param numBits the bit width of the constructed APInt
311 /// \param str the string to be interpreted
312 /// \param radix the radix to use for the conversion
313 APInt(unsigned numBits, StringRef str, uint8_t radix);
314
315 /// Simply makes *this a copy of that.
316 /// Copy Constructor.
317 APInt(const APInt &that) : BitWidth(that.BitWidth) {
318 if (isSingleWord())
319 U.VAL = that.U.VAL;
320 else
321 initSlowCase(that);
322 }
323
324 /// Move Constructor.
325 APInt(APInt &&that) : BitWidth(that.BitWidth) {
326 memcpy(&U, &that.U, sizeof(U));
327 that.BitWidth = 0;
328 }
329
330 /// Destructor.
331 ~APInt() {
332 if (needsCleanup())
333 delete[] U.pVal;
334 }
335
336 /// Default constructor that creates an uninteresting APInt
337 /// representing a 1-bit zero value.
338 ///
339 /// This is useful for object deserialization (pair this with the static
340 /// method Read).
341 explicit APInt() : BitWidth(1) { U.VAL = 0; }
342
343 /// Returns whether this instance allocated memory.
344 bool needsCleanup() const { return !isSingleWord(); }
345
346 /// Used to insert APInt objects, or objects that contain APInt objects, into
347 /// FoldingSets.
348 void Profile(FoldingSetNodeID &id) const;
349
350 /// @}
351 /// \name Value Tests
352 /// @{
353
354 /// Determine if this APInt just has one word to store its value.
355 ///
356 /// \returns true if the number of bits <= 64, false otherwise.
357 bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; }
358
359 /// Determine sign of this APInt.
360 ///
361 /// This tests the high bit of this APInt to determine if it is set.
362 ///
363 /// \returns true if this APInt is negative, false otherwise
364 bool isNegative() const { return (*this)[BitWidth - 1]; }
365
366 /// Determine if this APInt Value is non-negative (>= 0)
367 ///
368 /// This tests the high bit of the APInt to determine if it is unset.
369 bool isNonNegative() const { return !isNegative(); }
370
371 /// Determine if sign bit of this APInt is set.
372 ///
373 /// This tests the high bit of this APInt to determine if it is set.
374 ///
375 /// \returns true if this APInt has its sign bit set, false otherwise.
376 bool isSignBitSet() const { return (*this)[BitWidth-1]; }
377
378 /// Determine if sign bit of this APInt is clear.
379 ///
380 /// This tests the high bit of this APInt to determine if it is clear.
381 ///
382 /// \returns true if this APInt has its sign bit clear, false otherwise.
383 bool isSignBitClear() const { return !isSignBitSet(); }
384
385 /// Determine if this APInt Value is positive.
386 ///
387 /// This tests if the value of this APInt is positive (> 0). Note
388 /// that 0 is not a positive value.
389 ///
390 /// \returns true if this APInt is positive.
391 bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
392
393 /// Determine if this APInt Value is non-positive (<= 0).
394 ///
395 /// \returns true if this APInt is non-positive.
396 bool isNonPositive() const { return !isStrictlyPositive(); }
397
398 /// Determine if all bits are set
399 ///
400 /// This checks to see if all bits of the APInt are set or not.
401 bool isAllOnesValue() const {
402 if (isSingleWord())
403 return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth);
404 return countTrailingOnesSlowCase() == BitWidth;
405 }
406
407 /// Determine if all bits are clear
408 ///
409 /// This checks to see if all bits of the APInt are clear or
410 /// not.
411 bool isNullValue() const { return !*this; }
412
413 /// Determine if this is a value of 1.
414 ///
415 /// This checks to see if the value of this APInt is one.
416 bool isOneValue() const {
417 if (isSingleWord())
418 return U.VAL == 1;
419 return countLeadingZerosSlowCase() == BitWidth - 1;
420 }
421
422 /// Determine if this is the largest unsigned value.
423 ///
424 /// This checks to see if the value of this APInt is the maximum unsigned
425 /// value for the APInt's bit width.
426 bool isMaxValue() const { return isAllOnesValue(); }
427
428 /// Determine if this is the largest signed value.
429 ///
430 /// This checks to see if the value of this APInt is the maximum signed
431 /// value for the APInt's bit width.
432 bool isMaxSignedValue() const {
433 if (isSingleWord())
434 return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1);
435 return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1;
436 }
437
438 /// Determine if this is the smallest unsigned value.
439 ///
440 /// This checks to see if the value of this APInt is the minimum unsigned
441 /// value for the APInt's bit width.
442 bool isMinValue() const { return isNullValue(); }
443
444 /// Determine if this is the smallest signed value.
445 ///
446 /// This checks to see if the value of this APInt is the minimum signed
447 /// value for the APInt's bit width.
448 bool isMinSignedValue() const {
449 if (isSingleWord())
450 return U.VAL == (WordType(1) << (BitWidth - 1));
451 return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1;
452 }
453
454 /// Check if this APInt has an N-bits unsigned integer value.
455 bool isIntN(unsigned N) const {
456 assert(N && "N == 0 ???");
457 return getActiveBits() <= N;
458 }
459
460 /// Check if this APInt has an N-bits signed integer value.
461 bool isSignedIntN(unsigned N) const {
462 assert(N && "N == 0 ???");
463 return getMinSignedBits() <= N;
464 }
465
466 /// Check if this APInt's value is a power of two greater than zero.
467 ///
468 /// \returns true if the argument APInt value is a power of two > 0.
469 bool isPowerOf2() const {
470 if (isSingleWord())
471 return isPowerOf2_64(U.VAL);
472 return countPopulationSlowCase() == 1;
473 }
474
475 /// Check if the APInt's value is returned by getSignMask.
476 ///
477 /// \returns true if this is the value returned by getSignMask.
478 bool isSignMask() const { return isMinSignedValue(); }
479
480 /// Convert APInt to a boolean value.
481 ///
482 /// This converts the APInt to a boolean value as a test against zero.
483 bool getBoolValue() const { return !!*this; }
484
485 /// If this value is smaller than the specified limit, return it, otherwise
486 /// return the limit value. This causes the value to saturate to the limit.
487 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const {
488 return ugt(Limit) ? Limit : getZExtValue();
489 }
490
491 /// Check if the APInt consists of a repeated bit pattern.
492 ///
493 /// e.g. 0x01010101 satisfies isSplat(8).
494 /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit
495 /// width without remainder.
496 bool isSplat(unsigned SplatSizeInBits) const;
497
498 /// \returns true if this APInt value is a sequence of \param numBits ones
499 /// starting at the least significant bit with the remainder zero.
500 bool isMask(unsigned numBits) const {
501 assert(numBits != 0 && "numBits must be non-zero");
502 assert(numBits <= BitWidth && "numBits out of range");
503 if (isSingleWord())
504 return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits));
505 unsigned Ones = countTrailingOnesSlowCase();
506 return (numBits == Ones) &&
507 ((Ones + countLeadingZerosSlowCase()) == BitWidth);
508 }
509
510 /// \returns true if this APInt is a non-empty sequence of ones starting at
511 /// the least significant bit with the remainder zero.
512 /// Ex. isMask(0x0000FFFFU) == true.
513 bool isMask() const {
514 if (isSingleWord())
515 return isMask_64(U.VAL);
516 unsigned Ones = countTrailingOnesSlowCase();
517 return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth);
518 }
519
520 /// Return true if this APInt value contains a sequence of ones with
521 /// the remainder zero.
522 bool isShiftedMask() const {
523 if (isSingleWord())
524 return isShiftedMask_64(U.VAL);
525 unsigned Ones = countPopulationSlowCase();
526 unsigned LeadZ = countLeadingZerosSlowCase();
527 return (Ones + LeadZ + countTrailingZeros()) == BitWidth;
528 }
529
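Concrete 32-bit cases for the three mask predicates above (assert-style sketch):

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

void maskExamples() {
  assert(APInt(32, 0x0000FFFF).isMask());         // ones from bit 0 upward
  assert(APInt(32, 0x0000FFFF).isMask(16));       // exactly 16 ones
  assert(APInt(32, 0x00FF0000).isShiftedMask());  // one contiguous run
  assert(!APInt(32, 0x00FF00FF).isShiftedMask()); // two separate runs
}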
530 /// @}
531 /// \name Value Generators
532 /// @{
533
534 /// Gets maximum unsigned value of APInt for specific bit width.
535 static APInt getMaxValue(unsigned numBits) {
536 return getAllOnesValue(numBits);
537 }
538
539 /// Gets maximum signed value of APInt for a specific bit width.
540 static APInt getSignedMaxValue(unsigned numBits) {
541 APInt API = getAllOnesValue(numBits);
542 API.clearBit(numBits - 1);
543 return API;
544 }
545
546 /// Gets minimum unsigned value of APInt for a specific bit width.
547 static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); }
548
549 /// Gets minimum signed value of APInt for a specific bit width.
550 static APInt getSignedMinValue(unsigned numBits) {
551 APInt API(numBits, 0);
552 API.setBit(numBits - 1);
553 return API;
554 }
555
556 /// Get the SignMask for a specific bit width.
557 ///
558 /// This is just a wrapper function of getSignedMinValue(), and it helps code
559 /// readability when we want to get a SignMask.
560 static APInt getSignMask(unsigned BitWidth) {
561 return getSignedMinValue(BitWidth);
562 }
563
564 /// Get the all-ones value.
565 ///
566 /// \returns the all-ones value for an APInt of the specified bit-width.
567 static APInt getAllOnesValue(unsigned numBits) {
568 return APInt(numBits, WORDTYPE_MAX, true);
569 }
570
571 /// Get the '0' value.
572 ///
573 /// \returns the '0' value for an APInt of the specified bit-width.
574 static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); }
575
576 /// Compute an APInt containing numBits highbits from this APInt.
577 ///
578 /// Get an APInt with the same BitWidth as this APInt, just zero mask
579 /// the low bits and right shift to the least significant bit.
580 ///
581 /// \returns the high "numBits" bits of this APInt.
582 APInt getHiBits(unsigned numBits) const;
583
584 /// Compute an APInt containing numBits lowbits from this APInt.
585 ///
586 /// Get an APInt with the same BitWidth as this APInt, just zero mask
587 /// the high bits.
588 ///
589 /// \returns the low "numBits" bits of this APInt.
590 APInt getLoBits(unsigned numBits) const;
591
592 /// Return an APInt with exactly one bit set in the result.
593 static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
594 APInt Res(numBits, 0);
595 Res.setBit(BitNo);
596 return Res;
597 }
598
599 /// Get a value with a block of bits set.
600 ///
601 /// Constructs an APInt value that has a contiguous range of bits set. The
602 /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
603 /// bits will be zero. For example, with parameters(32, 0, 16) you would get
604 /// 0x0000FFFF. Please call getBitsSetWithWrap if \p loBit may be greater than
605 /// \p hiBit.
606 ///
607 /// \param numBits the intended bit width of the result
608 /// \param loBit the index of the lowest bit set.
609 /// \param hiBit the index of the highest bit set.
610 ///
611 /// \returns An APInt value with the requested bits set.
612 static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
613 assert(loBit <= hiBit && "loBit greater than hiBit");
614 APInt Res(numBits, 0);
615 Res.setBits(loBit, hiBit);
616 return Res;
617 }
618
619 /// Wrap version of getBitsSet.
620 /// If \p hiBit is bigger than \p loBit, this is same with getBitsSet.
621 /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example,
622 /// with parameters (32, 28, 4), you would get 0xF000000F.
623 /// If \p hiBit is equal to \p loBit, you would get a result with all bits
624 /// set.
625 static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit,
626 unsigned hiBit) {
627 APInt Res(numBits, 0);
628 Res.setBitsWithWrap(loBit, hiBit);
629 return Res;
630 }
631
632 /// Get a value with upper bits starting at loBit set.
633 ///
634 /// Constructs an APInt value that has a contiguous range of bits set. The
635 /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other
636 /// bits will be zero. For example, with parameters(32, 12) you would get
637 /// 0xFFFFF000.
638 ///
639 /// \param numBits the intended bit width of the result
640 /// \param loBit the index of the lowest bit to set.
641 ///
642 /// \returns An APInt value with the requested bits set.
643 static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) {
644 APInt Res(numBits, 0);
645 Res.setBitsFrom(loBit);
646 return Res;
647 }
648
649 /// Get a value with high bits set
650 ///
651 /// Constructs an APInt value that has the top hiBitsSet bits set.
652 ///
653 /// \param numBits the bitwidth of the result
654 /// \param hiBitsSet the number of high-order bits set in the result.
655 static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
656 APInt Res(numBits, 0);
657 Res.setHighBits(hiBitsSet);
658 return Res;
659 }
660
661 /// Get a value with low bits set
662 ///
663 /// Constructs an APInt value that has the bottom loBitsSet bits set.
664 ///
665 /// \param numBits the bitwidth of the result
666 /// \param loBitsSet the number of low-order bits set in the result.
667 static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
668 APInt Res(numBits, 0);
669 Res.setLowBits(loBitsSet);
670 return Res;
671 }
672
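A few concrete 32-bit cases for the value generators above, matching the doc comments:

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

void generatorExamples() {
  assert(APInt::getBitsSet(32, 0, 16) == APInt(32, 0x0000FFFF));
  assert(APInt::getBitsSetWithWrap(32, 28, 4) == APInt(32, 0xF000000F));
  assert(APInt::getHighBitsSet(32, 8) == APInt(32, 0xFF000000));
  assert(APInt::getLowBitsSet(32, 8) == APInt(32, 0x000000FF));
}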
673 /// Return a value containing V broadcasted over NewLen bits.
674 static APInt getSplat(unsigned NewLen, const APInt &V);
675
676 /// Determine if two APInts have the same value, after zero-extending
677 /// one of them (if needed!) to ensure that the bit-widths match.
678 static bool isSameValue(const APInt &I1, const APInt &I2) {
679 if (I1.getBitWidth() == I2.getBitWidth())
680 return I1 == I2;
681
682 if (I1.getBitWidth() > I2.getBitWidth())
683 return I1 == I2.zext(I1.getBitWidth());
684
685 return I1.zext(I2.getBitWidth()) == I2;
686 }
687
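isSameValue zero-extends the narrower operand before comparing, so equal unsigned values of different widths compare equal:

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

void sameValueExamples() {
  assert(APInt::isSameValue(APInt(4, 0xF), APInt(8, 0x0F)));  // both 15
  assert(!APInt::isSameValue(APInt(4, 0xF), APInt(8, 0xFF))); // 15 vs 255
}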
688 /// Overload to compute a hash_code for an APInt value.
689 friend hash_code hash_value(const APInt &Arg);
690
691 /// This function returns a pointer to the internal storage of the APInt.
692 /// This is useful for writing out the APInt in binary form without any
693 /// conversions.
694 const uint64_t *getRawData() const {
695 if (isSingleWord())
696 return &U.VAL;
697 return &U.pVal[0];
698 }
699
700 /// @}
701 /// \name Unary Operators
702 /// @{
703
704 /// Postfix increment operator.
705 ///
706 /// Increments *this by 1.
707 ///
708 /// \returns a new APInt value representing the original value of *this.
709 const APInt operator++(int) {
710 APInt API(*this);
711 ++(*this);
712 return API;
713 }
714
715 /// Prefix increment operator.
716 ///
717 /// \returns *this incremented by one
718 APInt &operator++();
719
720 /// Postfix decrement operator.
721 ///
722 /// Decrements *this by 1.
723 ///
724 /// \returns a new APInt value representing the original value of *this.
725 const APInt operator--(int) {
726 APInt API(*this);
727 --(*this);
728 return API;
729 }
730
731 /// Prefix decrement operator.
732 ///
733 /// \returns *this decremented by one.
734 APInt &operator--();
735
736 /// Logical negation operator.
737 ///
738 /// Performs logical negation operation on this APInt.
739 ///
740 /// \returns true if *this is zero, false otherwise.
741 bool operator!() const {
742 if (isSingleWord())
16: Taking true branch
743 return U.VAL == 0;
17: Returning the value 1, which participates in a condition later
744 return countLeadingZerosSlowCase() == BitWidth;
745 }
746
747 /// @}
748 /// \name Assignment Operators
749 /// @{
750
751 /// Copy assignment operator.
752 ///
753 /// \returns *this after assignment of RHS.
754 APInt &operator=(const APInt &RHS) {
755 // If the bitwidths are the same, we can avoid mucking with memory
756 if (isSingleWord() && RHS.isSingleWord()) {
757 U.VAL = RHS.U.VAL;
758 BitWidth = RHS.BitWidth;
759 return clearUnusedBits();
760 }
761
762 AssignSlowCase(RHS);
763 return *this;
764 }
765
766 /// Move assignment operator.
767 APInt &operator=(APInt &&that) {
768#ifdef EXPENSIVE_CHECKS
769 // Some std::shuffle implementations still do self-assignment.
770 if (this == &that)
771 return *this;
772#endif
773 assert(this != &that && "Self-move not supported");
774 if (!isSingleWord())
775 delete[] U.pVal;
776
777 // Use memcpy so that type based alias analysis sees both VAL and pVal
778 // as modified.
779 memcpy(&U, &that.U, sizeof(U));
780
781 BitWidth = that.BitWidth;
782 that.BitWidth = 0;
783
784 return *this;
785 }
786
787 /// Assignment operator.
788 ///
789 /// The RHS value is assigned to *this. If the significant bits in RHS exceed
790 /// the bit width, the excess bits are truncated. If the bit width is larger
791 /// than 64, the value is zero filled in the unspecified high order bits.
792 ///
793 /// \returns *this after assignment of RHS value.
794 APInt &operator=(uint64_t RHS) {
795 if (isSingleWord()) {
796 U.VAL = RHS;
797 return clearUnusedBits();
798 }
799 U.pVal[0] = RHS;
800 memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
801 return *this;
802 }
803
804 /// Bitwise AND assignment operator.
805 ///
806 /// Performs a bitwise AND operation on this APInt and RHS. The result is
807 /// assigned to *this.
808 ///
809 /// \returns *this after ANDing with RHS.
810 APInt &operator&=(const APInt &RHS) {
811 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
812 if (isSingleWord())
813 U.VAL &= RHS.U.VAL;
814 else
815 AndAssignSlowCase(RHS);
816 return *this;
817 }
818
819 /// Bitwise AND assignment operator.
820 ///
821 /// Performs a bitwise AND operation on this APInt and RHS. RHS is
822 /// logically zero-extended or truncated to match the bit-width of
823 /// the LHS.
824 APInt &operator&=(uint64_t RHS) {
825 if (isSingleWord()) {
826 U.VAL &= RHS;
827 return *this;
828 }
829 U.pVal[0] &= RHS;
830 memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
831 return *this;
832 }
833
834 /// Bitwise OR assignment operator.
835 ///
836 /// Performs a bitwise OR operation on this APInt and RHS. The result is
837 /// assigned to *this.
838 ///
839 /// \returns *this after ORing with RHS.
840 APInt &operator|=(const APInt &RHS) {
841 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
842 if (isSingleWord())
843 U.VAL |= RHS.U.VAL;
844 else
845 OrAssignSlowCase(RHS);
846 return *this;
847 }
848
849 /// Bitwise OR assignment operator.
850 ///
851 /// Performs a bitwise OR operation on this APInt and RHS. RHS is
852 /// logically zero-extended or truncated to match the bit-width of
853 /// the LHS.
854 APInt &operator|=(uint64_t RHS) {
855 if (isSingleWord()) {
856 U.VAL |= RHS;
857 return clearUnusedBits();
858 }
859 U.pVal[0] |= RHS;
860 return *this;
861 }
862
863 /// Bitwise XOR assignment operator.
864 ///
865 /// Performs a bitwise XOR operation on this APInt and RHS. The result is
866 /// assigned to *this.
867 ///
868 /// \returns *this after XORing with RHS.
869 APInt &operator^=(const APInt &RHS) {
870 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
871 if (isSingleWord())
872 U.VAL ^= RHS.U.VAL;
873 else
874 XorAssignSlowCase(RHS);
875 return *this;
876 }
877
878 /// Bitwise XOR assignment operator.
879 ///
880 /// Performs a bitwise XOR operation on this APInt and RHS. RHS is
881 /// logically zero-extended or truncated to match the bit-width of
882 /// the LHS.
883 APInt &operator^=(uint64_t RHS) {
884 if (isSingleWord()) {
885 U.VAL ^= RHS;
886 return clearUnusedBits();
887 }
888 U.pVal[0] ^= RHS;
889 return *this;
890 }
891
892 /// Multiplication assignment operator.
893 ///
894 /// Multiplies this APInt by RHS and assigns the result to *this.
895 ///
896 /// \returns *this
897 APInt &operator*=(const APInt &RHS);
898 APInt &operator*=(uint64_t RHS);
899
900 /// Addition assignment operator.
901 ///
902 /// Adds RHS to *this and assigns the result to *this.
903 ///
904 /// \returns *this
905 APInt &operator+=(const APInt &RHS);
906 APInt &operator+=(uint64_t RHS);
907
908 /// Subtraction assignment operator.
909 ///
910 /// Subtracts RHS from *this and assigns the result to *this.
911 ///
912 /// \returns *this
913 APInt &operator-=(const APInt &RHS);
914 APInt &operator-=(uint64_t RHS);
915
916 /// Left-shift assignment function.
917 ///
918 /// Shifts *this left by shiftAmt and assigns the result to *this.
919 ///
920 /// \returns *this after shifting left by ShiftAmt
921 APInt &operator<<=(unsigned ShiftAmt) {
922 assert(ShiftAmt <= BitWidth && "Invalid shift amount");
923 if (isSingleWord()) {
924 if (ShiftAmt == BitWidth)
925 U.VAL = 0;
926 else
927 U.VAL <<= ShiftAmt;
928 return clearUnusedBits();
929 }
930 shlSlowCase(ShiftAmt);
931 return *this;
932 }
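
A small sketch of the shift-assignment semantics (same header/namespace assumptions):

    APInt V(16, 0x00FF);
    V <<= 4;    // V == 0x0FF0
    V <<= 16;   // ShiftAmt == BitWidth is allowed and zeroes the value
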
933
934 /// Left-shift assignment function.
935 ///
936 /// Shifts *this left by shiftAmt and assigns the result to *this.
937 ///
938 /// \returns *this after shifting left by ShiftAmt
939 APInt &operator<<=(const APInt &ShiftAmt);
940
941 /// @}
942 /// \name Binary Operators
943 /// @{
944
945 /// Multiplication operator.
946 ///
947 /// Multiplies this APInt by RHS and returns the result.
948 APInt operator*(const APInt &RHS) const;
949
950 /// Left logical shift operator.
951 ///
952 /// Shifts this APInt left by \p Bits and returns the result.
953 APInt operator<<(unsigned Bits) const { return shl(Bits); }
954
955 /// Left logical shift operator.
956 ///
957 /// Shifts this APInt left by \p Bits and returns the result.
958 APInt operator<<(const APInt &Bits) const { return shl(Bits); }
959
960 /// Arithmetic right-shift function.
961 ///
962 /// Arithmetic right-shift this APInt by shiftAmt.
963 APInt ashr(unsigned ShiftAmt) const {
964 APInt R(*this);
965 R.ashrInPlace(ShiftAmt);
966 return R;
967 }
968
969 /// Arithmetic right-shift this APInt by ShiftAmt in place.
970 void ashrInPlace(unsigned ShiftAmt) {
971 assert(ShiftAmt <= BitWidth && "Invalid shift amount");
972 if (isSingleWord()) {
973 int64_t SExtVAL = SignExtend64(U.VAL, BitWidth);
974 if (ShiftAmt == BitWidth)
975 U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit.
976 else
977 U.VAL = SExtVAL >> ShiftAmt;
978 clearUnusedBits();
979 return;
980 }
981 ashrSlowCase(ShiftAmt);
982 }
983
984 /// Logical right-shift function.
985 ///
986 /// Logical right-shift this APInt by shiftAmt.
987 APInt lshr(unsigned shiftAmt) const {
988 APInt R(*this);
989 R.lshrInPlace(shiftAmt);
990 return R;
991 }
992
993 /// Logical right-shift this APInt by ShiftAmt in place.
994 void lshrInPlace(unsigned ShiftAmt) {
995 assert(ShiftAmt <= BitWidth && "Invalid shift amount");
996 if (isSingleWord()) {
997 if (ShiftAmt == BitWidth)
998 U.VAL = 0;
999 else
1000 U.VAL >>= ShiftAmt;
1001 return;
1002 }
1003 lshrSlowCase(ShiftAmt);
1004 }
1005
1006 /// Left-shift function.
1007 ///
1008 /// Left-shift this APInt by shiftAmt.
1009 APInt shl(unsigned shiftAmt) const {
1010 APInt R(*this);
1011 R <<= shiftAmt;
1012 return R;
1013 }
1014
1015 /// Rotate left by rotateAmt.
1016 APInt rotl(unsigned rotateAmt) const;
1017
1018 /// Rotate right by rotateAmt.
1019 APInt rotr(unsigned rotateAmt) const;
1020
1021 /// Arithmetic right-shift function.
1022 ///
1023 /// Arithmetic right-shift this APInt by shiftAmt.
1024 APInt ashr(const APInt &ShiftAmt) const {
1025 APInt R(*this);
1026 R.ashrInPlace(ShiftAmt);
1027 return R;
1028 }
1029
1030 /// Arithmetic right-shift this APInt by shiftAmt in place.
1031 void ashrInPlace(const APInt &shiftAmt);
1032
1033 /// Logical right-shift function.
1034 ///
1035 /// Logical right-shift this APInt by shiftAmt.
1036 APInt lshr(const APInt &ShiftAmt) const {
1037 APInt R(*this);
1038 R.lshrInPlace(ShiftAmt);
1039 return R;
1040 }
1041
1042 /// Logical right-shift this APInt by ShiftAmt in place.
1043 void lshrInPlace(const APInt &ShiftAmt);
1044
1045 /// Left-shift function.
1046 ///
1047 /// Left-shift this APInt by shiftAmt.
1048 APInt shl(const APInt &ShiftAmt) const {
1049 APInt R(*this);
1050 R <<= ShiftAmt;
1051 return R;
1052 }
1053
1054 /// Rotate left by rotateAmt.
1055 APInt rotl(const APInt &rotateAmt) const;
1056
1057 /// Rotate right by rotateAmt.
1058 APInt rotr(const APInt &rotateAmt) const;
1059
1060 /// Unsigned division operation.
1061 ///
1062 /// Perform an unsigned divide operation on this APInt by RHS. Both this and
1063 /// RHS are treated as unsigned quantities for purposes of this division.
1064 ///
1065 /// \returns a new APInt value containing the division result, rounded towards
1066 /// zero.
1067 APInt udiv(const APInt &RHS) const;
1068 APInt udiv(uint64_t RHS) const;
1069
1070 /// Signed division function for APInt.
1071 ///
1072 /// Signed divide this APInt by APInt RHS.
1073 ///
1074 /// The result is rounded towards zero.
1075 APInt sdiv(const APInt &RHS) const;
1076 APInt sdiv(int64_t RHS) const;
1077
1078 /// Unsigned remainder operation.
1079 ///
1080 /// Perform an unsigned remainder operation on this APInt with RHS being the
1081 /// divisor. Both this and RHS are treated as unsigned quantities for purposes
1082 /// of this operation. Note that this is a true remainder operation and not a
1083 /// modulo operation because the sign follows the sign of the dividend which
1084 /// is *this.
1085 ///
1086 /// \returns a new APInt value containing the remainder result
1087 APInt urem(const APInt &RHS) const;
1088 uint64_t urem(uint64_t RHS) const;
1089
1090 /// Function for signed remainder operation.
1091 ///
1092 /// Signed remainder operation on APInt.
1093 APInt srem(const APInt &RHS) const;
1094 int64_t srem(int64_t RHS) const;
1095
1096 /// Dual division/remainder interface.
1097 ///
1098 /// Sometimes it is convenient to divide two APInt values and obtain both the
1099 /// quotient and remainder. This function does both operations in the same
1100 /// computation making it a little more efficient. The pair of input arguments
1101 /// may overlap with the pair of output arguments. It is safe to call
1102 /// udivrem(X, Y, X, Y), for example.
1103 static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
1104 APInt &Remainder);
1105 static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient,
1106 uint64_t &Remainder);
1107
1108 static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
1109 APInt &Remainder);
1110 static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient,
1111 int64_t &Remainder);
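
A sketch of the dual interface (assuming llvm/ADT/APInt.h and namespace llvm):

    APInt Num(32, 7), Den(32, 2), Q(32, 0), R(32, 0);
    APInt::udivrem(Num, Den, Q, R);      // Q == 3, R == 1 from one computation
    APInt::udivrem(Num, Den, Num, Den);  // aliasing outputs with inputs is safe
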
1112
1113 // Operations that return overflow indicators.
1114 APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
1115 APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
1116 APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
1117 APInt usub_ov(const APInt &RHS, bool &Overflow) const;
1118 APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
1119 APInt smul_ov(const APInt &RHS, bool &Overflow) const;
1120 APInt umul_ov(const APInt &RHS, bool &Overflow) const;
1121 APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
1122 APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
1123
1124 // Operations that saturate
1125 APInt sadd_sat(const APInt &RHS) const;
1126 APInt uadd_sat(const APInt &RHS) const;
1127 APInt ssub_sat(const APInt &RHS) const;
1128 APInt usub_sat(const APInt &RHS) const;
1129 APInt smul_sat(const APInt &RHS) const;
1130 APInt umul_sat(const APInt &RHS) const;
1131 APInt sshl_sat(const APInt &RHS) const;
1132 APInt ushl_sat(const APInt &RHS) const;
1133
1134 /// Array-indexing support.
1135 ///
1136 /// \returns the bit value at bitPosition
1137 bool operator[](unsigned bitPosition) const {
1138 assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
1139 return (maskBit(bitPosition) & getWord(bitPosition)) != 0;
1140 }
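
For example (same header/namespace assumptions):

    APInt X(8, 0b00010100);
    bool Bit2 = X[2];   // true: bit 2 is set
    bool Bit3 = X[3];   // false
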
1141
1142 /// @}
1143 /// \name Comparison Operators
1144 /// @{
1145
1146 /// Equality operator.
1147 ///
1148 /// Compares this APInt with RHS for the validity of the equality
1149 /// relationship.
1150 bool operator==(const APInt &RHS) const {
1151 assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
1152 if (isSingleWord())
1153 return U.VAL == RHS.U.VAL;
1154 return EqualSlowCase(RHS);
1155 }
1156
1157 /// Equality operator.
1158 ///
1159 /// Compares this APInt with a uint64_t for the validity of the equality
1160 /// relationship.
1161 ///
1162 /// \returns true if *this == Val
1163 bool operator==(uint64_t Val) const {
1164 return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val;
1165 }
1166
1167 /// Equality comparison.
1168 ///
1169 /// Compares this APInt with RHS for the validity of the equality
1170 /// relationship.
1171 ///
1172 /// \returns true if *this == Val
1173 bool eq(const APInt &RHS) const { return (*this) == RHS; }
1174
1175 /// Inequality operator.
1176 ///
1177 /// Compares this APInt with RHS for the validity of the inequality
1178 /// relationship.
1179 ///
1180 /// \returns true if *this != Val
1181 bool operator!=(const APInt &RHS) const { return !((*this) == RHS); }
1182
1183 /// Inequality operator.
1184 ///
1185 /// Compares this APInt with a uint64_t for the validity of the inequality
1186 /// relationship.
1187 ///
1188 /// \returns true if *this != Val
1189 bool operator!=(uint64_t Val) const { return !((*this) == Val); }
1190
1191 /// Inequality comparison
1192 ///
1193 /// Compares this APInt with RHS for the validity of the inequality
1194 /// relationship.
1195 ///
1196 /// \returns true if *this != Val
1197 bool ne(const APInt &RHS) const { return !((*this) == RHS); }
1198
1199 /// Unsigned less than comparison
1200 ///
1201 /// Regards both *this and RHS as unsigned quantities and compares them for
1202 /// the validity of the less-than relationship.
1203 ///
1204 /// \returns true if *this < RHS when both are considered unsigned.
1205 bool ult(const APInt &RHS) const { return compare(RHS) < 0; }
1206
1207 /// Unsigned less than comparison
1208 ///
1209 /// Regards *this as an unsigned quantity and compares it with RHS for
1210 /// the validity of the less-than relationship.
1211 ///
1212 /// \returns true if *this < RHS when considered unsigned.
1213 bool ult(uint64_t RHS) const {
1214 // Only need to check active bits if not a single word.
1215 return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS;
1216 }
1217
1218 /// Signed less than comparison
1219 ///
1220 /// Regards both *this and RHS as signed quantities and compares them for
1221 /// validity of the less-than relationship.
1222 ///
1223 /// \returns true if *this < RHS when both are considered signed.
1224 bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; }
1225
1226 /// Signed less than comparison
1227 ///
1228 /// Regards *this as a signed quantity and compares it with RHS for
1229 /// the validity of the less-than relationship.
1230 ///
1231 /// \returns true if *this < RHS when considered signed.
1232 bool slt(int64_t RHS) const {
1233 return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative()
1234 : getSExtValue() < RHS;
1235 }
1236
1237 /// Unsigned less or equal comparison
1238 ///
1239 /// Regards both *this and RHS as unsigned quantities and compares them for
1240 /// validity of the less-or-equal relationship.
1241 ///
1242 /// \returns true if *this <= RHS when both are considered unsigned.
1243 bool ule(const APInt &RHS) const { return compare(RHS) <= 0; }
1244
1245 /// Unsigned less or equal comparison
1246 ///
1247 /// Regards *this as an unsigned quantity and compares it with RHS for
1248 /// the validity of the less-or-equal relationship.
1249 ///
1250 /// \returns true if *this <= RHS when considered unsigned.
1251 bool ule(uint64_t RHS) const { return !ugt(RHS); }
1252
1253 /// Signed less or equal comparison
1254 ///
1255 /// Regards both *this and RHS as signed quantities and compares them for
1256 /// validity of the less-or-equal relationship.
1257 ///
1258 /// \returns true if *this <= RHS when both are considered signed.
1259 bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; }
1260
1261 /// Signed less or equal comparison
1262 ///
1263 /// Regards *this as a signed quantity and compares it with RHS for the
1264 /// validity of the less-or-equal relationship.
1265 ///
1266 /// \returns true if *this <= RHS when considered signed.
1267 bool sle(uint64_t RHS) const { return !sgt(RHS); }
1268
1269 /// Unsigned greater than comparison
1270 ///
1271 /// Regards both *this and RHS as unsigned quantities and compares them for
1272 /// the validity of the greater-than relationship.
1273 ///
1274 /// \returns true if *this > RHS when both are considered unsigned.
1275 bool ugt(const APInt &RHS) const { return !ule(RHS); }
1276
1277 /// Unsigned greater than comparison
1278 ///
1279 /// Regards *this as an unsigned quantity and compares it with RHS for
1280 /// the validity of the greater-than relationship.
1281 ///
1282 /// \returns true if *this > RHS when considered unsigned.
1283 bool ugt(uint64_t RHS) const {
1284 // Only need to check active bits if not a single word.
1285 return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
1286 }
1287
1288 /// Signed greater than comparison
1289 ///
1290 /// Regards both *this and RHS as signed quantities and compares them for the
1291 /// validity of the greater-than relationship.
1292 ///
1293 /// \returns true if *this > RHS when both are considered signed.
1294 bool sgt(const APInt &RHS) const { return !sle(RHS); }
1295
1296 /// Signed greater than comparison
1297 ///
1298 /// Regards *this as a signed quantity and compares it with RHS for
1299 /// the validity of the greater-than relationship.
1300 ///
1301 /// \returns true if *this > RHS when considered signed.
1302 bool sgt(int64_t RHS) const {
1303 return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative()
1304 : getSExtValue() > RHS;
1305 }
1306
1307 /// Unsigned greater or equal comparison
1308 ///
1309 /// Regards both *this and RHS as unsigned quantities and compares them for
1310 /// validity of the greater-or-equal relationship.
1311 ///
1312 /// \returns true if *this >= RHS when both are considered unsigned.
1313 bool uge(const APInt &RHS) const { return !ult(RHS); }
1314
1315 /// Unsigned greater or equal comparison
1316 ///
1317 /// Regards *this as an unsigned quantity and compares it with RHS for
1318 /// the validity of the greater-or-equal relationship.
1319 ///
1320 /// \returns true if *this >= RHS when considered unsigned.
1321 bool uge(uint64_t RHS) const { return !ult(RHS); }
1322
1323 /// Signed greater or equal comparison
1324 ///
1325 /// Regards both *this and RHS as signed quantities and compares them for
1326 /// validity of the greater-or-equal relationship.
1327 ///
1328 /// \returns true if *this >= RHS when both are considered signed.
1329 bool sge(const APInt &RHS) const { return !slt(RHS); }
1330
1331 /// Signed greater or equal comparison
1332 ///
1333 /// Regards *this as a signed quantity and compares it with RHS for
1334 /// the validity of the greater-or-equal relationship.
1335 ///
1336 /// \returns true if *this >= RHS when considered signed.
1337 bool sge(int64_t RHS) const { return !slt(RHS); }
1338
1339 /// This operation tests if there are any pairs of corresponding bits
1340 /// between this APInt and RHS that are both set.
1341 bool intersects(const APInt &RHS) const {
1342 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
1343 if (isSingleWord())
1344 return (U.VAL & RHS.U.VAL) != 0;
1345 return intersectsSlowCase(RHS);
1346 }
1347
1348 /// This operation checks that all bits set in this APInt are also set in RHS.
1349 bool isSubsetOf(const APInt &RHS) const {
1350 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
1351 if (isSingleWord())
1352 return (U.VAL & ~RHS.U.VAL) == 0;
1353 return isSubsetOfSlowCase(RHS);
1354 }
1355
1356 /// @}
1357 /// \name Resizing Operators
1358 /// @{
1359
1360 /// Truncate to new width.
1361 ///
1362 /// Truncate the APInt to a specified width. It is an error to specify a width
1363 /// that is greater than or equal to the current width.
1364 APInt trunc(unsigned width) const;
1365
1366 /// Truncate to new width with unsigned saturation.
1367 ///
1368 /// If the APInt, treated as unsigned integer, can be losslessly truncated to
1369 /// the new bitwidth, then return truncated APInt. Else, return max value.
1370 APInt truncUSat(unsigned width) const;
1371
1372 /// Truncate to new width with signed saturation.
1373 ///
1374 /// If this APInt, treated as signed integer, can be losslessly truncated to
1375 /// the new bitwidth, then return truncated APInt. Else, return either
1376 /// signed min value if the APInt was negative, or signed max value.
1377 APInt truncSSat(unsigned width) const;
1378
1379 /// Sign extend to a new width.
1380 ///
1381 /// This operation sign extends the APInt to a new width. If the high order
1382 /// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
1383 /// It is an error to specify a width that is less than or equal to the
1384 /// current width.
1385 APInt sext(unsigned width) const;
1386
1387 /// Zero extend to a new width.
1388 ///
1389 /// This operation zero extends the APInt to a new width. The high order bits
1390 /// are filled with 0 bits. It is an error to specify a width that is less
1391 /// than or equal to the current width.
1392 APInt zext(unsigned width) const;
1393
1394 /// Sign extend or truncate to width
1395 ///
1396 /// Make this APInt have the bit width given by \p width. The value is sign
1397 /// extended, truncated, or left alone to make it that width.
1398 APInt sextOrTrunc(unsigned width) const;
1399
1400 /// Zero extend or truncate to width
1401 ///
1402 /// Make this APInt have the bit width given by \p width. The value is zero
1403 /// extended, truncated, or left alone to make it that width.
1404 APInt zextOrTrunc(unsigned width) const;
1405
1406 /// Truncate to width
1407 ///
1408 /// Make this APInt have the bit width given by \p width. The value is
1409 /// truncated or left alone to make it that width.
1410 APInt truncOrSelf(unsigned width) const;
1411
1412 /// Sign extend or truncate to width
1413 ///
1414 /// Make this APInt have the bit width given by \p width. The value is sign
1415 /// extended, or left alone to make it that width.
1416 APInt sextOrSelf(unsigned width) const;
1417
1418 /// Zero extend or truncate to width
1419 ///
1420 /// Make this APInt have the bit width given by \p width. The value is zero
1421 /// extended, or left alone to make it that width.
1422 APInt zextOrSelf(unsigned width) const;
1423
1424 /// @}
1425 /// \name Bit Manipulation Operators
1426 /// @{
1427
1428 /// Set every bit to 1.
1429 void setAllBits() {
1430 if (isSingleWord())
1431 U.VAL = WORDTYPE_MAX;
1432 else
1433 // Set all the bits in all the words.
1434 memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
1435 // Clear the unused ones
1436 clearUnusedBits();
1437 }
1438
1439 /// Set a given bit to 1.
1440 ///
1441 /// Set the bit at position "BitPosition" to 1.
1442 void setBit(unsigned BitPosition) {
1443 assert(BitPosition < BitWidth && "BitPosition out of range");
1444 WordType Mask = maskBit(BitPosition);
1445 if (isSingleWord())
1446 U.VAL |= Mask;
1447 else
1448 U.pVal[whichWord(BitPosition)] |= Mask;
1449 }
1450
1451 /// Set the sign bit to 1.
1452 void setSignBit() {
1453 setBit(BitWidth - 1);
1454 }
1455
1456 /// Set a given bit to a given value.
1457 void setBitVal(unsigned BitPosition, bool BitValue) {
1458 if (BitValue)
1459 setBit(BitPosition);
1460 else
1461 clearBit(BitPosition);
1462 }
1463
1464 /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
1465 /// This function handles "wrap" case when \p loBit >= \p hiBit, and calls
1466 /// setBits when \p loBit < \p hiBit.
1467 /// For \p loBit == \p hiBit wrap case, set every bit to 1.
1468 void setBitsWithWrap(unsigned loBit, unsigned hiBit) {
1469 assert(hiBit <= BitWidth && "hiBit out of range");
1470 assert(loBit <= BitWidth && "loBit out of range");
1471 if (loBit < hiBit) {
1472 setBits(loBit, hiBit);
1473 return;
1474 }
1475 setLowBits(hiBit);
1476 setHighBits(BitWidth - loBit);
1477 }
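
A sketch of the wrap case (assuming the header and namespace as before):

    APInt M(8, 0);
    M.setBitsWithWrap(6, 2);   // wraps: sets bits 6-7 and 0-1, so M == 0xC3
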
1478
1479 /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
1480 /// This function handles case when \p loBit <= \p hiBit.
1481 void setBits(unsigned loBit, unsigned hiBit) {
1482 assert(hiBit <= BitWidth && "hiBit out of range");
1483 assert(loBit <= BitWidth && "loBit out of range");
1484 assert(loBit <= hiBit && "loBit greater than hiBit");
1485 if (loBit == hiBit)
1486 return;
1487 if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
1488 uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
1489 mask <<= loBit;
1490 if (isSingleWord())
1491 U.VAL |= mask;
1492 else
1493 U.pVal[0] |= mask;
1494 } else {
1495 setBitsSlowCase(loBit, hiBit);
1496 }
1497 }
1498
1499 /// Set the top bits starting from loBit.
1500 void setBitsFrom(unsigned loBit) {
1501 return setBits(loBit, BitWidth);
1502 }
1503
1504 /// Set the bottom loBits bits.
1505 void setLowBits(unsigned loBits) {
1506 return setBits(0, loBits);
1507 }
1508
1509 /// Set the top hiBits bits.
1510 void setHighBits(unsigned hiBits) {
1511 return setBits(BitWidth - hiBits, BitWidth);
1512 }
1513
1514 /// Set every bit to 0.
1515 void clearAllBits() {
1516 if (isSingleWord())
1517 U.VAL = 0;
1518 else
1519 memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE);
1520 }
1521
1522 /// Set a given bit to 0.
1523 ///
1524 /// Set the bit at position "BitPosition" to 0.
1525 void clearBit(unsigned BitPosition) {
1526 assert(BitPosition < BitWidth && "BitPosition out of range");
1527 WordType Mask = ~maskBit(BitPosition);
1528 if (isSingleWord())
1529 U.VAL &= Mask;
1530 else
1531 U.pVal[whichWord(BitPosition)] &= Mask;
1532 }
1533
1534 /// Set bottom loBits bits to 0.
1535 void clearLowBits(unsigned loBits) {
1536 assert(loBits <= BitWidth && "More bits than bitwidth");
1537 APInt Keep = getHighBitsSet(BitWidth, BitWidth - loBits);
1538 *this &= Keep;
1539 }
1540
1541 /// Set the sign bit to 0.
1542 void clearSignBit() {
1543 clearBit(BitWidth - 1);
1544 }
1545
1546 /// Toggle every bit to its opposite value.
1547 void flipAllBits() {
1548 if (isSingleWord()) {
1549 U.VAL ^= WORDTYPE_MAX;
1550 clearUnusedBits();
1551 } else {
1552 flipAllBitsSlowCase();
1553 }
1554 }
1555
1556 /// Toggles a given bit to its opposite value.
1557 ///
1558 /// Toggle the bit at position "bitPosition" to its
1559 /// opposite value.
1560 void flipBit(unsigned bitPosition);
1561
1562 /// Negate this APInt in place.
1563 void negate() {
1564 flipAllBits();
1565 ++(*this);
1566 }
1567
1568 /// Insert the bits from a smaller APInt starting at bitPosition.
1569 void insertBits(const APInt &SubBits, unsigned bitPosition);
1570 void insertBits(uint64_t SubBits, unsigned bitPosition, unsigned numBits);
1571
1572 /// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
1573 APInt extractBits(unsigned numBits, unsigned bitPosition) const;
1574 uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const;
1575
1576 /// @}
1577 /// \name Value Characterization Functions
1578 /// @{
1579
1580 /// Return the number of bits in the APInt.
1581 unsigned getBitWidth() const { return BitWidth; }
1582
1583 /// Get the number of words.
1584 ///
1585 /// Here one word's bitwidth equals that of uint64_t.
1586 ///
1587 /// \returns the number of words to hold the integer value of this APInt.
1588 unsigned getNumWords() const { return getNumWords(BitWidth); }
1589
1590 /// Get the number of words.
1591 ///
1592 /// *NOTE* Here one word's bitwidth equals that of uint64_t.
1593 ///
1594 /// \returns the number of words to hold the integer value with a given bit
1595 /// width.
1596 static unsigned getNumWords(unsigned BitWidth) {
1597 return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
1598 }
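
Worked example (the values follow directly from the formula above):

    unsigned W1 = APInt::getNumWords(64);   // 1: 64 bits fit in one uint64_t
    unsigned W2 = APInt::getNumWords(65);   // 2: (65 + 63) / 64
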
1599
1600 /// Compute the number of active bits in the value
1601 ///
1602 /// This function returns the number of active bits which is defined as the
1603 /// bit width minus the number of leading zeros. This is used in several
1604 /// computations to see how "wide" the value is.
1605 unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); }
1606
1607 /// Compute the number of active words in the value of this APInt.
1608 ///
1609 /// This is used in conjunction with getActiveData to extract the raw value of
1610 /// the APInt.
1611 unsigned getActiveWords() const {
1612 unsigned numActiveBits = getActiveBits();
1613 return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
1614 }
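
For instance (assuming llvm/ADT/APInt.h and namespace llvm):

    APInt V(128, 0xFF);
    unsigned Bits  = V.getActiveBits();    // 8: 128 minus 120 leading zeros
    unsigned Words = V.getActiveWords();   // 1: the value fits in the low word
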
1615
1616 /// Get the minimum bit size for this signed APInt
1617 ///
1618 /// Computes the minimum bit width for this APInt while considering it to be a
1619 /// signed (and probably negative) value. If the value is not negative, this
1620 /// function returns the same value as getActiveBits()+1. Otherwise, it
1621 /// returns the smallest bit width that will retain the negative value. For
1622 /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
1623 /// for -1, this function will always return 1.
1624 unsigned getMinSignedBits() const { return BitWidth - getNumSignBits() + 1; }
1625
1626 /// Get zero extended value
1627 ///
1628 /// This method attempts to return the value of this APInt as a zero extended
1629 /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
1630 /// uint64_t. Otherwise an assertion will result.
1631 uint64_t getZExtValue() const {
1632 if (isSingleWord())
1633 return U.VAL;
1634 assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
1635 return U.pVal[0];
1636 }
1637
1638 /// Get sign extended value
1639 ///
1640 /// This method attempts to return the value of this APInt as a sign extended
1641 /// int64_t. The bit width must be <= 64 or the value must fit within an
1642 /// int64_t. Otherwise an assertion will result.
1643 int64_t getSExtValue() const {
1644 if (isSingleWord())
1645 return SignExtend64(U.VAL, BitWidth);
1646 assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
1647 return int64_t(U.pVal[0]);
1648 }
1649
1650 /// Get bits required for string value.
1651 ///
1652 /// This method determines how many bits are required to hold the APInt
1653 /// equivalent of the string given by \p str.
1654 static unsigned getBitsNeeded(StringRef str, uint8_t radix);
1655
1656 /// The APInt version of the countLeadingZeros functions in
1657 /// MathExtras.h.
1658 ///
1659 /// It counts the number of zeros from the most significant bit to the first
1660 /// one bit.
1661 ///
1662 /// \returns BitWidth if the value is zero, otherwise returns the number of
1663 /// zeros from the most significant bit to the first one bit.
1664 unsigned countLeadingZeros() const {
1665 if (isSingleWord()) {
1666 unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
1667 return llvm::countLeadingZeros(U.VAL) - unusedBits;
1668 }
1669 return countLeadingZerosSlowCase();
1670 }
1671
1672 /// Count the number of leading one bits.
1673 ///
1674 /// This function is an APInt version of the countLeadingOnes
1675 /// functions in MathExtras.h. It counts the number of ones from the most
1676 /// significant bit to the first zero bit.
1677 ///
1678 /// \returns 0 if the high order bit is not set, otherwise returns the number
1679 /// of 1 bits from the most significant to the least
1680 unsigned countLeadingOnes() const {
1681 if (isSingleWord())
1682 return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth));
1683 return countLeadingOnesSlowCase();
1684 }
1685
1686 /// Computes the number of leading bits of this APInt that are equal to its
1687 /// sign bit.
1688 unsigned getNumSignBits() const {
1689 return isNegative() ? countLeadingOnes() : countLeadingZeros();
1690 }
1691
1692 /// Count the number of trailing zero bits.
1693 ///
1694 /// This function is an APInt version of the countTrailingZeros
1695 /// functions in MathExtras.h. It counts the number of zeros from the least
1696 /// significant bit to the first set bit.
1697 ///
1698 /// \returns BitWidth if the value is zero, otherwise returns the number of
1699 /// zeros from the least significant bit to the first one bit.
1700 unsigned countTrailingZeros() const {
1701 if (isSingleWord()) {
1702 unsigned TrailingZeros = llvm::countTrailingZeros(U.VAL);
1703 return (TrailingZeros > BitWidth ? BitWidth : TrailingZeros);
1704 }
1705 return countTrailingZerosSlowCase();
1706 }
1707
1708 /// Count the number of trailing one bits.
1709 ///
1710 /// This function is an APInt version of the countTrailingOnes
1711 /// functions in MathExtras.h. It counts the number of ones from the least
1712 /// significant bit to the first zero bit.
1713 ///
1714 /// \returns BitWidth if the value is all ones, otherwise returns the number
1715 /// of ones from the least significant bit to the first zero bit.
1716 unsigned countTrailingOnes() const {
1717 if (isSingleWord())
1718 return llvm::countTrailingOnes(U.VAL);
1719 return countTrailingOnesSlowCase();
1720 }
1721
1722 /// Count the number of bits set.
1723 ///
1724 /// This function is an APInt version of the countPopulation functions
1725 /// in MathExtras.h. It counts the number of 1 bits in the APInt value.
1726 ///
1727 /// \returns 0 if the value is zero, otherwise returns the number of set bits.
1728 unsigned countPopulation() const {
1729 if (isSingleWord())
1730 return llvm::countPopulation(U.VAL);
1731 return countPopulationSlowCase();
1732 }
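
The three counting functions on one value, as a sketch (same assumptions):

    APInt V(16, 0x0010);
    unsigned LZ  = V.countLeadingZeros();   // 11: bits 15..5 are zero
    unsigned TZ  = V.countTrailingZeros();  //  4: bits 3..0 are zero
    unsigned Pop = V.countPopulation();     //  1: only bit 4 is set
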
1733
1734 /// @}
1735 /// \name Conversion Functions
1736 /// @{
1737 void print(raw_ostream &OS, bool isSigned) const;
1738
1739 /// Converts an APInt to a string and appends it to Str. Str is commonly a
1740 /// SmallString.
1741 void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
1742 bool formatAsCLiteral = false) const;
1743
1744 /// Considers the APInt to be unsigned and converts it into a string in the
1745 /// radix given. The radix can be 2, 8, 10, 16, or 36.
1746 void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
1747 toString(Str, Radix, false, false);
1748 }
1749
1750 /// Considers the APInt to be signed and converts it into a string in the
1751 /// radix given. The radix can be 2, 8, 10, 16, or 36.
1752 void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
1753 toString(Str, Radix, true, false);
1754 }
1755
1756 /// \returns a byte-swapped representation of this APInt Value.
1757 APInt byteSwap() const;
1758
1759 /// \returns the value with the bit representation reversed of this APInt
1760 /// Value.
1761 APInt reverseBits() const;
1762
1763 /// Converts this APInt to a double value.
1764 double roundToDouble(bool isSigned) const;
1765
1766 /// Converts this unsigned APInt to a double value.
1767 double roundToDouble() const { return roundToDouble(false); }
1768
1769 /// Converts this signed APInt to a double value.
1770 double signedRoundToDouble() const { return roundToDouble(true); }
1771
1772 /// Converts APInt bits to a double
1773 ///
1774 /// The conversion does not do a translation from integer to double, it just
1775 /// re-interprets the bits as a double. Note that it is valid to do this on
1776 /// any bit width. Exactly 64 bits will be translated.
1777 double bitsToDouble() const {
1778 return BitsToDouble(getWord(0));
1779 }
1780
1781 /// Converts APInt bits to a float
1782 ///
1783 /// The conversion does not do a translation from integer to float, it just
1784 /// re-interprets the bits as a float. Note that it is valid to do this on
1785 /// any bit width. Exactly 32 bits will be translated.
1786 float bitsToFloat() const {
1787 return BitsToFloat(static_cast<uint32_t>(getWord(0)));
1788 }
1789
1790 /// Converts a double to APInt bits.
1791 ///
1792 /// The conversion does not do a translation from double to integer, it just
1793 /// re-interprets the bits of the double.
1794 static APInt doubleToBits(double V) {
1795 return APInt(sizeof(double) * CHAR_BIT, DoubleToBits(V));
1796 }
1797
1798 /// Converts a float to APInt bits.
1799 ///
1800 /// The conversion does not do a translation from float to integer, it just
1801 /// re-interprets the bits of the float.
1802 static APInt floatToBits(float V) {
1803 return APInt(sizeof(float) * CHAR_BIT, FloatToBits(V));
1804 }
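
A round-trip sketch (assuming llvm/ADT/APInt.h and <cassert> are included):

    APInt Bits = APInt::floatToBits(1.0f);  // IEEE-754 pattern, no numeric conversion
    float F = Bits.bitsToFloat();           // reinterprets back to 1.0f
    assert(Bits.getZExtValue() == 0x3F800000u && F == 1.0f);
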
1805
1806 /// @}
1807 /// \name Mathematics Operations
1808 /// @{
1809
1810 /// \returns the floor log base 2 of this APInt.
1811 unsigned logBase2() const { return getActiveBits() - 1; }
1812
1813 /// \returns the ceil log base 2 of this APInt.
1814 unsigned ceilLogBase2() const {
1815 APInt temp(*this);
1816 --temp;
1817 return temp.getActiveBits();
1818 }
1819
1820 /// \returns the nearest log base 2 of this APInt. Ties round up.
1821 ///
1822 /// NOTE: When we have a BitWidth of 1, we define:
1823 ///
1824 /// log2(0) = UINT32_MAX
1825 /// log2(1) = 0
1826 ///
1827 /// to get around any mathematical concerns resulting from
1828 /// referencing 2 in a space where 2 does not exist.
1829 unsigned nearestLogBase2() const {
1830 // Special case when we have a bitwidth of 1. If VAL is 1, then we
1831 // get 0. If VAL is 0, we get WORDTYPE_MAX which gets truncated to
1832 // UINT32_MAX.
1833 if (BitWidth == 1)
1834 return U.VAL - 1;
1835
1836 // Handle the zero case.
1837 if (isNullValue())
1838 return UINT32_MAX;
1839
1840 // The non-zero case is handled by computing:
1841 //
1842 // nearestLogBase2(x) = logBase2(x) + x[logBase2(x)-1].
1843 //
1844 // where x[i] is referring to the value of the ith bit of x.
1845 unsigned lg = logBase2();
1846 return lg + unsigned((*this)[lg - 1]);
1847 }
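
Two worked cases (same header/namespace assumptions):

    APInt Five(32, 5), Six(32, 6);
    unsigned L5 = Five.nearestLogBase2();   // 2: 5 is closer to 4 than to 8
    unsigned L6 = Six.nearestLogBase2();    // 3: 6 is midway, and ties round up
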
1848
1849 /// \returns the log base 2 of this APInt if it's an exact power of two, -1
1850 /// otherwise
1851 int32_t exactLogBase2() const {
1852 if (!isPowerOf2())
1853 return -1;
1854 return logBase2();
1855 }
1856
1857 /// Compute the square root
1858 APInt sqrt() const;
1859
1860 /// Get the absolute value.
1861 ///
1862 /// If *this is < 0 then return -(*this), otherwise return *this.
1863 APInt abs() const {
1864 if (isNegative())
1865 return -(*this);
1866 return *this;
1867 }
1868
1869 /// \returns the multiplicative inverse for a given modulo.
1870 APInt multiplicativeInverse(const APInt &modulo) const;
1871
1872 /// @}
1873 /// \name Support for division by constant
1874 /// @{
1875
1876 /// Calculate the magic number for signed division by a constant.
1877 struct ms;
1878 ms magic() const;
1879
1880 /// Calculate the magic number for unsigned division by a constant.
1881 struct mu;
1882 mu magicu(unsigned LeadingZeros = 0) const;
1883
1884 /// @}
1885 /// \name Building-block Operations for APInt and APFloat
1886 /// @{
1887
1888 // These building block operations operate on a representation of arbitrary
1889 // precision, two's-complement, bignum integer values. They should be
1890 // sufficient to implement APInt and APFloat bignum requirements. Inputs are
1891 // generally a pointer to the base of an array of integer parts, representing
1892 // an unsigned bignum, and a count of how many parts there are.
1893
1894 /// Sets the least significant part of a bignum to the input value, and zeroes
1895 /// out higher parts.
1896 static void tcSet(WordType *, WordType, unsigned);
1897
1898 /// Assign one bignum to another.
1899 static void tcAssign(WordType *, const WordType *, unsigned);
1900
1901 /// Returns true if a bignum is zero, false otherwise.
1902 static bool tcIsZero(const WordType *, unsigned);
1903
1904 /// Extract the given bit of a bignum; returns 0 or 1. Zero-based.
1905 static int tcExtractBit(const WordType *, unsigned bit);
1906
1907 /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
1908 /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
1909 /// significant bit of DST. All high bits above srcBITS in DST are
1910 /// zero-filled.
1911 static void tcExtract(WordType *, unsigned dstCount,
1912 const WordType *, unsigned srcBits,
1913 unsigned srcLSB);
1914
1915 /// Set the given bit of a bignum. Zero-based.
1916 static void tcSetBit(WordType *, unsigned bit);
1917
1918 /// Clear the given bit of a bignum. Zero-based.
1919 static void tcClearBit(WordType *, unsigned bit);
1920
1921 /// Returns the bit number of the least or most significant set bit of a
1922 /// number. If the input number has no bits set, -1U is returned.
1923 static unsigned tcLSB(const WordType *, unsigned n);
1924 static unsigned tcMSB(const WordType *parts, unsigned n);
1925
1926 /// Negate a bignum in-place.
1927 static void tcNegate(WordType *, unsigned);
1928
1929 /// DST += RHS + CARRY where CARRY is zero or one. Returns the carry flag.
1930 static WordType tcAdd(WordType *, const WordType *,
1931 WordType carry, unsigned);
1932 /// DST += RHS. Returns the carry flag.
1933 static WordType tcAddPart(WordType *, WordType, unsigned);
1934
1935 /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
1936 static WordType tcSubtract(WordType *, const WordType *,
1937 WordType carry, unsigned);
1938 /// DST -= RHS. Returns the carry flag.
1939 static WordType tcSubtractPart(WordType *, WordType, unsigned);
1940
1941 /// DST += SRC * MULTIPLIER + PART if add is true
1942 /// DST = SRC * MULTIPLIER + PART if add is false
1943 ///
1944 /// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC they must
1945 /// start at the same point, i.e. DST == SRC.
1946 ///
1947 /// If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned.
1948 /// Otherwise DST is filled with the least significant DSTPARTS parts of the
1949 /// result, and if all of the omitted higher parts were zero return zero,
1950 /// otherwise overflow occurred and return one.
1951 static int tcMultiplyPart(WordType *dst, const WordType *src,
1952 WordType multiplier, WordType carry,
1953 unsigned srcParts, unsigned dstParts,
1954 bool add);
1955
1956 /// DST = LHS * RHS, where DST has the same width as the operands and is
1957 /// filled with the least significant parts of the result. Returns one if
1958 /// overflow occurred, otherwise zero. DST must be disjoint from both
1959 /// operands.
1960 static int tcMultiply(WordType *, const WordType *, const WordType *,
1961 unsigned);
1962
1963 /// DST = LHS * RHS, where DST has width the sum of the widths of the
1964 /// operands. No overflow occurs. DST must be disjoint from both operands.
1965 static void tcFullMultiply(WordType *, const WordType *,
1966 const WordType *, unsigned, unsigned);
1967
1968 /// If RHS is zero, LHS and REMAINDER are left unchanged, return one.
1969 /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set
1970 /// REMAINDER to the remainder, return zero. i.e.
1971 ///
1972 /// OLD_LHS = RHS * LHS + REMAINDER
1973 ///
1974 /// SCRATCH is a bignum of the same size as the operands and result for use by
1975 /// the routine; its contents need not be initialized and are destroyed. LHS,
1976 /// REMAINDER and SCRATCH must be distinct.
1977 static int tcDivide(WordType *lhs, const WordType *rhs,
1978 WordType *remainder, WordType *scratch,
1979 unsigned parts);
1980
1981 /// Shift a bignum left Count bits. Shifted in bits are zero. There are no
1982 /// restrictions on Count.
1983 static void tcShiftLeft(WordType *, unsigned Words, unsigned Count);
1984
1985 /// Shift a bignum right Count bits. Shifted in bits are zero. There are no
1986 /// restrictions on Count.
1987 static void tcShiftRight(WordType *, unsigned Words, unsigned Count);
1988
1989 /// The obvious AND, OR and XOR and complement operations.
1990 static void tcAnd(WordType *, const WordType *, unsigned);
1991 static void tcOr(WordType *, const WordType *, unsigned);
1992 static void tcXor(WordType *, const WordType *, unsigned);
1993 static void tcComplement(WordType *, unsigned);
1994
1995 /// Comparison (unsigned) of two bignums.
1996 static int tcCompare(const WordType *, const WordType *, unsigned);
1997
1998 /// Increment a bignum in-place. Return the carry flag.
1999 static WordType tcIncrement(WordType *dst, unsigned parts) {
2000 return tcAddPart(dst, 1, parts);
2001 }
2002
2003 /// Decrement a bignum in-place. Return the borrow flag.
2004 static WordType tcDecrement(WordType *dst, unsigned parts) {
2005 return tcSubtractPart(dst, 1, parts);
2006 }
2007
2008 /// Set the least significant BITS and clear the rest.
2009 static void tcSetLeastSignificantBits(WordType *, unsigned, unsigned bits);
2010
2011 /// debug method
2012 void dump() const;
2013
2014 /// @}
2015};
2016
2017/// Magic data for optimising signed division by a constant.
2018struct APInt::ms {
2019 APInt m; ///< magic number
2020 unsigned s; ///< shift amount
2021};
2022
2023/// Magic data for optimising unsigned division by a constant.
2024struct APInt::mu {
2025 APInt m; ///< magic number
2026 bool a; ///< add indicator
2027 unsigned s; ///< shift amount
2028};
2029
2030inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }
2031
2032inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }
2033
2034/// Unary bitwise complement operator.
2035///
2036/// \returns an APInt that is the bitwise complement of \p v.
2037inline APInt operator~(APInt v) {
2038 v.flipAllBits();
2039 return v;
2040}
2041
2042inline APInt operator&(APInt a, const APInt &b) {
2043 a &= b;
2044 return a;
2045}
2046
2047inline APInt operator&(const APInt &a, APInt &&b) {
2048 b &= a;
2049 return std::move(b);
2050}
2051
2052inline APInt operator&(APInt a, uint64_t RHS) {
2053 a &= RHS;
2054 return a;
2055}
2056
2057inline APInt operator&(uint64_t LHS, APInt b) {
2058 b &= LHS;
2059 return b;
2060}
2061
2062inline APInt operator|(APInt a, const APInt &b) {
2063 a |= b;
2064 return a;
2065}
2066
2067inline APInt operator|(const APInt &a, APInt &&b) {
2068 b |= a;
2069 return std::move(b);
2070}
2071
2072inline APInt operator|(APInt a, uint64_t RHS) {
2073 a |= RHS;
2074 return a;
2075}
2076
2077inline APInt operator|(uint64_t LHS, APInt b) {
2078 b |= LHS;
2079 return b;
2080}
2081
2082inline APInt operator^(APInt a, const APInt &b) {
2083 a ^= b;
2084 return a;
2085}
2086
2087inline APInt operator^(const APInt &a, APInt &&b) {
2088 b ^= a;
2089 return std::move(b);
2090}
2091
2092inline APInt operator^(APInt a, uint64_t RHS) {
2093 a ^= RHS;
2094 return a;
2095}
2096
2097inline APInt operator^(uint64_t LHS, APInt b) {
2098 b ^= LHS;
2099 return b;
2100}
2101
2102inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
2103 I.print(OS, true);
2104 return OS;
2105}
2106
2107inline APInt operator-(APInt v) {
2108 v.negate();
2109 return v;
2110}
2111
2112inline APInt operator+(APInt a, const APInt &b) {
2113 a += b;
2114 return a;
2115}
2116
2117inline APInt operator+(const APInt &a, APInt &&b) {
2118 b += a;
2119 return std::move(b);
2120}
2121
2122inline APInt operator+(APInt a, uint64_t RHS) {
2123 a += RHS;
2124 return a;
2125}
2126
2127inline APInt operator+(uint64_t LHS, APInt b) {
2128 b += LHS;
2129 return b;
2130}
2131
2132inline APInt operator-(APInt a, const APInt &b) {
2133 a -= b;
2134 return a;
2135}
2136
2137inline APInt operator-(const APInt &a, APInt &&b) {
2138 b.negate();
2139 b += a;
2140 return std::move(b);
2141}
2142
2143inline APInt operator-(APInt a, uint64_t RHS) {
2144 a -= RHS;
2145 return a;
2146}
2147
2148inline APInt operator-(uint64_t LHS, APInt b) {
2149 b.negate();
2150 b += LHS;
2151 return b;
2152}
2153
2154inline APInt operator*(APInt a, uint64_t RHS) {
2155 a *= RHS;
2156 return a;
2157}
2158
2159inline APInt operator*(uint64_t LHS, APInt b) {
2160 b *= LHS;
2161 return b;
2162}
2163
2164
2165namespace APIntOps {
2166
2167/// Determine the smaller of two APInts considered to be signed.
2168inline const APInt &smin(const APInt &A, const APInt &B) {
2169 return A.slt(B) ? A : B;
2170}
2171
2172/// Determine the larger of two APInts considered to be signed.
2173inline const APInt &smax(const APInt &A, const APInt &B) {
2174 return A.sgt(B) ? A : B;
2175}
2176
2177/// Determine the smaller of two APInts considered to be unsigned.
2178inline const APInt &umin(const APInt &A, const APInt &B) {
2179 return A.ult(B) ? A : B;
2180}
2181
2182/// Determine the larger of two APInts considered to be unsigned.
2183inline const APInt &umax(const APInt &A, const APInt &B) {
2184 return A.ugt(B) ? A : B;
2185}
2186
2187/// Compute GCD of two unsigned APInt values.
2188///
2189/// This function returns the greatest common divisor of the two APInt values
2190/// using Stein's algorithm.
2191///
2192/// \returns the greatest common divisor of A and B.
2193APInt GreatestCommonDivisor(APInt A, APInt B);
2194
2195/// Converts the given APInt to a double value.
2196///
2197/// Treats the APInt as an unsigned value for conversion purposes.
2198inline double RoundAPIntToDouble(const APInt &APIVal) {
2199 return APIVal.roundToDouble();
2200}
2201
2202/// Converts the given APInt to a double value.
2203///
2204/// Treats the APInt as a signed value for conversion purposes.
2205inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
2206 return APIVal.signedRoundToDouble();
2207}
2208
2209/// Converts the given APInt to a float value.
2210inline float RoundAPIntToFloat(const APInt &APIVal) {
2211 return float(RoundAPIntToDouble(APIVal));
2212}
2213
2214/// Converts the given APInt to a float value.
2215///
2216/// Treats the APInt as a signed value for conversion purposes.
2217inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
2218 return float(APIVal.signedRoundToDouble());
2219}
2220
2221 /// Converts the given double value into an APInt.
2222///
2223 /// This function converts a double value to an APInt value.
2224APInt RoundDoubleToAPInt(double Double, unsigned width);
2225
2226 /// Converts a float value into an APInt.
2227///
2228/// Converts a float value into an APInt value.
2229inline APInt RoundFloatToAPInt(float Float, unsigned width) {
2230 return RoundDoubleToAPInt(double(Float), width);
2231}
2232
2233 /// Return A unsigned-divided by B, rounded by the given rounding mode.
2234APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
2235
2236/// Return A sign-divided by B, rounded by the given rounding mode.
2237APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
2238
2239/// Let q(n) = An^2 + Bn + C, and BW = bit width of the value range
2240/// (e.g. 32 for i32).
2241/// This function finds the smallest number n, such that
2242/// (a) n >= 0 and q(n) = 0, or
2243/// (b) n >= 1 and q(n-1) and q(n), when evaluated in the set of all
2244/// integers, belong to two different intervals [Rk, Rk+R),
2245/// where R = 2^BW, and k is an integer.
2246/// The idea here is to find when q(n) "overflows" 2^BW, while at the
2247/// same time "allowing" subtraction. In unsigned modulo arithmetic a
2248/// subtraction (treated as addition of negated numbers) would always
2249/// count as an overflow, but here we want to allow values to decrease
2250/// and increase as long as they are within the same interval.
2251/// Specifically, adding of two negative numbers should not cause an
2252/// overflow (as long as the magnitude does not exceed the bit width).
2253/// On the other hand, given a positive number, adding a negative
2254/// number to it can give a negative result, which would cause the
2255/// value to go from [-2^BW, 0) to [0, 2^BW). In that sense, zero is
2256/// treated as a special case of an overflow.
2257///
2258/// This function returns None if after finding k that minimizes the
2259/// positive solution to q(n) = kR, both solutions are contained between
2260/// two consecutive integers.
2261///
2262/// There are cases where q(n) > T, and q(n+1) < T (assuming evaluation
2263/// in arithmetic modulo 2^BW, and treating the values as signed) by the
2264/// virtue of *signed* overflow. This function will *not* find such an n,
2265/// however it may find a value of n satisfying the inequalities due to
2266/// an *unsigned* overflow (if the values are treated as unsigned).
2267/// To find a solution for a signed overflow, treat it as a problem of
2268 /// finding an unsigned overflow with a range width of BW-1.
2269///
2270/// The returned value may have a different bit width from the input
2271/// coefficients.
2272Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
2273 unsigned RangeWidth);
2274
2275/// Compare two values, and if they are different, return the position of the
2276/// most significant bit that is different in the values.
2277Optional<unsigned> GetMostSignificantDifferentBit(const APInt &A,
2278 const APInt &B);
2279
2280} // End of APIntOps namespace
2281
2282// See friend declaration above. This additional declaration is required in
2283 // order to compile LLVM with the IBM xlC compiler.
2284hash_code hash_value(const APInt &Arg);
2285
2286/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
2287/// with the integer held in IntVal.
2288void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);
2289
2290/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
2291/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
2292void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes);
2293
2294/// Provide DenseMapInfo for APInt.
2295template <> struct DenseMapInfo<APInt> {
2296 static inline APInt getEmptyKey() {
2297 APInt V(nullptr, 0);
2298 V.U.VAL = 0;
2299 return V;
2300 }
2301
2302 static inline APInt getTombstoneKey() {
2303 APInt V(nullptr, 0);
2304 V.U.VAL = 1;
2305 return V;
2306 }
2307
2308 static unsigned getHashValue(const APInt &Key);
2309
2310 static bool isEqual(const APInt &LHS, const APInt &RHS) {
2311 return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
2312 }
2313};
2314
2315} // namespace llvm
2316
2317#endif

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include/clang/AST/Type.h

1//===- Type.h - C Language Family Type Representation -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// C Language Family Type Representation
11///
12/// This file defines the clang::Type interface and subclasses, used to
13/// represent types for languages in the C family.
14//
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_CLANG_AST_TYPE_H
18#define LLVM_CLANG_AST_TYPE_H
19
20#include "clang/AST/DependenceFlags.h"
21#include "clang/AST/NestedNameSpecifier.h"
22#include "clang/AST/TemplateName.h"
23#include "clang/Basic/AddressSpaces.h"
24#include "clang/Basic/AttrKinds.h"
25#include "clang/Basic/Diagnostic.h"
26#include "clang/Basic/ExceptionSpecificationType.h"
27#include "clang/Basic/LLVM.h"
28#include "clang/Basic/Linkage.h"
29#include "clang/Basic/PartialDiagnostic.h"
30#include "clang/Basic/SourceLocation.h"
31#include "clang/Basic/Specifiers.h"
32#include "clang/Basic/Visibility.h"
33#include "llvm/ADT/APInt.h"
34#include "llvm/ADT/APSInt.h"
35#include "llvm/ADT/ArrayRef.h"
36#include "llvm/ADT/FoldingSet.h"
37#include "llvm/ADT/None.h"
38#include "llvm/ADT/Optional.h"
39#include "llvm/ADT/PointerIntPair.h"
40#include "llvm/ADT/PointerUnion.h"
41#include "llvm/ADT/StringRef.h"
42#include "llvm/ADT/Twine.h"
43#include "llvm/ADT/iterator_range.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/PointerLikeTypeTraits.h"
48#include "llvm/Support/TrailingObjects.h"
49#include "llvm/Support/type_traits.h"
50#include <cassert>
51#include <cstddef>
52#include <cstdint>
53#include <cstring>
54#include <string>
55#include <type_traits>
56#include <utility>
57
58namespace clang {
59
60class ExtQuals;
61class QualType;
62class ConceptDecl;
63class TagDecl;
64class TemplateParameterList;
65class Type;
66
67enum {
68 TypeAlignmentInBits = 4,
69 TypeAlignment = 1 << TypeAlignmentInBits
70};
71
72namespace serialization {
73 template <class T> class AbstractTypeReader;
74 template <class T> class AbstractTypeWriter;
75}
76
77} // namespace clang
78
79namespace llvm {
80
81 template <typename T>
82 struct PointerLikeTypeTraits;
83 template<>
84 struct PointerLikeTypeTraits< ::clang::Type*> {
85 static inline void *getAsVoidPointer(::clang::Type *P) { return P; }
86
87 static inline ::clang::Type *getFromVoidPointer(void *P) {
88 return static_cast< ::clang::Type*>(P);
89 }
90
91 static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
92 };
93
94 template<>
95 struct PointerLikeTypeTraits< ::clang::ExtQuals*> {
96 static inline void *getAsVoidPointer(::clang::ExtQuals *P) { return P; }
97
98 static inline ::clang::ExtQuals *getFromVoidPointer(void *P) {
99 return static_cast< ::clang::ExtQuals*>(P);
100 }
101
102 static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
103 };
104
105} // namespace llvm
106
107namespace clang {
108
109class ASTContext;
110template <typename> class CanQual;
111class CXXRecordDecl;
112class DeclContext;
113class EnumDecl;
114class Expr;
115class ExtQualsTypeCommonBase;
116class FunctionDecl;
117class IdentifierInfo;
118class NamedDecl;
119class ObjCInterfaceDecl;
120class ObjCProtocolDecl;
121class ObjCTypeParamDecl;
122struct PrintingPolicy;
123class RecordDecl;
124class Stmt;
125class TagDecl;
126class TemplateArgument;
127class TemplateArgumentListInfo;
128class TemplateArgumentLoc;
129class TemplateTypeParmDecl;
130class TypedefNameDecl;
131class UnresolvedUsingTypenameDecl;
132
133using CanQualType = CanQual<Type>;
134
135// Provide forward declarations for all of the *Type classes.
136#define TYPE(Class, Base) class Class##Type;
137#include "clang/AST/TypeNodes.inc"
138
139/// The collection of all-type qualifiers we support.
140/// Clang supports the following independent qualifiers:
141/// * C99: const, volatile, and restrict
142/// * MS: __unaligned
143/// * Embedded C (TR18037): address spaces
144/// * Objective C: the GC attributes (none, weak, or strong)
145class Qualifiers {
146public:
147 enum TQ { // NOTE: These flags must be kept in sync with DeclSpec::TQ.
148 Const = 0x1,
149 Restrict = 0x2,
150 Volatile = 0x4,
151 CVRMask = Const | Volatile | Restrict
152 };
153
154 enum GC {
155 GCNone = 0,
156 Weak,
157 Strong
158 };
159
160 enum ObjCLifetime {
161 /// There is no lifetime qualification on this type.
162 OCL_None,
163
164 /// This object can be modified without requiring retains or
165 /// releases.
166 OCL_ExplicitNone,
167
168 /// Assigning into this object requires the old value to be
169 /// released and the new value to be retained. The timing of the
170 /// release of the old value is inexact: it may be moved to
171 /// immediately after the last known point where the value is
172 /// live.
173 OCL_Strong,
174
175 /// Reading or writing from this object requires a barrier call.
176 OCL_Weak,
177
178 /// Assigning into this object requires a lifetime extension.
179 OCL_Autoreleasing
180 };
181
182 enum {
183 /// The maximum supported address space number.
184 /// 23 bits should be enough for anyone.
185 MaxAddressSpace = 0x7fffffu,
186
187 /// The width of the "fast" qualifier mask.
188 FastWidth = 3,
189
190 /// The fast qualifier mask.
191 FastMask = (1 << FastWidth) - 1
192 };
193
194 /// Returns the common set of qualifiers while removing them from
195 /// the given sets.
196 static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) {
197 // If both are only CVR-qualified, bit operations are sufficient.
198 if (!(L.Mask & ~CVRMask) && !(R.Mask & ~CVRMask)) {
199 Qualifiers Q;
200 Q.Mask = L.Mask & R.Mask;
201 L.Mask &= ~Q.Mask;
202 R.Mask &= ~Q.Mask;
203 return Q;
204 }
205
206 Qualifiers Q;
207 unsigned CommonCRV = L.getCVRQualifiers() & R.getCVRQualifiers();
208 Q.addCVRQualifiers(CommonCRV);
209 L.removeCVRQualifiers(CommonCRV);
210 R.removeCVRQualifiers(CommonCRV);
211
212 if (L.getObjCGCAttr() == R.getObjCGCAttr()) {
213 Q.setObjCGCAttr(L.getObjCGCAttr());
214 L.removeObjCGCAttr();
215 R.removeObjCGCAttr();
216 }
217
218 if (L.getObjCLifetime() == R.getObjCLifetime()) {
219 Q.setObjCLifetime(L.getObjCLifetime());
220 L.removeObjCLifetime();
221 R.removeObjCLifetime();
222 }
223
224 if (L.getAddressSpace() == R.getAddressSpace()) {
225 Q.setAddressSpace(L.getAddressSpace());
226 L.removeAddressSpace();
227 R.removeAddressSpace();
228 }
229 return Q;
230 }
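A worked sketch of removeCommonQualifiers (illustrative; note that both arguments are updated in place):

#include "clang/AST/Type.h"
using namespace clang;

void removeCommonExample() {
  Qualifiers L = Qualifiers::fromCVRMask(Qualifiers::Const | Qualifiers::Volatile);
  Qualifiers R = Qualifiers::fromCVRMask(Qualifiers::Const);
  Qualifiers Common = Qualifiers::removeCommonQualifiers(L, R);
  // Common is 'const'; L has been reduced to 'volatile' and R is now empty.
  (void)Common;
}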
231
232 static Qualifiers fromFastMask(unsigned Mask) {
233 Qualifiers Qs;
234 Qs.addFastQualifiers(Mask);
235 return Qs;
236 }
237
238 static Qualifiers fromCVRMask(unsigned CVR) {
239 Qualifiers Qs;
240 Qs.addCVRQualifiers(CVR);
241 return Qs;
242 }
243
244 static Qualifiers fromCVRUMask(unsigned CVRU) {
245 Qualifiers Qs;
246 Qs.addCVRUQualifiers(CVRU);
247 return Qs;
248 }
249
250 // Deserialize qualifiers from an opaque representation.
251 static Qualifiers fromOpaqueValue(unsigned opaque) {
252 Qualifiers Qs;
253 Qs.Mask = opaque;
254 return Qs;
255 }
256
257 // Serialize these qualifiers into an opaque representation.
258 unsigned getAsOpaqueValue() const {
259 return Mask;
260 }
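The opaque representation is just the raw Mask, so serialization is a plain round trip (a sketch; opaqueRoundTrip is an assumed name):

#include "clang/AST/Type.h"
using namespace clang;

void opaqueRoundTrip(Qualifiers Qs) {
  unsigned Raw = Qs.getAsOpaqueValue();               // serialize
  Qualifiers Copy = Qualifiers::fromOpaqueValue(Raw); // deserialize
  // Copy == Qs holds for any qualifier set.
  (void)Copy;
}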
261
262 bool hasConst() const { return Mask & Const; }
263 bool hasOnlyConst() const { return Mask == Const; }
264 void removeConst() { Mask &= ~Const; }
265 void addConst() { Mask |= Const; }
266
267 bool hasVolatile() const { return Mask & Volatile; }
268 bool hasOnlyVolatile() const { return Mask == Volatile; }
269 void removeVolatile() { Mask &= ~Volatile; }
270 void addVolatile() { Mask |= Volatile; }
271
272 bool hasRestrict() const { return Mask & Restrict; }
273 bool hasOnlyRestrict() const { return Mask == Restrict; }
274 void removeRestrict() { Mask &= ~Restrict; }
275 void addRestrict() { Mask |= Restrict; }
276
277 bool hasCVRQualifiers() const { return getCVRQualifiers(); }
278 unsigned getCVRQualifiers() const { return Mask & CVRMask; }
279 unsigned getCVRUQualifiers() const { return Mask & (CVRMask | UMask); }
280
281 void setCVRQualifiers(unsigned mask) {
282 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits")((void)0);
283 Mask = (Mask & ~CVRMask) | mask;
284 }
285 void removeCVRQualifiers(unsigned mask) {
286 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits")((void)0);
287 Mask &= ~mask;
288 }
289 void removeCVRQualifiers() {
290 removeCVRQualifiers(CVRMask);
291 }
292 void addCVRQualifiers(unsigned mask) {
293 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits")((void)0);
294 Mask |= mask;
295 }
296 void addCVRUQualifiers(unsigned mask) {
297 assert(!(mask & ~CVRMask & ~UMask) && "bitmask contains non-CVRU bits")((void)0);
298 Mask |= mask;
299 }
300
301 bool hasUnaligned() const { return Mask & UMask; }
302 void setUnaligned(bool flag) {
303 Mask = (Mask & ~UMask) | (flag ? UMask : 0);
304 }
305 void removeUnaligned() { Mask &= ~UMask; }
306 void addUnaligned() { Mask |= UMask; }
307
308 bool hasObjCGCAttr() const { return Mask & GCAttrMask; }
309 GC getObjCGCAttr() const { return GC((Mask & GCAttrMask) >> GCAttrShift); }
310 void setObjCGCAttr(GC type) {
311 Mask = (Mask & ~GCAttrMask) | (type << GCAttrShift);
312 }
313 void removeObjCGCAttr() { setObjCGCAttr(GCNone); }
314 void addObjCGCAttr(GC type) {
315 assert(type)((void)0);
316 setObjCGCAttr(type);
317 }
318 Qualifiers withoutObjCGCAttr() const {
319 Qualifiers qs = *this;
320 qs.removeObjCGCAttr();
321 return qs;
322 }
323 Qualifiers withoutObjCLifetime() const {
324 Qualifiers qs = *this;
325 qs.removeObjCLifetime();
326 return qs;
327 }
328 Qualifiers withoutAddressSpace() const {
329 Qualifiers qs = *this;
330 qs.removeAddressSpace();
331 return qs;
332 }
333
334 bool hasObjCLifetime() const { return Mask & LifetimeMask; }
335 ObjCLifetime getObjCLifetime() const {
336 return ObjCLifetime((Mask & LifetimeMask) >> LifetimeShift);
337 }
338 void setObjCLifetime(ObjCLifetime type) {
339 Mask = (Mask & ~LifetimeMask) | (type << LifetimeShift);
340 }
341 void removeObjCLifetime() { setObjCLifetime(OCL_None); }
342 void addObjCLifetime(ObjCLifetime type) {
343 assert(type)((void)0);
344 assert(!hasObjCLifetime())((void)0);
345 Mask |= (type << LifetimeShift);
346 }
347
348 /// True if the lifetime is neither None nor ExplicitNone.
349 bool hasNonTrivialObjCLifetime() const {
350 ObjCLifetime lifetime = getObjCLifetime();
351 return (lifetime > OCL_ExplicitNone);
352 }
353
354 /// True if the lifetime is either strong or weak.
355 bool hasStrongOrWeakObjCLifetime() const {
356 ObjCLifetime lifetime = getObjCLifetime();
357 return (lifetime == OCL_Strong || lifetime == OCL_Weak);
358 }
359
360 bool hasAddressSpace() const { return Mask & AddressSpaceMask; }
361 LangAS getAddressSpace() const {
362 return static_cast<LangAS>(Mask >> AddressSpaceShift);
363 }
364 bool hasTargetSpecificAddressSpace() const {
365 return isTargetAddressSpace(getAddressSpace());
366 }
367 /// Get the address space attribute value to be printed by diagnostics.
368 unsigned getAddressSpaceAttributePrintValue() const {
369 auto Addr = getAddressSpace();
370 // This function is not supposed to be used with language specific
371 // address spaces. If that happens, the diagnostic message should consider
372 // printing the QualType instead of the address space value.
373 assert(Addr == LangAS::Default || hasTargetSpecificAddressSpace())((void)0);
374 if (Addr != LangAS::Default)
375 return toTargetAddressSpace(Addr);
376 // TODO: The diagnostic messages where Addr may be 0 should be fixed
377 // since it cannot differentiate the situation where 0 denotes the default
378 // address space or user specified __attribute__((address_space(0))).
379 return 0;
380 }
381 void setAddressSpace(LangAS space) {
382 assert((unsigned)space <= MaxAddressSpace)((void)0);
383 Mask = (Mask & ~AddressSpaceMask)
384 | (((uint32_t) space) << AddressSpaceShift);
385 }
386 void removeAddressSpace() { setAddressSpace(LangAS::Default); }
387 void addAddressSpace(LangAS space) {
388 assert(space != LangAS::Default)((void)0);
389 setAddressSpace(space);
390 }
391
392 // Fast qualifiers are those that can be allocated directly
393 // on a QualType object.
394 bool hasFastQualifiers() const { return getFastQualifiers(); }
395 unsigned getFastQualifiers() const { return Mask & FastMask; }
396 void setFastQualifiers(unsigned mask) {
397 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits")((void)0);
398 Mask = (Mask & ~FastMask) | mask;
399 }
400 void removeFastQualifiers(unsigned mask) {
401 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits")((void)0);
402 Mask &= ~mask;
403 }
404 void removeFastQualifiers() {
405 removeFastQualifiers(FastMask);
406 }
407 void addFastQualifiers(unsigned mask) {
408 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits")((void)0);
409 Mask |= mask;
410 }
411
412 /// Return true if the set contains any qualifiers which require an ExtQuals
413 /// node to be allocated.
414 bool hasNonFastQualifiers() const { return Mask & ~FastMask; }
415 Qualifiers getNonFastQualifiers() const {
416 Qualifiers Quals = *this;
417 Quals.setFastQualifiers(0);
418 return Quals;
419 }
420
421 /// Return true if the set contains any qualifiers.
422 bool hasQualifiers() const { return Mask; }
423 bool empty() const { return !Mask; }
424
425 /// Add the qualifiers from the given set to this set.
426 void addQualifiers(Qualifiers Q) {
427 // If the other set doesn't have any non-boolean qualifiers, just
428 // bit-or it in.
429 if (!(Q.Mask & ~CVRMask))
430 Mask |= Q.Mask;
431 else {
432 Mask |= (Q.Mask & CVRMask);
433 if (Q.hasAddressSpace())
434 addAddressSpace(Q.getAddressSpace());
435 if (Q.hasObjCGCAttr())
436 addObjCGCAttr(Q.getObjCGCAttr());
437 if (Q.hasObjCLifetime())
438 addObjCLifetime(Q.getObjCLifetime());
439 }
440 }
441
442 /// Remove the qualifiers from the given set from this set.
443 void removeQualifiers(Qualifiers Q) {
444 // If the other set doesn't have any non-boolean qualifiers, just
445 // bit-and the inverse in.
446 if (!(Q.Mask & ~CVRMask))
447 Mask &= ~Q.Mask;
448 else {
449 Mask &= ~(Q.Mask & CVRMask);
450 if (getObjCGCAttr() == Q.getObjCGCAttr())
451 removeObjCGCAttr();
452 if (getObjCLifetime() == Q.getObjCLifetime())
453 removeObjCLifetime();
454 if (getAddressSpace() == Q.getAddressSpace())
455 removeAddressSpace();
456 }
457 }
458
459 /// Add the qualifiers from the given set to this set, given that
460 /// they don't conflict.
461 void addConsistentQualifiers(Qualifiers qs) {
462 assert(getAddressSpace() == qs.getAddressSpace() ||((void)0)
463 !hasAddressSpace() || !qs.hasAddressSpace())((void)0);
464 assert(getObjCGCAttr() == qs.getObjCGCAttr() ||((void)0)
465 !hasObjCGCAttr() || !qs.hasObjCGCAttr())((void)0);
466 assert(getObjCLifetime() == qs.getObjCLifetime() ||((void)0)
467 !hasObjCLifetime() || !qs.hasObjCLifetime())((void)0);
468 Mask |= qs.Mask;
469 }
470
471 /// Returns true if address space A is equal to or a superset of B.
472 /// OpenCL v2.0 defines conversion rules (OpenCLC v2.0 s6.5.5) and the notion of
473 /// overlapping address spaces.
474 /// CL1.1 or CL1.2:
475 /// every address space is a superset of itself.
476 /// CL2.0 adds:
477 /// __generic is a superset of any address space except for __constant.
478 static bool isAddressSpaceSupersetOf(LangAS A, LangAS B) {
479 // Address spaces must match exactly.
480 return A == B ||
481 // Otherwise in OpenCLC v2.0 s6.5.5: every address space except
482 // for __constant can be used as __generic.
483 (A == LangAS::opencl_generic && B != LangAS::opencl_constant) ||
484 // We also define global_device and global_host address spaces,
485 // to distinguish global pointers allocated on host from pointers
486 // allocated on device, which are a subset of __global.
487 (A == LangAS::opencl_global && (B == LangAS::opencl_global_device ||
488 B == LangAS::opencl_global_host)) ||
489 (A == LangAS::sycl_global && (B == LangAS::sycl_global_device ||
490 B == LangAS::sycl_global_host)) ||
491 // Consider pointer size address spaces to be equivalent to default.
492 ((isPtrSizeAddressSpace(A) || A == LangAS::Default) &&
493 (isPtrSizeAddressSpace(B) || B == LangAS::Default)) ||
494 // Default is a superset of SYCL address spaces.
495 (A == LangAS::Default &&
496 (B == LangAS::sycl_private || B == LangAS::sycl_local ||
497 B == LangAS::sycl_global || B == LangAS::sycl_global_device ||
498 B == LangAS::sycl_global_host));
499 }
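A sketch of the superset rules above in the OpenCL 2.0 case (illustrative; addrSpaceExample is an assumed name):

#include "clang/AST/Type.h"
using namespace clang;

void addrSpaceExample() {
  // __generic covers every OpenCL address space except __constant:
  bool Covers = Qualifiers::isAddressSpaceSupersetOf(LangAS::opencl_generic,
                                                     LangAS::opencl_local);    // true
  bool NotConst = Qualifiers::isAddressSpaceSupersetOf(LangAS::opencl_generic,
                                                       LangAS::opencl_constant); // false
  (void)Covers; (void)NotConst;
}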
500
501 /// Returns true if the address space in these qualifiers is equal to or
502 /// a superset of the address space in the argument qualifiers.
503 bool isAddressSpaceSupersetOf(Qualifiers other) const {
504 return isAddressSpaceSupersetOf(getAddressSpace(), other.getAddressSpace());
505 }
506
507 /// Determines if these qualifiers compatibly include another set.
508 /// Generally this answers the question of whether an object with the other
509 /// qualifiers can be safely used as an object with these qualifiers.
510 bool compatiblyIncludes(Qualifiers other) const {
511 return isAddressSpaceSupersetOf(other) &&
512 // ObjC GC qualifiers can match, be added, or be removed, but can't
513 // be changed.
514 (getObjCGCAttr() == other.getObjCGCAttr() || !hasObjCGCAttr() ||
515 !other.hasObjCGCAttr()) &&
516 // ObjC lifetime qualifiers must match exactly.
517 getObjCLifetime() == other.getObjCLifetime() &&
518 // CVR qualifiers may subset.
519 (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask)) &&
520 // U qualifier may superset.
521 (!other.hasUnaligned() || hasUnaligned());
522 }
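A sketch of the inclusion rule (illustrative; includesExample is an assumed name): a 'const' object can safely be viewed as 'const volatile', but not the reverse, since that would drop 'volatile'.

#include "clang/AST/Type.h"
using namespace clang;

void includesExample() {
  Qualifiers CV = Qualifiers::fromCVRMask(Qualifiers::Const | Qualifiers::Volatile);
  Qualifiers C = Qualifiers::fromCVRMask(Qualifiers::Const);
  bool Ok = CV.compatiblyIncludes(C); // true: CVR qualifiers may only grow
  bool No = C.compatiblyIncludes(CV); // false
  (void)Ok; (void)No;
}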
523
524 /// Determines if these qualifiers compatibly include another set of
525 /// qualifiers from the narrow perspective of Objective-C ARC lifetime.
526 ///
527 /// One set of Objective-C lifetime qualifiers compatibly includes the other
528 /// if the lifetime qualifiers match, or if both are non-__weak and the
529 /// including set also contains the 'const' qualifier, or both are non-__weak
530 /// and one is None (which can only happen in non-ARC modes).
531 bool compatiblyIncludesObjCLifetime(Qualifiers other) const {
532 if (getObjCLifetime() == other.getObjCLifetime())
533 return true;
534
535 if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak)
536 return false;
537
538 if (getObjCLifetime() == OCL_None || other.getObjCLifetime() == OCL_None)
539 return true;
540
541 return hasConst();
542 }
543
544 /// Determine whether this set of qualifiers is a strict superset of
545 /// another set of qualifiers, not considering qualifier compatibility.
546 bool isStrictSupersetOf(Qualifiers Other) const;
547
548 bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
549 bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
550
551 explicit operator bool() const { return hasQualifiers(); }
552
553 Qualifiers &operator+=(Qualifiers R) {
554 addQualifiers(R);
555 return *this;
556 }
557
558 // Union two qualifier sets. If an enumerated qualifier appears
559 // in both sets, use the one from the right.
560 friend Qualifiers operator+(Qualifiers L, Qualifiers R) {
561 L += R;
562 return L;
563 }
564
565 Qualifiers &operator-=(Qualifiers R) {
566 removeQualifiers(R);
567 return *this;
568 }
569
570 /// Compute the difference between two qualifier sets.
571 friend Qualifiers operator-(Qualifiers L, Qualifiers R) {
572 L -= R;
573 return L;
574 }
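The set operators compose as plain union and difference (a sketch; setArithmetic is an assumed name):

#include "clang/AST/Type.h"
using namespace clang;

void setArithmetic() {
  Qualifiers CR = Qualifiers::fromCVRMask(Qualifiers::Const | Qualifiers::Restrict);
  Qualifiers V = Qualifiers::fromCVRMask(Qualifiers::Volatile);
  Qualifiers All = CR + V;   // const restrict volatile
  Qualifiers Back = All - V; // const restrict again
  (void)Back;
}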
575
576 std::string getAsString() const;
577 std::string getAsString(const PrintingPolicy &Policy) const;
578
579 static std::string getAddrSpaceAsString(LangAS AS);
580
581 bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
582 void print(raw_ostream &OS, const PrintingPolicy &Policy,
583 bool appendSpaceIfNonEmpty = false) const;
584
585 void Profile(llvm::FoldingSetNodeID &ID) const {
586 ID.AddInteger(Mask);
587 }
588
589private:
590 // bits: |0 1 2|3|4 .. 5|6 .. 8|9 ... 31|
591 // |C R V|U|GCAttr|Lifetime|AddressSpace|
592 uint32_t Mask = 0;
593
594 static const uint32_t UMask = 0x8;
595 static const uint32_t UShift = 3;
596 static const uint32_t GCAttrMask = 0x30;
597 static const uint32_t GCAttrShift = 4;
598 static const uint32_t LifetimeMask = 0x1C0;
599 static const uint32_t LifetimeShift = 6;
600 static const uint32_t AddressSpaceMask =
601 ~(CVRMask | UMask | GCAttrMask | LifetimeMask);
602 static const uint32_t AddressSpaceShift = 9;
603};
604
605/// A std::pair-like structure for storing a qualified type split
606/// into its local qualifiers and its locally-unqualified type.
607struct SplitQualType {
608 /// The locally-unqualified type.
609 const Type *Ty = nullptr;
610
611 /// The local qualifiers.
612 Qualifiers Quals;
613
614 SplitQualType() = default;
615 SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {}
616
617 SplitQualType getSingleStepDesugaredType() const; // end of this file
618
619 // Make std::tie work.
620 std::pair<const Type *,Qualifiers> asPair() const {
621 return std::pair<const Type *, Qualifiers>(Ty, Quals);
622 }
623
624 friend bool operator==(SplitQualType a, SplitQualType b) {
625 return a.Ty == b.Ty && a.Quals == b.Quals;
626 }
627 friend bool operator!=(SplitQualType a, SplitQualType b) {
628 return a.Ty != b.Ty || a.Quals != b.Quals;
629 }
630};
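A sketch of splitting a qualified type into this structure (assumes an ASTContext is at hand; splitExample is an assumed name):

#include "clang/AST/ASTContext.h"
using namespace clang;

void splitExample(ASTContext &Ctx) {
  QualType QT = Ctx.IntTy;
  QT.addConst();                  // 'const int'
  SplitQualType Split = QT.split();
  // Split.Ty is the bare 'int' node; Split.Quals holds the local 'const'.
  bool IsConst = Split.Quals.hasConst(); // true
  (void)IsConst;
}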
631
632/// The kind of type we are substituting Objective-C type arguments into.
633///
634/// The kind of substitution affects the replacement of type parameters when
635/// no concrete type information is provided, e.g., when dealing with an
636/// unspecialized type.
637enum class ObjCSubstitutionContext {
638 /// An ordinary type.
639 Ordinary,
640
641 /// The result type of a method or function.
642 Result,
643
644 /// The parameter type of a method or function.
645 Parameter,
646
647 /// The type of a property.
648 Property,
649
650 /// The superclass of a type.
651 Superclass,
652};
653
654/// A (possibly-)qualified type.
655///
656/// For efficiency, we don't store CV-qualified types as nodes on their
657/// own: instead each reference to a type stores the qualifiers. This
658/// greatly reduces the number of nodes we need to allocate for types (for
659/// example we only need one for 'int', 'const int', 'volatile int',
660/// 'const volatile int', etc).
661///
662/// As an added efficiency bonus, instead of making this a pair, we
663/// just store the two bits we care about in the low bits of the
664/// pointer. To handle the packing/unpacking, we make QualType be a
665/// simple wrapper class that acts like a smart pointer. A third bit
666/// indicates whether there are extended qualifiers present, in which
667/// case the pointer points to a special structure.
668class QualType {
669 friend class QualifierCollector;
670
671 // Thankfully, these are efficiently composable.
672 llvm::PointerIntPair<llvm::PointerUnion<const Type *, const ExtQuals *>,
673 Qualifiers::FastWidth> Value;
674
675 const ExtQuals *getExtQualsUnsafe() const {
676 return Value.getPointer().get<const ExtQuals*>();
677 }
678
679 const Type *getTypePtrUnsafe() const {
680 return Value.getPointer().get<const Type*>();
681 }
682
683 const ExtQualsTypeCommonBase *getCommonPtr() const {
684 assert(!isNull() && "Cannot retrieve a NULL type pointer")((void)0);
685 auto CommonPtrVal = reinterpret_cast<uintptr_t>(Value.getOpaqueValue());
686 CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1);
687 return reinterpret_cast<ExtQualsTypeCommonBase*>(CommonPtrVal);
688 }
689
690public:
691 QualType() = default;
692 QualType(const Type *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
693 QualType(const ExtQuals *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
694
695 unsigned getLocalFastQualifiers() const { return Value.getInt(); }
696 void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
697
698 /// Retrieves a pointer to the underlying (unqualified) type.
699 ///
700 /// This function requires that the type not be NULL. If the type might be
701 /// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
702 const Type *getTypePtr() const;
703
704 const Type *getTypePtrOrNull() const;
705
706 /// Retrieves a pointer to the name of the base type.
707 const IdentifierInfo *getBaseTypeIdentifier() const;
708
709 /// Divides a QualType into its unqualified type and a set of local
710 /// qualifiers.
711 SplitQualType split() const;
712
713 void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
714
715 static QualType getFromOpaquePtr(const void *Ptr) {
716 QualType T;
717 T.Value.setFromOpaqueValue(const_cast<void*>(Ptr));
718 return T;
719 }
720
721 const Type &operator*() const {
722 return *getTypePtr();
723 }
724
725 const Type *operator->() const {
726 return getTypePtr();
727 }
728
729 bool isCanonical() const;
730 bool isCanonicalAsParam() const;
731
732 /// Return true if this QualType doesn't point to a type yet.
733 bool isNull() const {
734 return Value.getPointer().isNull();
22. Calling 'PointerUnion::isNull'
25. Returning from 'PointerUnion::isNull'
26. Returning the value 1, which participates in a condition later
735 }
736
737 /// Determine whether this particular QualType instance has the
738 /// "const" qualifier set, without looking through typedefs that may have
739 /// added "const" at a different level.
740 bool isLocalConstQualified() const {
741 return (getLocalFastQualifiers() & Qualifiers::Const);
742 }
743
744 /// Determine whether this type is const-qualified.
745 bool isConstQualified() const;
746
747 /// Determine whether this particular QualType instance has the
748 /// "restrict" qualifier set, without looking through typedefs that may have
749 /// added "restrict" at a different level.
750 bool isLocalRestrictQualified() const {
751 return (getLocalFastQualifiers() & Qualifiers::Restrict);
752 }
753
754 /// Determine whether this type is restrict-qualified.
755 bool isRestrictQualified() const;
756
757 /// Determine whether this particular QualType instance has the
758 /// "volatile" qualifier set, without looking through typedefs that may have
759 /// added "volatile" at a different level.
760 bool isLocalVolatileQualified() const {
761 return (getLocalFastQualifiers() & Qualifiers::Volatile);
762 }
763
764 /// Determine whether this type is volatile-qualified.
765 bool isVolatileQualified() const;
766
767 /// Determine whether this particular QualType instance has any
768 /// qualifiers, without looking through any typedefs that might add
769 /// qualifiers at a different level.
770 bool hasLocalQualifiers() const {
771 return getLocalFastQualifiers() || hasLocalNonFastQualifiers();
772 }
773
774 /// Determine whether this type has any qualifiers.
775 bool hasQualifiers() const;
776
777 /// Determine whether this particular QualType instance has any
778 /// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType
779 /// instance.
780 bool hasLocalNonFastQualifiers() const {
781 return Value.getPointer().is<const ExtQuals*>();
782 }
783
784 /// Retrieve the set of qualifiers local to this particular QualType
785 /// instance, not including any qualifiers acquired through typedefs or
786 /// other sugar.
787 Qualifiers getLocalQualifiers() const;
788
789 /// Retrieve the set of qualifiers applied to this type.
790 Qualifiers getQualifiers() const;
791
792 /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
793 /// local to this particular QualType instance, not including any qualifiers
794 /// acquired through typedefs or other sugar.
795 unsigned getLocalCVRQualifiers() const {
796 return getLocalFastQualifiers();
797 }
798
799 /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
800 /// applied to this type.
801 unsigned getCVRQualifiers() const;
802
803 bool isConstant(const ASTContext& Ctx) const {
804 return QualType::isConstant(*this, Ctx);
805 }
806
807 /// Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
808 bool isPODType(const ASTContext &Context) const;
809
810 /// Return true if this is a POD type according to the rules of the C++98
811 /// standard, regardless of the current compilation's language.
812 bool isCXX98PODType(const ASTContext &Context) const;
813
814 /// Return true if this is a POD type according to the more relaxed rules
815 /// of the C++11 standard, regardless of the current compilation's language.
816 /// (C++0x [basic.types]p9). Note that, unlike
817 /// CXXRecordDecl::isCXX11StandardLayout, this takes DRs into account.
818 bool isCXX11PODType(const ASTContext &Context) const;
819
820 /// Return true if this is a trivial type per (C++0x [basic.types]p9)
821 bool isTrivialType(const ASTContext &Context) const;
822
823 /// Return true if this is a trivially copyable type (C++0x [basic.types]p9)
824 bool isTriviallyCopyableType(const ASTContext &Context) const;
825
826
827 /// Returns true if it is a class and it might be dynamic.
828 bool mayBeDynamicClass() const;
829
830 /// Returns true if it is not a class or if the class might not be dynamic.
831 bool mayBeNotDynamicClass() const;
832
833 // Don't promise in the API that anything besides 'const' can be
834 // easily added.
835
836 /// Add the `const` type qualifier to this QualType.
837 void addConst() {
838 addFastQualifiers(Qualifiers::Const);
839 }
840 QualType withConst() const {
841 return withFastQualifiers(Qualifiers::Const);
842 }
843
844 /// Add the `volatile` type qualifier to this QualType.
845 void addVolatile() {
846 addFastQualifiers(Qualifiers::Volatile);
847 }
848 QualType withVolatile() const {
849 return withFastQualifiers(Qualifiers::Volatile);
850 }
851
852 /// Add the `restrict` qualifier to this QualType.
853 void addRestrict() {
854 addFastQualifiers(Qualifiers::Restrict);
855 }
856 QualType withRestrict() const {
857 return withFastQualifiers(Qualifiers::Restrict);
858 }
859
860 QualType withCVRQualifiers(unsigned CVR) const {
861 return withFastQualifiers(CVR);
862 }
863
864 void addFastQualifiers(unsigned TQs) {
865 assert(!(TQs & ~Qualifiers::FastMask)((void)0)
866 && "non-fast qualifier bits set in mask!")((void)0);
867 Value.setInt(Value.getInt() | TQs);
868 }
869
870 void removeLocalConst();
871 void removeLocalVolatile();
872 void removeLocalRestrict();
873 void removeLocalCVRQualifiers(unsigned Mask);
874
875 void removeLocalFastQualifiers() { Value.setInt(0); }
876 void removeLocalFastQualifiers(unsigned Mask) {
877 assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers")((void)0);
878 Value.setInt(Value.getInt() & ~Mask);
879 }
880
881 // Creates a type with the given qualifiers in addition to any
882 // qualifiers already on this type.
883 QualType withFastQualifiers(unsigned TQs) const {
884 QualType T = *this;
885 T.addFastQualifiers(TQs);
886 return T;
887 }
888
889 // Creates a type with exactly the given fast qualifiers, removing
890 // any existing fast qualifiers.
891 QualType withExactLocalFastQualifiers(unsigned TQs) const {
892 return withoutLocalFastQualifiers().withFastQualifiers(TQs);
893 }
894
895 // Removes fast qualifiers, but leaves any extended qualifiers in place.
896 QualType withoutLocalFastQualifiers() const {
897 QualType T = *this;
898 T.removeLocalFastQualifiers();
899 return T;
900 }
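A sketch of the fast-qualifier helpers above (assumes an ASTContext; fastQualExample is an assumed name):

#include "clang/AST/ASTContext.h"
using namespace clang;

void fastQualExample(ASTContext &Ctx) {
  QualType QT = Ctx.IntTy;
  QualType CQT = QT.withConst();                 // 'const' rides in the low bits
  bool LocalConst = CQT.isLocalConstQualified(); // true
  QualType Bare = CQT.withoutLocalFastQualifiers();
  // Bare is plain 'int' again; extended (non-fast) qualifiers would survive.
  (void)LocalConst; (void)Bare;
}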
901
902 QualType getCanonicalType() const;
903
904 /// Return this type with all of the instance-specific qualifiers
905 /// removed, but without removing any qualifiers that may have been applied
906 /// through typedefs.
907 QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); }
908
909 /// Retrieve the unqualified variant of the given type,
910 /// removing as little sugar as possible.
911 ///
912 /// This routine looks through various kinds of sugar to find the
913 /// least-desugared type that is unqualified. For example, given:
914 ///
915 /// \code
916 /// typedef int Integer;
917 /// typedef const Integer CInteger;
918 /// typedef CInteger DifferenceType;
919 /// \endcode
920 ///
921 /// Executing \c getUnqualifiedType() on the type \c DifferenceType will
922 /// desugar until we hit the type \c Integer, which has no qualifiers on it.
923 ///
924 /// The resulting type might still be qualified if it's sugar for an array
925 /// type. To strip qualifiers even from within a sugared array type, use
926 /// ASTContext::getUnqualifiedArrayType.
927 inline QualType getUnqualifiedType() const;
928
929 /// Retrieve the unqualified variant of the given type, removing as little
930 /// sugar as possible.
931 ///
932 /// Like getUnqualifiedType(), but also returns the set of
933 /// qualifiers that were built up.
934 ///
935 /// The resulting type might still be qualified if it's sugar for an array
936 /// type. To strip qualifiers even from within a sugared array type, use
937 /// ASTContext::getUnqualifiedArrayType.
938 inline SplitQualType getSplitUnqualifiedType() const;
939
940 /// Determine whether this type is more qualified than the other
941 /// given type, requiring exact equality for non-CVR qualifiers.
942 bool isMoreQualifiedThan(QualType Other) const;
943
944 /// Determine whether this type is at least as qualified as the other
945 /// given type, requiring exact equality for non-CVR qualifiers.
946 bool isAtLeastAsQualifiedAs(QualType Other) const;
947
948 QualType getNonReferenceType() const;
949
950 /// Determine the type of a (typically non-lvalue) expression with the
951 /// specified result type.
952 ///
953 /// This routine should be used for expressions for which the return type is
954 /// explicitly specified (e.g., in a cast or call) and isn't necessarily
955 /// an lvalue. It removes a top-level reference (since there are no
956 /// expressions of reference type) and deletes top-level cvr-qualifiers
957 /// from non-class types (in C++) or all types (in C).
958 QualType getNonLValueExprType(const ASTContext &Context) const;
959
960 /// Remove an outer pack expansion type (if any) from this type. Used as part
961 /// of converting the type of a declaration to the type of an expression that
962 /// references that expression. It's meaningless for an expression to have a
963 /// pack expansion type.
964 QualType getNonPackExpansionType() const;
965
966 /// Return the specified type with any "sugar" removed from
967 /// the type. This takes off typedefs, typeof's etc. If the outer level of
968 /// the type is already concrete, it returns it unmodified. This is similar
969 /// to getting the canonical type, but it doesn't remove *all* typedefs. For
970 /// example, it returns "T*" as "T*" (not as "int*"), because the pointer is
971 /// concrete.
972 ///
973 /// Qualifiers are left in place.
974 QualType getDesugaredType(const ASTContext &Context) const {
975 return getDesugaredType(*this, Context);
976 }
977
978 SplitQualType getSplitDesugaredType() const {
979 return getSplitDesugaredType(*this);
980 }
981
982 /// Return the specified type with one level of "sugar" removed from
983 /// the type.
984 ///
985 /// This routine takes off the first typedef, typeof, etc. If the outer level
986 /// of the type is already concrete, it returns it unmodified.
987 QualType getSingleStepDesugaredType(const ASTContext &Context) const {
988 return getSingleStepDesugaredTypeImpl(*this, Context);
989 }
990
991 /// Returns the specified type after dropping any
992 /// outer-level parentheses.
993 QualType IgnoreParens() const {
994 if (isa<ParenType>(*this))
995 return QualType::IgnoreParens(*this);
996 return *this;
997 }
998
999 /// Indicate whether the specified types and qualifiers are identical.
1000 friend bool operator==(const QualType &LHS, const QualType &RHS) {
1001 return LHS.Value == RHS.Value;
1002 }
1003 friend bool operator!=(const QualType &LHS, const QualType &RHS) {
1004 return LHS.Value != RHS.Value;
1005 }
1006 friend bool operator<(const QualType &LHS, const QualType &RHS) {
1007 return LHS.Value < RHS.Value;
1008 }
1009
1010 static std::string getAsString(SplitQualType split,
1011 const PrintingPolicy &Policy) {
1012 return getAsString(split.Ty, split.Quals, Policy);
1013 }
1014 static std::string getAsString(const Type *ty, Qualifiers qs,
1015 const PrintingPolicy &Policy);
1016
1017 std::string getAsString() const;
1018 std::string getAsString(const PrintingPolicy &Policy) const;
1019
1020 void print(raw_ostream &OS, const PrintingPolicy &Policy,
1021 const Twine &PlaceHolder = Twine(),
1022 unsigned Indentation = 0) const;
1023
1024 static void print(SplitQualType split, raw_ostream &OS,
1025 const PrintingPolicy &policy, const Twine &PlaceHolder,
1026 unsigned Indentation = 0) {
1027 return print(split.Ty, split.Quals, OS, policy, PlaceHolder, Indentation);
1028 }
1029
1030 static void print(const Type *ty, Qualifiers qs,
1031 raw_ostream &OS, const PrintingPolicy &policy,
1032 const Twine &PlaceHolder,
1033 unsigned Indentation = 0);
1034
1035 void getAsStringInternal(std::string &Str,
1036 const PrintingPolicy &Policy) const;
1037
1038 static void getAsStringInternal(SplitQualType split, std::string &out,
1039 const PrintingPolicy &policy) {
1040 return getAsStringInternal(split.Ty, split.Quals, out, policy);
1041 }
1042
1043 static void getAsStringInternal(const Type *ty, Qualifiers qs,
1044 std::string &out,
1045 const PrintingPolicy &policy);
1046
1047 class StreamedQualTypeHelper {
1048 const QualType &T;
1049 const PrintingPolicy &Policy;
1050 const Twine &PlaceHolder;
1051 unsigned Indentation;
1052
1053 public:
1054 StreamedQualTypeHelper(const QualType &T, const PrintingPolicy &Policy,
1055 const Twine &PlaceHolder, unsigned Indentation)
1056 : T(T), Policy(Policy), PlaceHolder(PlaceHolder),
1057 Indentation(Indentation) {}
1058
1059 friend raw_ostream &operator<<(raw_ostream &OS,
1060 const StreamedQualTypeHelper &SQT) {
1061 SQT.T.print(OS, SQT.Policy, SQT.PlaceHolder, SQT.Indentation);
1062 return OS;
1063 }
1064 };
1065
1066 StreamedQualTypeHelper stream(const PrintingPolicy &Policy,
1067 const Twine &PlaceHolder = Twine(),
1068 unsigned Indentation = 0) const {
1069 return StreamedQualTypeHelper(*this, Policy, PlaceHolder, Indentation);
1070 }
1071
1072 void dump(const char *s) const;
1073 void dump() const;
1074 void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
1075
1076 void Profile(llvm::FoldingSetNodeID &ID) const {
1077 ID.AddPointer(getAsOpaquePtr());
1078 }
1079
1080 /// Check if this type has any address space qualifier.
1081 inline bool hasAddressSpace() const;
1082
1083 /// Return the address space of this type.
1084 inline LangAS getAddressSpace() const;
1085
1086 /// Returns true if address space qualifiers overlap with T address space
1087 /// qualifiers.
1088 /// OpenCL C defines conversion rules for pointers to different address spaces
1089 /// and notion of overlapping address spaces.
1090 /// CL1.1 or CL1.2:
1091/// address spaces overlap iff they are the same.
1092 /// OpenCL C v2.0 s6.5.5 adds:
1093 /// __generic overlaps with any address space except for __constant.
1094 bool isAddressSpaceOverlapping(QualType T) const {
1095 Qualifiers Q = getQualifiers();
1096 Qualifiers TQ = T.getQualifiers();
1097 // Address spaces overlap if at least one of them is a superset of another
1098 return Q.isAddressSpaceSupersetOf(TQ) || TQ.isAddressSpaceSupersetOf(Q);
1099 }
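A sketch of the overlap test (assumes an ASTContext and OpenCL address spaces; overlapExample is an assumed name):

#include "clang/AST/ASTContext.h"
using namespace clang;

void overlapExample(ASTContext &Ctx) {
  QualType Generic = Ctx.getAddrSpaceQualType(Ctx.IntTy, LangAS::opencl_generic);
  QualType Local = Ctx.getAddrSpaceQualType(Ctx.IntTy, LangAS::opencl_local);
  // A superset relation in either direction is enough to overlap.
  bool Overlap = Generic.isAddressSpaceOverlapping(Local); // true
  (void)Overlap;
}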
1100
1101 /// Returns gc attribute of this type.
1102 inline Qualifiers::GC getObjCGCAttr() const;
1103
1104 /// true when Type is objc's weak.
1105 bool isObjCGCWeak() const {
1106 return getObjCGCAttr() == Qualifiers::Weak;
1107 }
1108
1109 /// true when Type is objc's strong.
1110 bool isObjCGCStrong() const {
1111 return getObjCGCAttr() == Qualifiers::Strong;
1112 }
1113
1114 /// Returns lifetime attribute of this type.
1115 Qualifiers::ObjCLifetime getObjCLifetime() const {
1116 return getQualifiers().getObjCLifetime();
1117 }
1118
1119 bool hasNonTrivialObjCLifetime() const {
1120 return getQualifiers().hasNonTrivialObjCLifetime();
1121 }
1122
1123 bool hasStrongOrWeakObjCLifetime() const {
1124 return getQualifiers().hasStrongOrWeakObjCLifetime();
1125 }
1126
1127 // true when Type is objc's weak and weak is enabled but ARC isn't.
1128 bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const;
1129
1130 enum PrimitiveDefaultInitializeKind {
1131 /// The type does not fall into any of the following categories. Note that
1132 /// this case is zero-valued so that values of this enum can be used as a
1133 /// boolean condition for non-triviality.
1134 PDIK_Trivial,
1135
1136 /// The type is an Objective-C retainable pointer type that is qualified
1137 /// with the ARC __strong qualifier.
1138 PDIK_ARCStrong,
1139
1140 /// The type is an Objective-C retainable pointer type that is qualified
1141 /// with the ARC __weak qualifier.
1142 PDIK_ARCWeak,
1143
1144 /// The type is a struct containing a field whose type is not PCK_Trivial.
1145 PDIK_Struct
1146 };
1147
1148 /// Functions to query basic properties of non-trivial C struct types.
1149
1150 /// Check if this is a non-trivial type that would cause a C struct
1151 /// transitively containing this type to be non-trivial to default initialize
1152 /// and return the kind.
1153 PrimitiveDefaultInitializeKind
1154 isNonTrivialToPrimitiveDefaultInitialize() const;
1155
1156 enum PrimitiveCopyKind {
1157 /// The type does not fall into any of the following categories. Note that
1158 /// this case is zero-valued so that values of this enum can be used as a
1159 /// boolean condition for non-triviality.
1160 PCK_Trivial,
1161
1162 /// The type would be trivial except that it is volatile-qualified. Types
1163 /// that fall into one of the other non-trivial cases may additionally be
1164 /// volatile-qualified.
1165 PCK_VolatileTrivial,
1166
1167 /// The type is an Objective-C retainable pointer type that is qualified
1168 /// with the ARC __strong qualifier.
1169 PCK_ARCStrong,
1170
1171 /// The type is an Objective-C retainable pointer type that is qualified
1172 /// with the ARC __weak qualifier.
1173 PCK_ARCWeak,
1174
1175 /// The type is a struct containing a field whose type is neither
1176 /// PCK_Trivial nor PCK_VolatileTrivial.
1177 /// Note that a C++ struct type does not necessarily match this; C++ copying
1178 /// semantics are too complex to express here, in part because they depend
1179 /// on the exact constructor or assignment operator that is chosen by
1180 /// overload resolution to do the copy.
1181 PCK_Struct
1182 };
1183
1184 /// Check if this is a non-trivial type that would cause a C struct
1185 /// transitively containing this type to be non-trivial to copy and return the
1186 /// kind.
1187 PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const;
1188
1189 /// Check if this is a non-trivial type that would cause a C struct
1190 /// transitively containing this type to be non-trivial to destructively
1191 /// move and return the kind. Destructive move in this context is a C++-style
1192 /// move in which the source object is placed in a valid but unspecified state
1193 /// after it is moved, as opposed to a truly destructive move in which the
1194 /// source object is placed in an uninitialized state.
1195 PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const;
1196
1197 enum DestructionKind {
1198 DK_none,
1199 DK_cxx_destructor,
1200 DK_objc_strong_lifetime,
1201 DK_objc_weak_lifetime,
1202 DK_nontrivial_c_struct
1203 };
1204
1205 /// Returns a nonzero value if objects of this type require
1206 /// non-trivial work to clean up after. Non-zero because it's
1207 /// conceivable that qualifiers (objc_gc(weak)?) could make
1208 /// something require destruction.
1209 DestructionKind isDestructedType() const {
1210 return isDestructedTypeImpl(*this);
1211 }
1212
1213 /// Check if this is or contains a C union that is non-trivial to
1214 /// default-initialize, which is a union that has a member that is non-trivial
1215 /// to default-initialize. If this returns true,
1216 /// isNonTrivialToPrimitiveDefaultInitialize returns PDIK_Struct.
1217 bool hasNonTrivialToPrimitiveDefaultInitializeCUnion() const;
1218
1219 /// Check if this is or contains a C union that is non-trivial to destruct,
1220 /// which is a union that has a member that is non-trivial to destruct. If
1221 /// this returns true, isDestructedType returns DK_nontrivial_c_struct.
1222 bool hasNonTrivialToPrimitiveDestructCUnion() const;
1223
1224 /// Check if this is or contains a C union that is non-trivial to copy, which
1225 /// is a union that has a member that is non-trivial to copy. If this returns
1226 /// true, isNonTrivialToPrimitiveCopy returns PCK_Struct.
1227 bool hasNonTrivialToPrimitiveCopyCUnion() const;
1228
1229 /// Determine whether expressions of the given type are forbidden
1230 /// from being lvalues in C.
1231 ///
1232 /// The expression types that are forbidden to be lvalues are:
1233 /// - 'void', but not qualified void
1234 /// - function types
1235 ///
1236 /// The exact rule here is C99 6.3.2.1:
1237 /// An lvalue is an expression with an object type or an incomplete
1238 /// type other than void.
1239 bool isCForbiddenLValueType() const;
1240
1241 /// Substitute type arguments for the Objective-C type parameters used in the
1242 /// subject type.
1243 ///
1244 /// \param ctx ASTContext in which the type exists.
1245 ///
1246 /// \param typeArgs The type arguments that will be substituted for the
1247 /// Objective-C type parameters in the subject type, which are generally
1248 /// computed via \c Type::getObjCSubstitutions. If empty, the type
1249 /// parameters will be replaced with their bounds or id/Class, as appropriate
1250 /// for the context.
1251 ///
1252 /// \param context The context in which the subject type was written.
1253 ///
1254 /// \returns the resulting type.
1255 QualType substObjCTypeArgs(ASTContext &ctx,
1256 ArrayRef<QualType> typeArgs,
1257 ObjCSubstitutionContext context) const;
1258
1259 /// Substitute type arguments from an object type for the Objective-C type
1260 /// parameters used in the subject type.
1261 ///
1262 /// This operation combines the computation of type arguments for
1263 /// substitution (\c Type::getObjCSubstitutions) with the actual process of
1264 /// substitution (\c QualType::substObjCTypeArgs) for the convenience of
1265 /// callers that need to perform a single substitution in isolation.
1266 ///
1267 /// \param objectType The type of the object whose member type we're
1268 /// substituting into. For example, this might be the receiver of a message
1269 /// or the base of a property access.
1270 ///
1271 /// \param dc The declaration context from which the subject type was
1272 /// retrieved, which indicates (for example) which type parameters should
1273 /// be substituted.
1274 ///
1275 /// \param context The context in which the subject type was written.
1276 ///
1277 /// \returns the subject type after replacing all of the Objective-C type
1278 /// parameters with their corresponding arguments.
1279 QualType substObjCMemberType(QualType objectType,
1280 const DeclContext *dc,
1281 ObjCSubstitutionContext context) const;
1282
1283 /// Strip Objective-C "__kindof" types from the given type.
1284 QualType stripObjCKindOfType(const ASTContext &ctx) const;
1285
1286 /// Remove all qualifiers including _Atomic.
1287 QualType getAtomicUnqualifiedType() const;
1288
1289private:
1290 // These methods are implemented in a separate translation unit;
1291 // "static"-ize them to avoid creating temporary QualTypes in the
1292 // caller.
1293 static bool isConstant(QualType T, const ASTContext& Ctx);
1294 static QualType getDesugaredType(QualType T, const ASTContext &Context);
1295 static SplitQualType getSplitDesugaredType(QualType T);
1296 static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
1297 static QualType getSingleStepDesugaredTypeImpl(QualType type,
1298 const ASTContext &C);
1299 static QualType IgnoreParens(QualType T);
1300 static DestructionKind isDestructedTypeImpl(QualType type);
1301
1302 /// Check if \param RD is or contains a non-trivial C union.
1303 static bool hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD);
1304 static bool hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD);
1305 static bool hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD);
1306};
1307
1308} // namespace clang
1309
1310namespace llvm {
1311
1312/// Implement simplify_type for QualType, so that we can dyn_cast from QualType
1313/// to a specific Type class.
1314template<> struct simplify_type< ::clang::QualType> {
1315 using SimpleType = const ::clang::Type *;
1316
1317 static SimpleType getSimplifiedValue(::clang::QualType Val) {
1318 return Val.getTypePtr();
1319 }
1320};
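A sketch of what the specialization above enables (illustrative; castExample is an assumed name). Note that dyn_cast inspects only the outermost type node, so sugar such as typedefs is not looked through; Type::getAs does that instead.

#include "clang/AST/Type.h"
#include "llvm/Support/Casting.h"
using namespace clang;

void castExample(QualType QT) {
  // simplify_type lets the cast machinery see through QualType to the
  // underlying 'const Type *'.
  if (const auto *PT = llvm::dyn_cast<PointerType>(QT)) {
    QualType Pointee = PT->getPointeeType();
    (void)Pointee;
  }
}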
1321
1322// Teach SmallPtrSet that QualType is "basically a pointer".
1323template<>
1324struct PointerLikeTypeTraits<clang::QualType> {
1325 static inline void *getAsVoidPointer(clang::QualType P) {
1326 return P.getAsOpaquePtr();
1327 }
1328
1329 static inline clang::QualType getFromVoidPointer(void *P) {
1330 return clang::QualType::getFromOpaquePtr(P);
1331 }
1332
1333 // Various qualifiers go in low bits.
1334 static constexpr int NumLowBitsAvailable = 0;
1335};
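With the traits above, pointer-keyed containers accept QualType directly (a sketch; setExample is an assumed name):

#include "clang/AST/Type.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;

void setExample(QualType A, QualType B) {
  llvm::SmallPtrSet<QualType, 4> Seen;
  Seen.insert(A);
  bool AlreadySeen = !Seen.insert(B).second; // true iff B was already present
  (void)AlreadySeen;
}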
1336
1337} // namespace llvm
1338
1339namespace clang {
1340
1341/// Base class that is common to both the \c ExtQuals and \c Type
1342/// classes, which allows \c QualType to access the common fields between the
1343/// two.
1344class ExtQualsTypeCommonBase {
1345 friend class ExtQuals;
1346 friend class QualType;
1347 friend class Type;
1348
1349 /// The "base" type of an extended qualifiers type (\c ExtQuals) or
1350 /// a self-referential pointer (for \c Type).
1351 ///
1352 /// This pointer allows an efficient mapping from a QualType to its
1353 /// underlying type pointer.
1354 const Type *const BaseType;
1355
1356 /// The canonical type of this type. A QualType.
1357 QualType CanonicalType;
1358
1359 ExtQualsTypeCommonBase(const Type *baseType, QualType canon)
1360 : BaseType(baseType), CanonicalType(canon) {}
1361};
1362
1363/// We can encode up to four bits in the low bits of a
1364/// type pointer, but there are many more type qualifiers that we want
1365/// to be able to apply to an arbitrary type. Therefore we have this
1366/// struct, intended to be heap-allocated and used by QualType to
1367/// store qualifiers.
1368///
1369/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
1370/// in three low bits on the QualType pointer; a fourth bit records whether
1371/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
1372/// Objective-C GC attributes) are much more rare.
1373class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode {
1374 // NOTE: changing the fast qualifiers should be straightforward as
1375 // long as you don't make 'const' non-fast.
1376 // 1. Qualifiers:
1377 // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
1378 // Fast qualifiers must occupy the low-order bits.
1379 // b) Update Qualifiers::FastWidth and FastMask.
1380 // 2. QualType:
1381 // a) Update is{Volatile,Restrict}Qualified(), defined inline.
1382 // b) Update remove{Volatile,Restrict}, defined near the end of
1383 // this header.
1384 // 3. ASTContext:
1385 // a) Update get{Volatile,Restrict}Type.
1386
1387 /// The immutable set of qualifiers applied by this node. Always contains
1388 /// extended qualifiers.
1389 Qualifiers Quals;
1390
1391 ExtQuals *this_() { return this; }
1392
1393public:
1394 ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
1395 : ExtQualsTypeCommonBase(baseType,
1396 canon.isNull() ? QualType(this_(), 0) : canon),
1397 Quals(quals) {
1398 assert(Quals.hasNonFastQualifiers()((void)0)
1399 && "ExtQuals created with no non-fast qualifiers")((void)0);
1400 assert(!Quals.hasFastQualifiers()((void)0)
1401 && "ExtQuals created with fast qualifiers")((void)0);
1402 }
1403
1404 Qualifiers getQualifiers() const { return Quals; }
1405
1406 bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
1407 Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
1408
1409 bool hasObjCLifetime() const { return Quals.hasObjCLifetime(); }
1410 Qualifiers::ObjCLifetime getObjCLifetime() const {
1411 return Quals.getObjCLifetime();
1412 }
1413
1414 bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
1415 LangAS getAddressSpace() const { return Quals.getAddressSpace(); }
1416
1417 const Type *getBaseType() const { return BaseType; }
1418
1419public:
1420 void Profile(llvm::FoldingSetNodeID &ID) const {
1421 Profile(ID, getBaseType(), Quals);
1422 }
1423
1424 static void Profile(llvm::FoldingSetNodeID &ID,
1425 const Type *BaseType,
1426 Qualifiers Quals) {
1427 assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!")((void)0);
1428 ID.AddPointer(BaseType);
1429 Quals.Profile(ID);
1430 }
1431};
1432
1433/// The kind of C++11 ref-qualifier associated with a function type.
1434/// This determines whether a member function's "this" object can be an
1435/// lvalue, rvalue, or neither.
1436enum RefQualifierKind {
1437 /// No ref-qualifier was provided.
1438 RQ_None = 0,
1439
1440 /// An lvalue ref-qualifier was provided (\c &).
1441 RQ_LValue,
1442
1443 /// An rvalue ref-qualifier was provided (\c &&).
1444 RQ_RValue
1445};
1446
1447/// Which keyword(s) were used to create an AutoType.
1448enum class AutoTypeKeyword {
1449 /// auto
1450 Auto,
1451
1452 /// decltype(auto)
1453 DecltypeAuto,
1454
1455 /// __auto_type (GNU extension)
1456 GNUAutoType
1457};
1458
1459/// The base class of the type hierarchy.
1460///
1461/// A central concept with types is that each type always has a canonical
1462/// type. A canonical type is the type with any typedef names stripped out
1463/// of it or the types it references. For example, consider:
1464///
1465/// typedef int foo;
1466/// typedef foo* bar;
1467/// 'int *' 'foo *' 'bar'
1468///
1469/// There will be a Type object created for 'int'. Since int is canonical, its
1470/// CanonicalType pointer points to itself. There is also a Type for 'foo' (a
1471/// TypedefType). Its CanonicalType pointer points to the 'int' Type. Next
1472/// there is a PointerType that represents 'int*', which, like 'int', is
1473/// canonical. Finally, there is a PointerType type for 'foo*' whose canonical
1474/// type is 'int*', and there is a TypedefType for 'bar', whose canonical type
1475/// is also 'int*'.
1476///
1477/// Non-canonical types are useful for emitting diagnostics, without losing
1478/// information about typedefs being used. Canonical types are useful for type
1479/// comparisons (they allow by-pointer equality tests) and useful for reasoning
1480/// about whether something has a particular form (e.g. is a function type),
1481/// because they implicitly, recursively, strip all typedefs out of a type.
1482///
1483/// Types, once created, are immutable.
1484///
1485class alignas(8) Type : public ExtQualsTypeCommonBase {
1486public:
1487 enum TypeClass {
1488#define TYPE(Class, Base) Class,
1489#define LAST_TYPE(Class) TypeLast = Class
1490#define ABSTRACT_TYPE(Class, Base)
1491#include "clang/AST/TypeNodes.inc"
1492 };
1493
1494private:
1495 /// Bitfields required by the Type class.
1496 class TypeBitfields {
1497 friend class Type;
1498 template <class T> friend class TypePropertyCache;
1499
1500 /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
1501 unsigned TC : 8;
1502
1503 /// Store information on the type dependency.
1504 unsigned Dependence : llvm::BitWidth<TypeDependence>;
1505
1506 /// True if the cache (i.e. the bitfields here starting with
1507 /// 'Cache') is valid.
1508 mutable unsigned CacheValid : 1;
1509
1510 /// Linkage of this type.
1511 mutable unsigned CachedLinkage : 3;
1512
1513 /// Whether this type involves any local or unnamed types.
1514 mutable unsigned CachedLocalOrUnnamed : 1;
1515
1516 /// Whether this type comes from an AST file.
1517 mutable unsigned FromAST : 1;
1518
1519 bool isCacheValid() const {
1520 return CacheValid;
1521 }
1522
1523 Linkage getLinkage() const {
1524 assert(isCacheValid() && "getting linkage from invalid cache")((void)0);
1525 return static_cast<Linkage>(CachedLinkage);
1526 }
1527
1528 bool hasLocalOrUnnamedType() const {
1529 assert(isCacheValid() && "getting linkage from invalid cache")((void)0);
1530 return CachedLocalOrUnnamed;
1531 }
1532 };
1533 enum { NumTypeBits = 8 + llvm::BitWidth<TypeDependence> + 6 };
1534
1535protected:
1536 // These classes allow subclasses to somewhat cleanly pack bitfields
1537 // into Type.
1538
1539 class ArrayTypeBitfields {
1540 friend class ArrayType;
1541
1542 unsigned : NumTypeBits;
1543
1544 /// CVR qualifiers from declarations like
1545 /// 'int X[static restrict 4]'. For function parameters only.
1546 unsigned IndexTypeQuals : 3;
1547
1548 /// Storage class qualifiers from declarations like
1549 /// 'int X[static restrict 4]'. For function parameters only.
1550 /// Actually an ArrayType::ArraySizeModifier.
1551 unsigned SizeModifier : 3;
1552 };
1553
1554 class ConstantArrayTypeBitfields {
1555 friend class ConstantArrayType;
1556
1557 unsigned : NumTypeBits + 3 + 3;
1558
1559 /// Whether we have a stored size expression.
1560 unsigned HasStoredSizeExpr : 1;
1561 };
1562
1563 class BuiltinTypeBitfields {
1564 friend class BuiltinType;
1565
1566 unsigned : NumTypeBits;
1567
1568 /// The kind (BuiltinType::Kind) of builtin type this is.
1569 unsigned Kind : 8;
1570 };
1571
1572 /// FunctionTypeBitfields store various bits belonging to FunctionProtoType.
1573 /// Only common bits are stored here. Additional uncommon bits are stored
1574 /// in a trailing object after FunctionProtoType.
1575 class FunctionTypeBitfields {
1576 friend class FunctionProtoType;
1577 friend class FunctionType;
1578
1579 unsigned : NumTypeBits;
1580
1581 /// Extra information which affects how the function is called, like
1582 /// regparm and the calling convention.
1583 unsigned ExtInfo : 13;
1584
1585 /// The ref-qualifier associated with a \c FunctionProtoType.
1586 ///
1587 /// This is a value of type \c RefQualifierKind.
1588 unsigned RefQualifier : 2;
1589
1590 /// Used only by FunctionProtoType, put here to pack with the
1591 /// other bitfields.
1592 /// The qualifiers are part of FunctionProtoType because...
1593 ///
1594 /// C++ 8.3.5p4: The return type, the parameter type list and the
1595 /// cv-qualifier-seq, [...], are part of the function type.
1596 unsigned FastTypeQuals : Qualifiers::FastWidth;
1597 /// Whether this function has extended Qualifiers.
1598 unsigned HasExtQuals : 1;
1599
1600 /// The number of parameters this function has, not counting '...'.
1601 /// According to [implimits] 8 bits should be enough here but this is
1602 /// somewhat easy to exceed with metaprogramming and so we would like to
1603 /// keep NumParams as wide as reasonably possible.
1604 unsigned NumParams : 16;
1605
1606 /// The type of exception specification this function has.
1607 unsigned ExceptionSpecType : 4;
1608
1609 /// Whether this function has extended parameter information.
1610 unsigned HasExtParameterInfos : 1;
1611
1612 /// Whether the function is variadic.
1613 unsigned Variadic : 1;
1614
1615 /// Whether this function has a trailing return type.
1616 unsigned HasTrailingReturn : 1;
1617 };
1618
1619 class ObjCObjectTypeBitfields {
1620 friend class ObjCObjectType;
1621
1622 unsigned : NumTypeBits;
1623
1624 /// The number of type arguments stored directly on this object type.
1625 unsigned NumTypeArgs : 7;
1626
1627 /// The number of protocols stored directly on this object type.
1628 unsigned NumProtocols : 6;
1629
1630 /// Whether this is a "kindof" type.
1631 unsigned IsKindOf : 1;
1632 };
1633
1634 class ReferenceTypeBitfields {
1635 friend class ReferenceType;
1636
1637 unsigned : NumTypeBits;
1638
1639 /// True if the type was originally spelled with an lvalue sigil.
1640 /// This is never true of rvalue references but can also be false
1641 /// on lvalue references because of C++0x [dcl.typedef]p9,
1642 /// as follows:
1643 ///
1644 /// typedef int &ref; // lvalue, spelled lvalue
1645 /// typedef int &&rvref; // rvalue
1646 /// ref &a; // lvalue, inner ref, spelled lvalue
1647 /// ref &&a; // lvalue, inner ref
1648 /// rvref &a; // lvalue, inner ref, spelled lvalue
1649 /// rvref &&a; // rvalue, inner ref
1650 unsigned SpelledAsLValue : 1;
1651
1652 /// True if the inner type is a reference type. This only happens
1653 /// in non-canonical forms.
1654 unsigned InnerRef : 1;
1655 };
1656
1657 class TypeWithKeywordBitfields {
1658 friend class TypeWithKeyword;
1659
1660 unsigned : NumTypeBits;
1661
1662 /// An ElaboratedTypeKeyword. 8 bits for efficient access.
1663 unsigned Keyword : 8;
1664 };
1665
1666 enum { NumTypeWithKeywordBits = 8 };
1667
1668 class ElaboratedTypeBitfields {
1669 friend class ElaboratedType;
1670
1671 unsigned : NumTypeBits;
1672 unsigned : NumTypeWithKeywordBits;
1673
1674 /// Whether the ElaboratedType has a trailing OwnedTagDecl.
1675 unsigned HasOwnedTagDecl : 1;
1676 };
1677
1678 class VectorTypeBitfields {
1679 friend class VectorType;
1680 friend class DependentVectorType;
1681
1682 unsigned : NumTypeBits;
1683
1684 /// The kind of vector, either a generic vector type or some
1685 /// target-specific vector type such as for AltiVec or Neon.
1686 unsigned VecKind : 3;
1687 /// The number of elements in the vector.
1688 uint32_t NumElements;
1689 };
1690
1691 class AttributedTypeBitfields {
1692 friend class AttributedType;
1693
1694 unsigned : NumTypeBits;
1695
1696 /// An AttributedType::Kind
1697 unsigned AttrKind : 32 - NumTypeBits;
1698 };
1699
1700 class AutoTypeBitfields {
1701 friend class AutoType;
1702
1703 unsigned : NumTypeBits;
1704
1705 /// Was this placeholder type spelled as 'auto', 'decltype(auto)',
1706 /// or '__auto_type'? AutoTypeKeyword value.
1707 unsigned Keyword : 2;
1708
1709 /// The number of template arguments in the type-constraints, which is
1710 /// expected to be able to hold at least 1024 according to [implimits].
1711 /// However as this limit is somewhat easy to hit with template
1712 /// metaprogramming we'd prefer to keep it as large as possible.
1713 /// At the moment it has been left as a non-bitfield since this type
1714 /// safely fits in 64 bits as an unsigned, so there is no reason to
1715 /// introduce the performance impact of a bitfield.
1716 unsigned NumArgs;
1717 };
1718
1719 class SubstTemplateTypeParmPackTypeBitfields {
1720 friend class SubstTemplateTypeParmPackType;
1721
1722 unsigned : NumTypeBits;
1723
1724 /// The number of template arguments in \c Arguments, which is
1725 /// expected to be able to hold at least 1024 according to [implimits].
1726 /// However as this limit is somewhat easy to hit with template
1727 /// metaprogramming we'd prefer to keep it as large as possible.
1728 /// At the moment it has been left as a non-bitfield since this type
1729 /// safely fits in 64 bits as an unsigned, so there is no reason to
1730 /// introduce the performance impact of a bitfield.
1731 unsigned NumArgs;
1732 };
1733
1734 class TemplateSpecializationTypeBitfields {
1735 friend class TemplateSpecializationType;
1736
1737 unsigned : NumTypeBits;
1738
1739 /// Whether this template specialization type is a substituted type alias.
1740 unsigned TypeAlias : 1;
1741
1742 /// The number of template arguments named in this class template
1743 /// specialization, which is expected to be able to hold at least 1024
1744 /// according to [implimits]. However, as this limit is somewhat easy to
1745 /// hit with template metaprogramming we'd prefer to keep it as large
1746 /// as possible. At the moment it has been left as a non-bitfield since
1747 /// this type safely fits in 64 bits as an unsigned, so there is no reason
1748 /// to introduce the performance impact of a bitfield.
1749 unsigned NumArgs;
1750 };
1751
1752 class DependentTemplateSpecializationTypeBitfields {
1753 friend class DependentTemplateSpecializationType;
1754
1755 unsigned : NumTypeBits;
1756 unsigned : NumTypeWithKeywordBits;
1757
1758 /// The number of template arguments named in this class template
1759 /// specialization, which is expected to be able to hold at least 1024
1760 /// according to [implimits]. However, as this limit is somewhat easy to
1761 /// hit with template metaprogramming we'd prefer to keep it as large
1762 /// as possible. At the moment it has been left as a non-bitfield since
1763 /// this type safely fits in 64 bits as an unsigned, so there is no reason
1764 /// to introduce the performance impact of a bitfield.
1765 unsigned NumArgs;
1766 };
1767
1768 class PackExpansionTypeBitfields {
1769 friend class PackExpansionType;
1770
1771 unsigned : NumTypeBits;
1772
1773 /// The number of expansions that this pack expansion will
1774 /// generate when substituted (+1), which is expected to be able to
1775 /// hold at least 1024 according to [implimits]. However, as this limit
1776 /// is somewhat easy to hit with template metaprogramming we'd prefer to
1777 /// keep it as large as possible. At the moment it has been left as a
1778 /// non-bitfield since this type safely fits in 64 bits as an unsigned, so
1779 /// there is no reason to introduce the performance impact of a bitfield.
1780 ///
1781 /// This field will only have a non-zero value when some of the parameter
1782 /// packs that occur within the pattern have been substituted but others
1783 /// have not.
1784 unsigned NumExpansions;
1785 };
1786
1787 union {
1788 TypeBitfields TypeBits;
1789 ArrayTypeBitfields ArrayTypeBits;
1790 ConstantArrayTypeBitfields ConstantArrayTypeBits;
1791 AttributedTypeBitfields AttributedTypeBits;
1792 AutoTypeBitfields AutoTypeBits;
1793 BuiltinTypeBitfields BuiltinTypeBits;
1794 FunctionTypeBitfields FunctionTypeBits;
1795 ObjCObjectTypeBitfields ObjCObjectTypeBits;
1796 ReferenceTypeBitfields ReferenceTypeBits;
1797 TypeWithKeywordBitfields TypeWithKeywordBits;
1798 ElaboratedTypeBitfields ElaboratedTypeBits;
1799 VectorTypeBitfields VectorTypeBits;
1800 SubstTemplateTypeParmPackTypeBitfields SubstTemplateTypeParmPackTypeBits;
1801 TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits;
1802 DependentTemplateSpecializationTypeBitfields
1803 DependentTemplateSpecializationTypeBits;
1804 PackExpansionTypeBitfields PackExpansionTypeBits;
1805 };
1806
1807private:
1808 template <class T> friend class TypePropertyCache;
1809
1810 /// Set whether this type comes from an AST file.
1811 void setFromAST(bool V = true) const {
1812 TypeBits.FromAST = V;
1813 }
1814
1815protected:
1816 friend class ASTContext;
1817
1818 Type(TypeClass tc, QualType canon, TypeDependence Dependence)
1819 : ExtQualsTypeCommonBase(this,
1820 canon.isNull() ? QualType(this_(), 0) : canon) {
1821 static_assert(sizeof(*this) <= 8 + sizeof(ExtQualsTypeCommonBase),
1822 "changing bitfields changed sizeof(Type)!");
1823 static_assert(alignof(decltype(*this)) % sizeof(void *) == 0,
1824 "Insufficient alignment!");
1825 TypeBits.TC = tc;
1826 TypeBits.Dependence = static_cast<unsigned>(Dependence);
1827 TypeBits.CacheValid = false;
1828 TypeBits.CachedLocalOrUnnamed = false;
1829 TypeBits.CachedLinkage = NoLinkage;
1830 TypeBits.FromAST = false;
1831 }
1832
1833 // silence VC++ warning C4355: 'this' : used in base member initializer list
1834 Type *this_() { return this; }
1835
1836 void setDependence(TypeDependence D) {
1837 TypeBits.Dependence = static_cast<unsigned>(D);
1838 }
1839
1840 void addDependence(TypeDependence D) { setDependence(getDependence() | D); }
1841
1842public:
1843 friend class ASTReader;
1844 friend class ASTWriter;
1845 template <class T> friend class serialization::AbstractTypeReader;
1846 template <class T> friend class serialization::AbstractTypeWriter;
1847
1848 Type(const Type &) = delete;
1849 Type(Type &&) = delete;
1850 Type &operator=(const Type &) = delete;
1851 Type &operator=(Type &&) = delete;
1852
1853 TypeClass getTypeClass() const { return static_cast<TypeClass>(TypeBits.TC); }
1854
1855 /// Whether this type comes from an AST file.
1856 bool isFromAST() const { return TypeBits.FromAST; }
1857
1858 /// Whether this type is or contains an unexpanded parameter
1859 /// pack, used to support C++0x variadic templates.
1860 ///
1861 /// A type that contains a parameter pack shall be expanded by the
1862 /// ellipsis operator at some point. For example, the typedef in the
1863 /// following example contains an unexpanded parameter pack 'T':
1864 ///
1865 /// \code
1866 /// template<typename ...T>
1867 /// struct X {
1868 /// typedef T* pointer_types; // ill-formed; T is a parameter pack.
1869 /// };
1870 /// \endcode
1871 ///
1872 /// Note that this routine does not specify which parameter packs are unexpanded.
1873 bool containsUnexpandedParameterPack() const {
1874 return getDependence() & TypeDependence::UnexpandedPack;
1875 }
1876
1877 /// Determines if this type would be canonical if it had no further
1878 /// qualification.
1879 bool isCanonicalUnqualified() const {
1880 return CanonicalType == QualType(this, 0);
1881 }
1882
1883 /// Pull a single level of sugar off of this locally-unqualified type.
1884 /// Users should generally prefer SplitQualType::getSingleStepDesugaredType()
1885 /// or QualType::getSingleStepDesugaredType(const ASTContext&).
1886 QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
1887
1888 /// As an extension, we classify types as one of "sized" or "sizeless";
1889 /// every type is one or the other. Standard types are all sized;
1890 /// sizeless types are purely an extension.
1891 ///
1892 /// Sizeless types contain data with no specified size, alignment,
1893 /// or layout.
1894 bool isSizelessType() const;
1895 bool isSizelessBuiltinType() const;
1896
1897 /// Determines if this is a sizeless type supported by the
1898 /// 'arm_sve_vector_bits' type attribute, which can be applied to a single
1899 /// SVE vector or predicate, excluding tuple types such as svint32x4_t.
1900 bool isVLSTBuiltinType() const;
1901
1902 /// Returns the representative type for the element of an SVE builtin type.
1903 /// This is used to represent fixed-length SVE vectors created with the
1904 /// 'arm_sve_vector_bits' type attribute as VectorType.
1905 QualType getSveEltType(const ASTContext &Ctx) const;
1906
1907 /// Types are partitioned into 3 broad categories (C99 6.2.5p1):
1908 /// object types, function types, and incomplete types.
1909
1910 /// Return true if this is an incomplete type.
1911 /// A type that can describe objects, but which lacks information needed to
1912 /// determine its size (e.g. void, or a fwd declared struct). Clients of this
1913 /// routine will need to determine if the size is actually required.
1914 ///
1915 /// \param Def If non-null, and the type refers to some kind of declaration
1916 /// that can be completed (such as a C struct, C++ class, or Objective-C
1917 /// class), will be set to the declaration.
1918 bool isIncompleteType(NamedDecl **Def = nullptr) const;
1919
1920 /// Return true if this is an incomplete or object
1921 /// type, in other words, not a function type.
1922 bool isIncompleteOrObjectType() const {
1923 return !isFunctionType();
1924 }
1925
1926 /// Determine whether this type is an object type.
1927 bool isObjectType() const {
1928 // C++ [basic.types]p8:
1929 // An object type is a (possibly cv-qualified) type that is not a
1930 // function type, not a reference type, and not a void type.
1931 return !isReferenceType() && !isFunctionType() && !isVoidType();
1932 }
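  // Illustrative sketch (not in the original header): the three C99 6.2.5p1
  // categories can be told apart with the predicates above, e.g.:
  //
  //   void classify(QualType T) {
  //     if (T->isFunctionType())        { /* function type */ }
  //     else if (T->isIncompleteType()) { /* void, fwd-declared struct, ... */ }
  //     else                            { /* complete object type */ }
  //   }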
1933
1934 /// Return true if this is a literal type
1935 /// (C++11 [basic.types]p10)
1936 bool isLiteralType(const ASTContext &Ctx) const;
1937
1938 /// Determine if this type is a structural type, per C++20 [temp.param]p7.
1939 bool isStructuralType() const;
1940
1941 /// Test if this type is a standard-layout type.
1942 /// (C++0x [basic.type]p9)
1943 bool isStandardLayoutType() const;
1944
1945 /// Helper methods to distinguish type categories. All type predicates
1946 /// operate on the canonical type, ignoring typedefs and qualifiers.
1947
1948 /// Returns true if the type is a builtin type.
1949 bool isBuiltinType() const;
1950
1951 /// Test for a particular builtin type.
1952 bool isSpecificBuiltinType(unsigned K) const;
1953
1954 /// Test for a type which does not represent an actual type-system type but
1955 /// is instead used as a placeholder for various convenient purposes within
1956 /// Clang. All such types are BuiltinTypes.
1957 bool isPlaceholderType() const;
1958 const BuiltinType *getAsPlaceholderType() const;
1959
1960 /// Test for a specific placeholder type.
1961 bool isSpecificPlaceholderType(unsigned K) const;
1962
1963 /// Test for a placeholder type other than Overload; see
1964 /// BuiltinType::isNonOverloadPlaceholderType.
1965 bool isNonOverloadPlaceholderType() const;
1966
1967 /// isIntegerType() does *not* include complex integers (a GCC extension).
1968 /// isComplexIntegerType() can be used to test for complex integers.
1969 bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
1970 bool isEnumeralType() const;
1971
1972 /// Determine whether this type is a scoped enumeration type.
1973 bool isScopedEnumeralType() const;
1974 bool isBooleanType() const;
1975 bool isCharType() const;
1976 bool isWideCharType() const;
1977 bool isChar8Type() const;
1978 bool isChar16Type() const;
1979 bool isChar32Type() const;
1980 bool isAnyCharacterType() const;
1981 bool isIntegralType(const ASTContext &Ctx) const;
1982
1983 /// Determine whether this type is an integral or enumeration type.
1984 bool isIntegralOrEnumerationType() const;
1985
1986 /// Determine whether this type is an integral or unscoped enumeration type.
1987 bool isIntegralOrUnscopedEnumerationType() const;
1988 bool isUnscopedEnumerationType() const;
1989
1990 /// Floating point categories.
1991 bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
1992 /// isComplexType() does *not* include complex integers (a GCC extension).
1993 /// isComplexIntegerType() can be used to test for complex integers.
1994 bool isComplexType() const; // C99 6.2.5p11 (complex)
1995 bool isAnyComplexType() const; // C99 6.2.5p11 (complex) + Complex Int.
1996 bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex)
1997 bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half)
1998 bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661
1999 bool isBFloat16Type() const;
2000 bool isFloat128Type() const;
2001 bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
2002 bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
2003 bool isVoidType() const; // C99 6.2.5p19
2004 bool isScalarType() const; // C99 6.2.5p21 (arithmetic + pointers)
2005 bool isAggregateType() const;
2006 bool isFundamentalType() const;
2007 bool isCompoundType() const;
2008
2009 // Type Predicates: Check to see if this type is structurally the specified
2010 // type, ignoring typedefs and qualifiers.
2011 bool isFunctionType() const;
2012 bool isFunctionNoProtoType() const { return getAs<FunctionNoProtoType>(); }
2013 bool isFunctionProtoType() const { return getAs<FunctionProtoType>(); }
2014 bool isPointerType() const;
2015 bool isAnyPointerType() const; // Any C pointer or ObjC object pointer
2016 bool isBlockPointerType() const;
2017 bool isVoidPointerType() const;
2018 bool isReferenceType() const;
2019 bool isLValueReferenceType() const;
2020 bool isRValueReferenceType() const;
2021 bool isObjectPointerType() const;
2022 bool isFunctionPointerType() const;
2023 bool isFunctionReferenceType() const;
2024 bool isMemberPointerType() const;
2025 bool isMemberFunctionPointerType() const;
2026 bool isMemberDataPointerType() const;
2027 bool isArrayType() const;
2028 bool isConstantArrayType() const;
2029 bool isIncompleteArrayType() const;
2030 bool isVariableArrayType() const;
2031 bool isDependentSizedArrayType() const;
2032 bool isRecordType() const;
2033 bool isClassType() const;
2034 bool isStructureType() const;
2035 bool isObjCBoxableRecordType() const;
2036 bool isInterfaceType() const;
2037 bool isStructureOrClassType() const;
2038 bool isUnionType() const;
2039 bool isComplexIntegerType() const; // GCC _Complex integer type.
2040 bool isVectorType() const; // GCC vector type.
2041 bool isExtVectorType() const; // Extended vector type.
2042 bool isMatrixType() const; // Matrix type.
2043 bool isConstantMatrixType() const; // Constant matrix type.
2044 bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
2045 bool isObjCObjectPointerType() const; // pointer to ObjC object
2046 bool isObjCRetainableType() const; // ObjC object or block pointer
2047 bool isObjCLifetimeType() const; // (array of)* retainable type
2048 bool isObjCIndirectLifetimeType() const; // (pointer to)* lifetime type
2049 bool isObjCNSObjectType() const; // __attribute__((NSObject))
2050 bool isObjCIndependentClassType() const; // __attribute__((objc_independent_class))
2051 // FIXME: change this to 'raw' interface type, so we can use 'interface' type
2052 // for the common case.
2053 bool isObjCObjectType() const; // NSString or typeof(*(id)0)
2054 bool isObjCQualifiedInterfaceType() const; // NSString<foo>
2055 bool isObjCQualifiedIdType() const; // id<foo>
2056 bool isObjCQualifiedClassType() const; // Class<foo>
2057 bool isObjCObjectOrInterfaceType() const;
2058 bool isObjCIdType() const; // id
2059 bool isDecltypeType() const;
2060 /// Was this type written with the special inert-in-ARC __unsafe_unretained
2061 /// qualifier?
2062 ///
2063 /// This approximates the answer to the following question: if this
2064 /// translation unit were compiled in ARC, would this type be qualified
2065 /// with __unsafe_unretained?
2066 bool isObjCInertUnsafeUnretainedType() const {
2067 return hasAttr(attr::ObjCInertUnsafeUnretained);
2068 }
2069
2070 /// Whether the type is Objective-C 'id' or a __kindof type of an
2071 /// object type, e.g., __kindof NSView * or __kindof id
2072 /// <NSCopying>.
2073 ///
2074 /// \param bound Will be set to the bound on non-id subtype types,
2075 /// which will be a (possibly specialized) Objective-C class type, or
2076 /// null for 'id'.
2077 bool isObjCIdOrObjectKindOfType(const ASTContext &ctx,
2078 const ObjCObjectType *&bound) const;
2079
2080 bool isObjCClassType() const; // Class
2081
2082 /// Whether the type is Objective-C 'Class' or a __kindof type of a
2083 /// Class type, e.g., __kindof Class <NSCopying>.
2084 ///
2085 /// Unlike \c isObjCIdOrObjectKindOfType, there is no relevant bound
2086 /// here because Objective-C's type system cannot express "a class
2087 /// object for a subclass of NSFoo".
2088 bool isObjCClassOrClassKindOfType() const;
2089
2090 bool isBlockCompatibleObjCPointerType(ASTContext &ctx) const;
2091 bool isObjCSelType() const; // SEL
2092 bool isObjCBuiltinType() const; // 'id' or 'Class'
2093 bool isObjCARCBridgableType() const;
2094 bool isCARCBridgableType() const;
2095 bool isTemplateTypeParmType() const; // C++ template type parameter
2096 bool isNullPtrType() const; // C++11 std::nullptr_t
2097 bool isNothrowT() const; // C++ std::nothrow_t
2098 bool isAlignValT() const; // C++17 std::align_val_t
2099 bool isStdByteType() const; // C++17 std::byte
2100 bool isAtomicType() const; // C11 _Atomic()
2101 bool isUndeducedAutoType() const; // C++11 auto or
2102 // C++14 decltype(auto)
2103 bool isTypedefNameType() const; // typedef or alias template
2104
2105#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2106 bool is##Id##Type() const;
2107#include "clang/Basic/OpenCLImageTypes.def"
2108
2109 bool isImageType() const; // Any OpenCL image type
2110
2111 bool isSamplerT() const; // OpenCL sampler_t
2112 bool isEventT() const; // OpenCL event_t
2113 bool isClkEventT() const; // OpenCL clk_event_t
2114 bool isQueueT() const; // OpenCL queue_t
2115 bool isReserveIDT() const; // OpenCL reserve_id_t
2116
2117#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2118 bool is##Id##Type() const;
2119#include "clang/Basic/OpenCLExtensionTypes.def"
2120 // Type defined in cl_intel_device_side_avc_motion_estimation OpenCL extension
2121 bool isOCLIntelSubgroupAVCType() const;
2122 bool isOCLExtOpaqueType() const; // Any OpenCL extension type
2123
2124 bool isPipeType() const; // OpenCL pipe type
2125 bool isExtIntType() const; // Extended Int Type
2126 bool isOpenCLSpecificType() const; // Any OpenCL specific type
2127
2128 /// Determines if this type, which must satisfy
2129 /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather
2130 /// than implicitly __strong.
2131 bool isObjCARCImplicitlyUnretainedType() const;
2132
2133 /// Check if the type is the CUDA device builtin surface type.
2134 bool isCUDADeviceBuiltinSurfaceType() const;
2135 /// Check if the type is the CUDA device builtin texture type.
2136 bool isCUDADeviceBuiltinTextureType() const;
2137
2138 /// Return the implicit lifetime for this type, which must not be dependent.
2139 Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
2140
2141 enum ScalarTypeKind {
2142 STK_CPointer,
2143 STK_BlockPointer,
2144 STK_ObjCObjectPointer,
2145 STK_MemberPointer,
2146 STK_Bool,
2147 STK_Integral,
2148 STK_Floating,
2149 STK_IntegralComplex,
2150 STK_FloatingComplex,
2151 STK_FixedPoint
2152 };
2153
2154 /// Given that this is a scalar type, classify it.
2155 ScalarTypeKind getScalarTypeKind() const;
2156
2157 TypeDependence getDependence() const {
2158 return static_cast<TypeDependence>(TypeBits.Dependence);
2159 }
2160
2161 /// Whether this type is an error type.
2162 bool containsErrors() const {
2163 return getDependence() & TypeDependence::Error;
2164 }
2165
2166 /// Whether this type is a dependent type, meaning that its definition
2167 /// somehow depends on a template parameter (C++ [temp.dep.type]).
2168 bool isDependentType() const {
2169 return getDependence() & TypeDependence::Dependent;
2170 }
2171
2172 /// Determine whether this type is an instantiation-dependent type,
2173 /// meaning that the type involves a template parameter (even if the
2174 /// definition does not actually depend on the type substituted for that
2175 /// template parameter).
2176 bool isInstantiationDependentType() const {
2177 return getDependence() & TypeDependence::Instantiation;
2178 }
2179
2180 /// Determine whether this type is an undeduced type, meaning that
2181 /// it somehow involves a C++11 'auto' type or similar which has not yet been
2182 /// deduced.
2183 bool isUndeducedType() const;
2184
2185 /// Whether this type is a variably-modified type (C99 6.7.5).
2186 bool isVariablyModifiedType() const {
2187 return getDependence() & TypeDependence::VariablyModified;
2188 }
2189
2190 /// Whether this type involves a variable-length array type
2191 /// with a definite size.
2192 bool hasSizedVLAType() const;
2193
2194 /// Whether this type is or contains a local or unnamed type.
2195 bool hasUnnamedOrLocalType() const;
2196
2197 bool isOverloadableType() const;
2198
2199 /// Determine whether this type is a C++ elaborated-type-specifier.
2200 bool isElaboratedTypeSpecifier() const;
2201
2202 bool canDecayToPointerType() const;
2203
2204 /// Whether this type is represented natively as a pointer. This includes
2205 /// pointers, references, block pointers, and Objective-C interface,
2206 /// qualified id, and qualified interface types, as well as nullptr_t.
2207 bool hasPointerRepresentation() const;
2208
2209 /// Whether this type can represent an Objective-C pointer type for the
2210 /// purpose of GC'ability.
2211 bool hasObjCPointerRepresentation() const;
2212
2213 /// Determine whether this type has an integer representation
2214 /// of some sort, e.g., it is an integer type or a vector.
2215 bool hasIntegerRepresentation() const;
2216
2217 /// Determine whether this type has a signed integer representation
2218 /// of some sort, e.g., it is a signed integer type or a vector.
2219 bool hasSignedIntegerRepresentation() const;
2220
2221 /// Determine whether this type has an unsigned integer representation
2222 /// of some sort, e.g., it is an unsigned integer type or a vector.
2223 bool hasUnsignedIntegerRepresentation() const;
2224
2225 /// Determine whether this type has a floating-point representation
2226 /// of some sort, e.g., it is a floating-point type or a vector thereof.
2227 bool hasFloatingRepresentation() const;
2228
2229 // Type Checking Functions: Check to see if this type is structurally the
2230 // specified type, ignoring typedefs and qualifiers, and return a pointer to
2231 // the best type we can.
2232 const RecordType *getAsStructureType() const;
2233 /// NOTE: getAs*ArrayType are methods on ASTContext.
2234 const RecordType *getAsUnionType() const;
2235 const ComplexType *getAsComplexIntegerType() const; // GCC complex int type.
2236 const ObjCObjectType *getAsObjCInterfaceType() const;
2237
2238 // The following is a convenience method that returns an ObjCObjectPointerType
2239 // for an object declared using an interface.
2240 const ObjCObjectPointerType *getAsObjCInterfacePointerType() const;
2241 const ObjCObjectPointerType *getAsObjCQualifiedIdType() const;
2242 const ObjCObjectPointerType *getAsObjCQualifiedClassType() const;
2243 const ObjCObjectType *getAsObjCQualifiedInterfaceType() const;
2244
2245 /// Retrieves the CXXRecordDecl that this type refers to, either
2246 /// because the type is a RecordType or because it is the injected-class-name
2247 /// type of a class template or class template partial specialization.
2248 CXXRecordDecl *getAsCXXRecordDecl() const;
2249
2250 /// Retrieves the RecordDecl this type refers to.
2251 RecordDecl *getAsRecordDecl() const;
2252
2253 /// Retrieves the TagDecl that this type refers to, either
2254 /// because the type is a TagType or because it is the injected-class-name
2255 /// type of a class template or class template partial specialization.
2256 TagDecl *getAsTagDecl() const;
2257
2258 /// If this is a pointer or reference to a RecordType, return the
2259 /// CXXRecordDecl that the type refers to.
2260 ///
2261 /// If this is not a pointer or reference, or the type being pointed to does
2262 /// not refer to a CXXRecordDecl, returns NULL.
2263 const CXXRecordDecl *getPointeeCXXRecordDecl() const;
2264
2265 /// Get the DeducedType whose type will be deduced for a variable with
2266 /// an initializer of this type. This looks through declarators like pointer
2267 /// types, but not through decltype or typedefs.
2268 DeducedType *getContainedDeducedType() const;
2269
2270 /// Get the AutoType whose type will be deduced for a variable with
2271 /// an initializer of this type. This looks through declarators like pointer
2272 /// types, but not through decltype or typedefs.
2273 AutoType *getContainedAutoType() const {
2274 return dyn_cast_or_null<AutoType>(getContainedDeducedType());
2275 }
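  // Illustrative sketch (not in the original header): for a declaration
  // written as 'auto *p = ...;', the pointer declarator is looked through:
  //
  //   QualType T = ...;                          // 'auto *' as written
  //   if (AutoType *AT = T->getContainedAutoType())
  //     (void)AT;  // the 'auto' whose type will be deduced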
2276
2277 /// Determine whether this type was written with a leading 'auto'
2278 /// corresponding to a trailing return type (possibly for a nested
2279 /// function type within a pointer to function type or similar).
2280 bool hasAutoForTrailingReturnType() const;
2281
2282 /// Member-template getAs<specific type>. Look through sugar for
2283 /// an instance of \<specific type>. This scheme will eventually
2284 /// replace the specific getAsXXXX methods above.
2285 ///
2286 /// There are some specializations of this member template listed
2287 /// immediately following this class.
2288 template <typename T> const T *getAs() const;
2289
2290 /// Member-template getAsAdjusted<specific type>. Look through specific kinds
2291 /// of sugar (parens, attributes, etc) for an instance of \<specific type>.
2292 /// This is used when you need to walk over sugar nodes that represent some
2293 /// kind of type adjustment from a type that was written as a \<specific type>
2294 /// to another type that is still canonically a \<specific type>.
2295 template <typename T> const T *getAsAdjusted() const;
2296
2297 /// A variant of getAs<> for array types which silently discards
2298 /// qualifiers from the outermost type.
2299 const ArrayType *getAsArrayTypeUnsafe() const;
2300
2301 /// Member-template castAs<specific type>. Look through sugar for
2302 /// the underlying instance of \<specific type>.
2303 ///
2304 /// This method has the same relationship to getAs<T> as cast<T> has
2305 /// to dyn_cast<T>; which is to say, the underlying type *must*
2306 /// have the intended type, and this method will never return null.
2307 template <typename T> const T *castAs() const;
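  // Illustrative contrast (not in the original header) between getAs<> and
  // castAs<>, assuming a QualType T is in scope:
  //
  //   if (const auto *FPT = T->getAs<FunctionProtoType>())
  //     unsigned NumParams = FPT->getNumParams();  // may fail: test first
  //   const auto *PT = T->castAs<PointerType>();   // T *must* be a pointer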
2308
2309 /// A variant of castAs<> for array types which silently discards
2310 /// qualifiers from the outermost type.
2311 const ArrayType *castAsArrayTypeUnsafe() const;
2312
2313 /// Determine whether this type had the specified attribute applied to it
2314 /// (looking through top-level type sugar).
2315 bool hasAttr(attr::Kind AK) const;
2316
2317 /// Get the base element type of this type, potentially discarding type
2318 /// qualifiers. This should never be used when type qualifiers
2319 /// are meaningful.
2320 const Type *getBaseElementTypeUnsafe() const;
2321
2322 /// If this is an array type, return the element type of the array,
2323 /// potentially with type qualifiers missing.
2324 /// This should never be used when type qualifiers are meaningful.
2325 const Type *getArrayElementTypeNoTypeQual() const;
2326
2327 /// If this is a pointer type, return the pointee type.
2328 /// If this is an array type, return the array element type.
2329 /// This should never be used when type qualifiers are meaningful.
2330 const Type *getPointeeOrArrayElementType() const;
2331
2332 /// If this is a pointer, ObjC object pointer, or block
2333 /// pointer, this returns the respective pointee.
2334 QualType getPointeeType() const;
2335
2336 /// Return the specified type with any "sugar" removed from the type,
2337 /// removing any typedefs, typeofs, etc., as well as any qualifiers.
2338 const Type *getUnqualifiedDesugaredType() const;
2339
2340 /// More type predicates useful for type checking/promotion
2341 bool isPromotableIntegerType() const; // C99 6.3.1.1p2
2342
2343 /// Return true if this is an integer type that is
2344 /// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
2345 /// or an enum decl which has a signed representation.
2346 bool isSignedIntegerType() const;
2347
2348 /// Return true if this is an integer type that is
2349 /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
2350 /// or an enum decl which has an unsigned representation.
2351 bool isUnsignedIntegerType() const;
2352
2353 /// Determines whether this is an integer type that is signed or an
2354 /// enumeration type whose underlying type is a signed integer type.
2355 bool isSignedIntegerOrEnumerationType() const;
2356
2357 /// Determines whether this is an integer type that is unsigned or an
2358 /// enumeration type whose underlying type is an unsigned integer type.
2359 bool isUnsignedIntegerOrEnumerationType() const;
2360
2361 /// Return true if this is a fixed point type according to
2362 /// ISO/IEC JTC1 SC22 WG14 N1169.
2363 bool isFixedPointType() const;
2364
2365 /// Return true if this is a fixed point or integer type.
2366 bool isFixedPointOrIntegerType() const;
2367
2368 /// Return true if this is a saturated fixed point type according to
2369 /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
2370 bool isSaturatedFixedPointType() const;
2371
2372 /// Return true if this is an unsaturated fixed point type according to
2373 /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
2374 bool isUnsaturatedFixedPointType() const;
2375
2376 /// Return true if this is a fixed point type that is signed according
2377 /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
2378 bool isSignedFixedPointType() const;
2379
2380 /// Return true if this is a fixed point type that is unsigned according
2381 /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
2382 bool isUnsignedFixedPointType() const;
2383
2384 /// Return true if this is not a variable sized type,
2385 /// according to the rules of C99 6.7.5p3. It is not legal to call this on
2386 /// incomplete types.
2387 bool isConstantSizeType() const;
2388
2389 /// Returns true if this type can be represented by some
2390 /// set of type specifiers.
2391 bool isSpecifierType() const;
2392
2393 /// Determine the linkage of this type.
2394 Linkage getLinkage() const;
2395
2396 /// Determine the visibility of this type.
2397 Visibility getVisibility() const {
2398 return getLinkageAndVisibility().getVisibility();
2399 }
2400
2401 /// Return true if the visibility was explicitly set in the code.
2402 bool isVisibilityExplicit() const {
2403 return getLinkageAndVisibility().isVisibilityExplicit();
2404 }
2405
2406 /// Determine the linkage and visibility of this type.
2407 LinkageInfo getLinkageAndVisibility() const;
2408
2409 /// True if the computed linkage is valid. Used for consistency
2410 /// checking. Should always return true.
2411 bool isLinkageValid() const;
2412
2413 /// Determine the nullability of the given type.
2414 ///
2415 /// Note that nullability is only captured as sugar within the type
2416 /// system, not as part of the canonical type, so nullability will
2417 /// be lost by canonicalization and desugaring.
2418 Optional<NullabilityKind> getNullability(const ASTContext &context) const;
2419
2420 /// Determine whether the given type can have a nullability
2421 /// specifier applied to it, i.e., if it is any kind of pointer type.
2422 ///
2423 /// \param ResultIfUnknown The value to return if we don't yet know whether
2424 /// this type can have nullability because it is dependent.
2425 bool canHaveNullability(bool ResultIfUnknown = true) const;
2426
2427 /// Retrieve the set of substitutions required when accessing a member
2428 /// of the Objective-C receiver type that is declared in the given context.
2429 ///
2430 /// \c *this is the type of the object we're operating on, e.g., the
2431 /// receiver for a message send or the base of a property access, and is
2432 /// expected to be of some object or object pointer type.
2433 ///
2434 /// \param dc The declaration context for which we are building up a
2435 /// substitution mapping, which should be an Objective-C class, extension,
2436 /// category, or a method within one of them.
2437 ///
2438 /// \returns an array of type arguments that can be substituted for
2439 /// the type parameters of the given declaration context in any type described
2440 /// within that context, or an empty optional to indicate that no
2441 /// substitution is required.
2442 Optional<ArrayRef<QualType>>
2443 getObjCSubstitutions(const DeclContext *dc) const;
2444
2445 /// Determines if this is an ObjC interface type that may accept type
2446 /// parameters.
2447 bool acceptsObjCTypeParams() const;
2448
2449 const char *getTypeClassName() const;
2450
2451 QualType getCanonicalTypeInternal() const {
2452 return CanonicalType;
2453 }
2454
2455 CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
2456 void dump() const;
2457 void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
2458};
2459
2460/// This will check for a TypedefType by removing any existing sugar
2461/// until it reaches a TypedefType or a non-sugared type.
2462template <> const TypedefType *Type::getAs() const;
2463
2464/// This will check for a TemplateSpecializationType by removing any
2465/// existing sugar until it reaches a TemplateSpecializationType or a
2466/// non-sugared type.
2467template <> const TemplateSpecializationType *Type::getAs() const;
2468
2469/// This will check for an AttributedType by removing any existing sugar
2470/// until it reaches an AttributedType or a non-sugared type.
2471template <> const AttributedType *Type::getAs() const;
2472
2473// We can do canonical leaf types faster, because we don't have to
2474// worry about preserving child type decoration.
2475#define TYPE(Class, Base)
2476#define LEAF_TYPE(Class) \
2477template <> inline const Class##Type *Type::getAs() const { \
2478 return dyn_cast<Class##Type>(CanonicalType); \
2479} \
2480template <> inline const Class##Type *Type::castAs() const { \
2481 return cast<Class##Type>(CanonicalType); \
2482}
2483#include "clang/AST/TypeNodes.inc"
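// For reference, a sketch of what the macros above expand to for one leaf
// node such as BuiltinType (illustrative; the actual list of leaf types
// comes from TypeNodes.inc):
//
//   template <> inline const BuiltinType *Type::getAs() const {
//     return dyn_cast<BuiltinType>(CanonicalType);
//   }
//   template <> inline const BuiltinType *Type::castAs() const {
//     return cast<BuiltinType>(CanonicalType);
//   }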
2484
2485/// This class is used for builtin types like 'int'. Builtin
2486/// types are always canonical and have a literal name field.
2487class BuiltinType : public Type {
2488public:
2489 enum Kind {
2490// OpenCL image types
2491#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) Id,
2492#include "clang/Basic/OpenCLImageTypes.def"
2493// OpenCL extension types
2494#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) Id,
2495#include "clang/Basic/OpenCLExtensionTypes.def"
2496// SVE Types
2497#define SVE_TYPE(Name, Id, SingletonId) Id,
2498#include "clang/Basic/AArch64SVEACLETypes.def"
2499// PPC MMA Types
2500#define PPC_VECTOR_TYPE(Name, Id, Size) Id,
2501#include "clang/Basic/PPCTypes.def"
2502// RVV Types
2503#define RVV_TYPE(Name, Id, SingletonId) Id,
2504#include "clang/Basic/RISCVVTypes.def"
2505// All other builtin types
2506#define BUILTIN_TYPE(Id, SingletonId) Id,
2507#define LAST_BUILTIN_TYPE(Id) LastKind = Id
2508#include "clang/AST/BuiltinTypes.def"
2509 };
2510
2511private:
2512 friend class ASTContext; // ASTContext creates these.
2513
2514 BuiltinType(Kind K)
2515 : Type(Builtin, QualType(),
2516 K == Dependent ? TypeDependence::DependentInstantiation
2517 : TypeDependence::None) {
2518 BuiltinTypeBits.Kind = K;
2519 }
2520
2521public:
2522 Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
2523 StringRef getName(const PrintingPolicy &Policy) const;
2524
2525 const char *getNameAsCString(const PrintingPolicy &Policy) const {
2526 // The StringRef is null-terminated.
2527 StringRef str = getName(Policy);
2528 assert(!str.empty() && str.data()[str.size()] == '\0')((void)0);
2529 return str.data();
2530 }
2531
2532 bool isSugared() const { return false; }
2533 QualType desugar() const { return QualType(this, 0); }
2534
2535 bool isInteger() const {
2536 return getKind() >= Bool && getKind() <= Int128;
2537 }
2538
2539 bool isSignedInteger() const {
2540 return getKind() >= Char_S && getKind() <= Int128;
2541 }
2542
2543 bool isUnsignedInteger() const {
2544 return getKind() >= Bool && getKind() <= UInt128;
2545 }
2546
2547 bool isFloatingPoint() const {
2548 return getKind() >= Half && getKind() <= Float128;
2549 }
2550
2551 /// Determines whether the given kind corresponds to a placeholder type.
2552 static bool isPlaceholderTypeKind(Kind K) {
2553 return K >= Overload;
2554 }
2555
2556 /// Determines whether this type is a placeholder type, i.e. a type
2557 /// which cannot appear in arbitrary positions in a fully-formed
2558 /// expression.
2559 bool isPlaceholderType() const {
2560 return isPlaceholderTypeKind(getKind());
2561 }
2562
2563 /// Determines whether this type is a placeholder type other than
2564 /// Overload. Most placeholder types require only syntactic
2565 /// information about their context in order to be resolved (e.g.
2566 /// whether it is a call expression), which means they can (and
2567 /// should) be resolved in an earlier "phase" of analysis.
2568 /// Overload expressions sometimes pick up further information
2569 /// from their context, like whether the context expects a
2570 /// specific function-pointer type, and so frequently need
2571 /// special treatment.
2572 bool isNonOverloadPlaceholderType() const {
2573 return getKind() > Overload;
2574 }
2575
2576 static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
2577};
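// Illustrative sketch (not in the original header): classifying a type by
// its builtin kind, assuming a QualType T is in scope:
//
//   if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
//     if (BT->isInteger())            { /* Bool .. Int128 */ }
//     else if (BT->isFloatingPoint()) { /* Half .. Float128 */ }
//   }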
2578
2579/// Complex values, per C99 6.2.5p11. This supports the C99 complex
2580/// types (_Complex float etc) as well as the GCC integer complex extensions.
2581class ComplexType : public Type, public llvm::FoldingSetNode {
2582 friend class ASTContext; // ASTContext creates these.
2583
2584 QualType ElementType;
2585
2586 ComplexType(QualType Element, QualType CanonicalPtr)
2587 : Type(Complex, CanonicalPtr, Element->getDependence()),
2588 ElementType(Element) {}
2589
2590public:
2591 QualType getElementType() const { return ElementType; }
2592
2593 bool isSugared() const { return false; }
2594 QualType desugar() const { return QualType(this, 0); }
2595
2596 void Profile(llvm::FoldingSetNodeID &ID) {
2597 Profile(ID, getElementType());
2598 }
2599
2600 static void Profile(llvm::FoldingSetNodeID &ID, QualType Element) {
2601 ID.AddPointer(Element.getAsOpaquePtr());
2602 }
2603
2604 static bool classof(const Type *T) { return T->getTypeClass() == Complex; }
2605};
2606
2607/// Sugar for parentheses used when specifying types.
2608class ParenType : public Type, public llvm::FoldingSetNode {
2609 friend class ASTContext; // ASTContext creates these.
2610
2611 QualType Inner;
2612
2613 ParenType(QualType InnerType, QualType CanonType)
2614 : Type(Paren, CanonType, InnerType->getDependence()), Inner(InnerType) {}
2615
2616public:
2617 QualType getInnerType() const { return Inner; }
2618
2619 bool isSugared() const { return true; }
2620 QualType desugar() const { return getInnerType(); }
2621
2622 void Profile(llvm::FoldingSetNodeID &ID) {
2623 Profile(ID, getInnerType());
2624 }
2625
2626 static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) {
2627 Inner.Profile(ID);
2628 }
2629
2630 static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
2631};
2632
2633/// PointerType - C99 6.7.5.1 - Pointer Declarators.
2634class PointerType : public Type, public llvm::FoldingSetNode {
2635 friend class ASTContext; // ASTContext creates these.
2636
2637 QualType PointeeType;
2638
2639 PointerType(QualType Pointee, QualType CanonicalPtr)
2640 : Type(Pointer, CanonicalPtr, Pointee->getDependence()),
2641 PointeeType(Pointee) {}
2642
2643public:
2644 QualType getPointeeType() const { return PointeeType; }
2645
2646 bool isSugared() const { return false; }
2647 QualType desugar() const { return QualType(this, 0); }
2648
2649 void Profile(llvm::FoldingSetNodeID &ID) {
2650 Profile(ID, getPointeeType());
2651 }
2652
2653 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
2654 ID.AddPointer(Pointee.getAsOpaquePtr());
2655 }
2656
2657 static bool classof(const Type *T) { return T->getTypeClass() == Pointer; }
2658};
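// Sketch of how the Profile methods above support uniquing (simplified and
// illustrative; the real logic lives in ASTContext::getPointerType):
//
//   llvm::FoldingSetNodeID ID;
//   PointerType::Profile(ID, PointeeTy);
//   void *InsertPos = nullptr;
//   if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
//     return QualType(PT, 0);  // reuse the node: by-pointer equality holds
//   // ...otherwise allocate a new PointerType and InsertNode(PT, InsertPos).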
2659
2660/// Represents a type which was implicitly adjusted by the semantic
2661/// engine for arbitrary reasons. For example, array and function types can
2662/// decay, and function types can have their calling conventions adjusted.
2663class AdjustedType : public Type, public llvm::FoldingSetNode {
2664 QualType OriginalTy;
2665 QualType AdjustedTy;
2666
2667protected:
2668 friend class ASTContext; // ASTContext creates these.
2669
2670 AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy,
2671 QualType CanonicalPtr)
2672 : Type(TC, CanonicalPtr, OriginalTy->getDependence()),
2673 OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {}
2674
2675public:
2676 QualType getOriginalType() const { return OriginalTy; }
2677 QualType getAdjustedType() const { return AdjustedTy; }
2678
2679 bool isSugared() const { return true; }
2680 QualType desugar() const { return AdjustedTy; }
2681
2682 void Profile(llvm::FoldingSetNodeID &ID) {
2683 Profile(ID, OriginalTy, AdjustedTy);
2684 }
2685
2686 static void Profile(llvm::FoldingSetNodeID &ID, QualType Orig, QualType New) {
2687 ID.AddPointer(Orig.getAsOpaquePtr());
2688 ID.AddPointer(New.getAsOpaquePtr());
2689 }
2690
2691 static bool classof(const Type *T) {
2692 return T->getTypeClass() == Adjusted || T->getTypeClass() == Decayed;
2693 }
2694};
2695
2696/// Represents a pointer type decayed from an array or function type.
2697class DecayedType : public AdjustedType {
2698 friend class ASTContext; // ASTContext creates these.
2699
2700 inline
2701 DecayedType(QualType OriginalType, QualType Decayed, QualType Canonical);
2702
2703public:
2704 QualType getDecayedType() const { return getAdjustedType(); }
2705
2706 inline QualType getPointeeType() const;
2707
2708 static bool classof(const Type *T) { return T->getTypeClass() == Decayed; }
2709};
2710
2711/// Pointer to a block type.
2712/// This type represents types syntactically written as
2713/// "void (^)(int)", etc. The pointee is required to always be a function type.
2714class BlockPointerType : public Type, public llvm::FoldingSetNode {
2715 friend class ASTContext; // ASTContext creates these.
2716
2717 // Block is some kind of pointer type
2718 QualType PointeeType;
2719
2720 BlockPointerType(QualType Pointee, QualType CanonicalCls)
2721 : Type(BlockPointer, CanonicalCls, Pointee->getDependence()),
2722 PointeeType(Pointee) {}
2723
2724public:
2725 // Get the pointee type. Pointee is required to always be a function type.
2726 QualType getPointeeType() const { return PointeeType; }
2727
2728 bool isSugared() const { return false; }
2729 QualType desugar() const { return QualType(this, 0); }
2730
2731 void Profile(llvm::FoldingSetNodeID &ID) {
2732 Profile(ID, getPointeeType());
2733 }
2734
2735 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
2736 ID.AddPointer(Pointee.getAsOpaquePtr());
2737 }
2738
2739 static bool classof(const Type *T) {
2740 return T->getTypeClass() == BlockPointer;
2741 }
2742};
2743
2744/// Base for LValueReferenceType and RValueReferenceType
2745class ReferenceType : public Type, public llvm::FoldingSetNode {
2746 QualType PointeeType;
2747
2748protected:
2749 ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
2750 bool SpelledAsLValue)
2751 : Type(tc, CanonicalRef, Referencee->getDependence()),
2752 PointeeType(Referencee) {
2753 ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
2754 ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
2755 }
2756
2757public:
2758 bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
2759 bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }
2760
2761 QualType getPointeeTypeAsWritten() const { return PointeeType; }
2762
2763 QualType getPointeeType() const {
2764 // FIXME: this might strip inner qualifiers; okay?
2765 const ReferenceType *T = this;
2766 while (T->isInnerRef())
2767 T = T->PointeeType->castAs<ReferenceType>();
2768 return T->PointeeType;
2769 }
2770
2771 void Profile(llvm::FoldingSetNodeID &ID) {
2772 Profile(ID, PointeeType, isSpelledAsLValue());
2773 }
2774
2775 static void Profile(llvm::FoldingSetNodeID &ID,
2776 QualType Referencee,
2777 bool SpelledAsLValue) {
2778 ID.AddPointer(Referencee.getAsOpaquePtr());
2779 ID.AddBoolean(SpelledAsLValue);
2780 }
2781
2782 static bool classof(const Type *T) {
2783 return T->getTypeClass() == LValueReference ||
2784 T->getTypeClass() == RValueReference;
2785 }
2786};
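// Illustrative examples (not in the original header) matching the
// SpelledAsLValue comment in the bitfields above:
//
//   typedef int &lref;
//   typedef int &&rref;
//   // 'lref &&' collapses to 'int &'  -> lvalue reference, inner ref
//   // 'rref &&' collapses to 'int &&' -> rvalue reference, inner ref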
2787
2788/// An lvalue reference type, per C++11 [dcl.ref].
2789class LValueReferenceType : public ReferenceType {
2790 friend class ASTContext; // ASTContext creates these
2791
2792 LValueReferenceType(QualType Referencee, QualType CanonicalRef,
2793 bool SpelledAsLValue)
2794 : ReferenceType(LValueReference, Referencee, CanonicalRef,
2795 SpelledAsLValue) {}
2796
2797public:
2798 bool isSugared() const { return false; }
2799 QualType desugar() const { return QualType(this, 0); }
2800
2801 static bool classof(const Type *T) {
2802 return T->getTypeClass() == LValueReference;
2803 }
2804};
2805
2806/// An rvalue reference type, per C++11 [dcl.ref].
2807class RValueReferenceType : public ReferenceType {
2808 friend class ASTContext; // ASTContext creates these
2809
2810 RValueReferenceType(QualType Referencee, QualType CanonicalRef)
2811 : ReferenceType(RValueReference, Referencee, CanonicalRef, false) {}
2812
2813public:
2814 bool isSugared() const { return false; }
2815 QualType desugar() const { return QualType(this, 0); }
2816
2817 static bool classof(const Type *T) {
2818 return T->getTypeClass() == RValueReference;
2819 }
2820};
2821
2822/// A pointer to member type per C++ 8.3.3 - Pointers to members.
2823///
2824/// This includes both pointers to data members and pointers to member functions.
2825class MemberPointerType : public Type, public llvm::FoldingSetNode {
2826 friend class ASTContext; // ASTContext creates these.
2827
2828 QualType PointeeType;
2829
2830 /// The class of which the pointee is a member. Must ultimately be a
2831 /// RecordType, but could be a typedef or a template parameter too.
2832 const Type *Class;
2833
2834 MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr)
2835 : Type(MemberPointer, CanonicalPtr,
2836 (Cls->getDependence() & ~TypeDependence::VariablyModified) |
2837 Pointee->getDependence()),
2838 PointeeType(Pointee), Class(Cls) {}
2839
2840public:
2841 QualType getPointeeType() const { return PointeeType; }
2842
2843 /// Returns true if the member type (i.e. the pointee type) is a
2844 /// function type rather than a data-member type.
2845 bool isMemberFunctionPointer() const {
2846 return PointeeType->isFunctionProtoType();
2847 }
2848
2849 /// Returns true if the member type (i.e. the pointee type) is a
2850 /// data type rather than a function type.
2851 bool isMemberDataPointer() const {
2852 return !PointeeType->isFunctionProtoType();
2853 }
2854
2855 const Type *getClass() const { return Class; }
2856 CXXRecordDecl *getMostRecentCXXRecordDecl() const;
2857
2858 bool isSugared() const { return false; }
2859 QualType desugar() const { return QualType(this, 0); }
2860
2861 void Profile(llvm::FoldingSetNodeID &ID) {
2862 Profile(ID, getPointeeType(), getClass());
2863 }
2864
2865 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
2866 const Type *Class) {
2867 ID.AddPointer(Pointee.getAsOpaquePtr());
2868 ID.AddPointer(Class);
2869 }
2870
2871 static bool classof(const Type *T) {
2872 return T->getTypeClass() == MemberPointer;
2873 }
2874};
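// Illustrative examples (not in the original header) of the two flavors of
// member pointer this class models:
//
//   struct S { int d; void f(); };
//   int S::*pd = &S::d;       // isMemberDataPointer()
//   void (S::*pf)() = &S::f;  // isMemberFunctionPointer()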
2875
2876/// Represents an array type, per C99 6.7.5.2 - Array Declarators.
2877class ArrayType : public Type, public llvm::FoldingSetNode {
2878public:
2879 /// Capture whether this is a normal array (e.g. int X[4]),
2880 /// an array with a static size (e.g. int X[static 4]), or an array
2881 /// with a star size (e.g. int X[*]).
2882 /// 'static' is only allowed on function parameters.
2883 enum ArraySizeModifier {
2884 Normal, Static, Star
2885 };
2886
2887private:
2888 /// The element type of the array.
2889 QualType ElementType;
2890
2891protected:
2892 friend class ASTContext; // ASTContext creates these.
2893
2894 ArrayType(TypeClass tc, QualType et, QualType can, ArraySizeModifier sm,
2895 unsigned tq, const Expr *sz = nullptr);
2896
2897public:
2898 QualType getElementType() const { return ElementType; }
2899
2900 ArraySizeModifier getSizeModifier() const {
2901 return ArraySizeModifier(ArrayTypeBits.SizeModifier);
2902 }
2903
2904 Qualifiers getIndexTypeQualifiers() const {
2905 return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers());
2906 }
2907
2908 unsigned getIndexTypeCVRQualifiers() const {
2909 return ArrayTypeBits.IndexTypeQuals;
2910 }
2911
2912 static bool classof(const Type *T) {
2913 return T->getTypeClass() == ConstantArray ||
2914 T->getTypeClass() == VariableArray ||
2915 T->getTypeClass() == IncompleteArray ||
2916 T->getTypeClass() == DependentSizedArray;
2917 }
2918};
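// Illustrative C99 parameter declarations (not in the original header)
// showing the three ArraySizeModifier values:
//
//   void f(int a[4]);         // Normal
//   void g(int a[static 4]);  // Static: caller passes at least 4 elements
//   void h(int a[*]);         // Star: VLA of unspecified size (prototypes only)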
2919
2920/// Represents the canonical version of C arrays with a specified constant size.
2921/// For example, the canonical type for 'int A[4 + 4*100]' is a
2922/// ConstantArrayType where the element type is 'int' and the size is 404.
2923class ConstantArrayType final
2924 : public ArrayType,
2925 private llvm::TrailingObjects<ConstantArrayType, const Expr *> {
2926 friend class ASTContext; // ASTContext creates these.
2927 friend TrailingObjects;
2928
2929 llvm::APInt Size; // Allows us to unique the type.
2930
2931 ConstantArrayType(QualType et, QualType can, const llvm::APInt &size,
2932 const Expr *sz, ArraySizeModifier sm, unsigned tq)
2933 : ArrayType(ConstantArray, et, can, sm, tq, sz), Size(size) {
2934 ConstantArrayTypeBits.HasStoredSizeExpr = sz != nullptr;
2935 if (ConstantArrayTypeBits.HasStoredSizeExpr) {
2936 assert(!can.isNull() && "canonical constant array should not have size")((void)0);
2937 *getTrailingObjects<const Expr*>() = sz;
2938 }
2939 }
2940
2941 unsigned numTrailingObjects(OverloadToken<const Expr*>) const {
2942 return ConstantArrayTypeBits.HasStoredSizeExpr;
2943 }
2944
2945public:
2946 const llvm::APInt &getSize() const { return Size; }
2947 const Expr *getSizeExpr() const {
2948 return ConstantArrayTypeBits.HasStoredSizeExpr
2949 ? *getTrailingObjects<const Expr *>()
2950 : nullptr;
2951 }
2952 bool isSugared() const { return false; }
2953 QualType desugar() const { return QualType(this, 0); }
2954
2955 /// Determine the number of bits required to address a member of
2956 /// an array with the given element type and number of elements.
2957 static unsigned getNumAddressingBits(const ASTContext &Context,
2958 QualType ElementType,
2959 const llvm::APInt &NumElements);
2960
2961 /// Determine the maximum number of active bits that an array's size
2962 /// can require, which limits the maximum size of the array.
2963 static unsigned getMaxSizeBits(const ASTContext &Context);
2964
2965 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
2966 Profile(ID, Ctx, getElementType(), getSize(), getSizeExpr(),
2967 getSizeModifier(), getIndexTypeCVRQualifiers());
2968 }
2969
2970 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx,
2971 QualType ET, const llvm::APInt &ArraySize,
2972 const Expr *SizeExpr, ArraySizeModifier SizeMod,
2973 unsigned TypeQuals);
2974
2975 static bool classof(const Type *T) {
2976 return T->getTypeClass() == ConstantArray;
2977 }
2978};
2979
2980/// Represents a C array with an unspecified size. For example 'int A[]' has
2981/// an IncompleteArrayType where the element type is 'int' and the size is
2982/// unspecified.
2983class IncompleteArrayType : public ArrayType {
2984 friend class ASTContext; // ASTContext creates these.
2985
2986 IncompleteArrayType(QualType et, QualType can,
2987 ArraySizeModifier sm, unsigned tq)
2988 : ArrayType(IncompleteArray, et, can, sm, tq) {}
2989
2990public:
2991 friend class StmtIteratorBase;
2992
2993 bool isSugared() const { return false; }
2994 QualType desugar() const { return QualType(this, 0); }
2995
2996 static bool classof(const Type *T) {
2997 return T->getTypeClass() == IncompleteArray;
2998 }
2999
3000 void Profile(llvm::FoldingSetNodeID &ID) {
3001 Profile(ID, getElementType(), getSizeModifier(),
3002 getIndexTypeCVRQualifiers());
3003 }
3004
3005 static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
3006 ArraySizeModifier SizeMod, unsigned TypeQuals) {
3007 ID.AddPointer(ET.getAsOpaquePtr());
3008 ID.AddInteger(SizeMod);
3009 ID.AddInteger(TypeQuals);
3010 }
3011};
3012
3013/// Represents a C array with a specified size that is not an
3014/// integer-constant-expression. For example, 'int s[x+foo()]'.
3015/// Since the size expression is an arbitrary expression, we store it as such.
3016///
3017/// Note: VariableArrayType's aren't uniqued (since the expressions aren't) and
3018/// should not be: two lexically equivalent variable array types could mean
3019/// different things, for example, these variables do not have the same type
3020/// dynamically:
3021///
3022/// void foo(int x) {
3023/// int Y[x];
3024/// ++x;
3025/// int Z[x];
3026/// }
3027class VariableArrayType : public ArrayType {
3028 friend class ASTContext; // ASTContext creates these.
3029
3030 /// An assignment-expression. VLA's are only permitted within
3031 /// a function block.
3032 Stmt *SizeExpr;
3033
3034 /// The range spanned by the left and right array brackets.
3035 SourceRange Brackets;
3036
3037 VariableArrayType(QualType et, QualType can, Expr *e,
3038 ArraySizeModifier sm, unsigned tq,
3039 SourceRange brackets)
3040 : ArrayType(VariableArray, et, can, sm, tq, e),
3041 SizeExpr((Stmt*) e), Brackets(brackets) {}
3042
3043public:
3044 friend class StmtIteratorBase;
3045
3046 Expr *getSizeExpr() const {
3047 // We use C-style casts instead of cast<> here because we do not wish
3048 // to have a dependency of Type.h on Stmt.h/Expr.h.
3049 return (Expr*) SizeExpr;
3050 }
3051
3052 SourceRange getBracketsRange() const { return Brackets; }
3053 SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
3054 SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }
3055
3056 bool isSugared() const { return false; }
3057 QualType desugar() const { return QualType(this, 0); }
3058
3059 static bool classof(const Type *T) {
3060 return T->getTypeClass() == VariableArray;
3061 }
3062
3063 void Profile(llvm::FoldingSetNodeID &ID) {
3064 llvm_unreachable("Cannot unique VariableArrayTypes.");
3065 }
3066};
3067
3068/// Represents an array type in C++ whose size is a value-dependent expression.
3069///
3070/// For example:
3071/// \code
3072/// template<typename T, int Size>
3073/// class array {
3074/// T data[Size];
3075/// };
3076/// \endcode
3077///
3078/// For these types, we won't actually know what the array bound is
3079/// until template instantiation occurs, at which point this will
3080/// become either a ConstantArrayType or a VariableArrayType.
3081class DependentSizedArrayType : public ArrayType {
3082 friend class ASTContext; // ASTContext creates these.
3083
3084 const ASTContext &Context;
3085
3086 /// An assignment expression that will instantiate to the
3087 /// size of the array.
3088 ///
3089 /// The expression itself might be null, in which case the array
3090 /// type will have its size deduced from an initializer.
3091 Stmt *SizeExpr;
3092
3093 /// The range spanned by the left and right array brackets.
3094 SourceRange Brackets;
3095
3096 DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can,
3097 Expr *e, ArraySizeModifier sm, unsigned tq,
3098 SourceRange brackets);
3099
3100public:
3101 friend class StmtIteratorBase;
3102
3103 Expr *getSizeExpr() const {
3104 // We use C-style casts instead of cast<> here because we do not wish
3105 // to have a dependency of Type.h on Stmt.h/Expr.h.
3106 return (Expr*) SizeExpr;
3107 }
3108
3109 SourceRange getBracketsRange() const { return Brackets; }
3110 SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
3111 SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }
3112
3113 bool isSugared() const { return false; }
3114 QualType desugar() const { return QualType(this, 0); }
3115
3116 static bool classof(const Type *T) {
3117 return T->getTypeClass() == DependentSizedArray;
3118 }
3119
3120 void Profile(llvm::FoldingSetNodeID &ID) {
3121 Profile(ID, Context, getElementType(),
3122 getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
3123 }
3124
3125 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3126 QualType ET, ArraySizeModifier SizeMod,
3127 unsigned TypeQuals, Expr *E);
3128};
3129
3130/// Represents an extended address space qualifier where the input address space
3131/// value is dependent. Non-dependent address spaces are not represented with a
3132/// special Type subclass; they are stored on an ExtQuals node as part of a QualType.
3133///
3134/// For example:
3135/// \code
3136/// template<typename T, int AddrSpace>
3137/// class AddressSpace {
3138/// typedef T __attribute__((address_space(AddrSpace))) type;
3139/// }
3140/// \endcode
3141class DependentAddressSpaceType : public Type, public llvm::FoldingSetNode {
3142 friend class ASTContext;
3143
3144 const ASTContext &Context;
3145 Expr *AddrSpaceExpr;
3146 QualType PointeeType;
3147 SourceLocation loc;
3148
3149 DependentAddressSpaceType(const ASTContext &Context, QualType PointeeType,
3150 QualType can, Expr *AddrSpaceExpr,
3151 SourceLocation loc);
3152
3153public:
3154 Expr *getAddrSpaceExpr() const { return AddrSpaceExpr; }
3155 QualType getPointeeType() const { return PointeeType; }
3156 SourceLocation getAttributeLoc() const { return loc; }
3157
3158 bool isSugared() const { return false; }
3159 QualType desugar() const { return QualType(this, 0); }
3160
3161 static bool classof(const Type *T) {
3162 return T->getTypeClass() == DependentAddressSpace;
3163 }
3164
3165 void Profile(llvm::FoldingSetNodeID &ID) {
3166 Profile(ID, Context, getPointeeType(), getAddrSpaceExpr());
3167 }
3168
3169 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3170 QualType PointeeType, Expr *AddrSpaceExpr);
3171};
3172
3173/// Represents an extended vector type where either the type or size is
3174/// dependent.
3175///
3176/// For example:
3177/// \code
3178/// template<typename T, int Size>
3179/// class vector {
3180/// typedef T __attribute__((ext_vector_type(Size))) type;
3181/// }
3182/// \endcode
3183class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
3184 friend class ASTContext;
3185
3186 const ASTContext &Context;
3187 Expr *SizeExpr;
3188
3189 /// The element type of the array.
3190 QualType ElementType;
3191
3192 SourceLocation loc;
3193
3194 DependentSizedExtVectorType(const ASTContext &Context, QualType ElementType,
3195 QualType can, Expr *SizeExpr, SourceLocation loc);
3196
3197public:
3198 Expr *getSizeExpr() const { return SizeExpr; }
3199 QualType getElementType() const { return ElementType; }
3200 SourceLocation getAttributeLoc() const { return loc; }
3201
3202 bool isSugared() const { return false; }
3203 QualType desugar() const { return QualType(this, 0); }
3204
3205 static bool classof(const Type *T) {
3206 return T->getTypeClass() == DependentSizedExtVector;
3207 }
3208
3209 void Profile(llvm::FoldingSetNodeID &ID) {
3210 Profile(ID, Context, getElementType(), getSizeExpr());
3211 }
3212
3213 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3214 QualType ElementType, Expr *SizeExpr);
3215};
3216
3217
3218/// Represents a GCC generic vector type. This type is created using
3219/// __attribute__((vector_size(n))), where "n" specifies the vector size in
3220/// bytes; or from an Altivec __vector or vector declaration.
3221/// Since the constructor takes the number of vector elements, the
3222/// client is responsible for converting the size into the number of elements.
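///
/// For illustration (a sketch, not part of this header), a 16-byte vector of
/// 'float' therefore has 16 / sizeof(float) == 4 elements:
/// \code
/// typedef float float4 __attribute__((vector_size(16))); // 4 elements
/// \endcode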
3223class VectorType : public Type, public llvm::FoldingSetNode {
3224public:
3225 enum VectorKind {
3226 /// not a target-specific vector type
3227 GenericVector,
3228
3229 /// is AltiVec vector
3230 AltiVecVector,
3231
3232 /// is AltiVec 'vector Pixel'
3233 AltiVecPixel,
3234
3235 /// is AltiVec 'vector bool ...'
3236 AltiVecBool,
3237
3238 /// is ARM Neon vector
3239 NeonVector,
3240
3241 /// is ARM Neon polynomial vector
3242 NeonPolyVector,
3243
3244 /// is AArch64 SVE fixed-length data vector
3245 SveFixedLengthDataVector,
3246
3247 /// is AArch64 SVE fixed-length predicate vector
3248 SveFixedLengthPredicateVector
3249 };
3250
3251protected:
3252 friend class ASTContext; // ASTContext creates these.
3253
3254 /// The element type of the vector.
3255 QualType ElementType;
3256
3257 VectorType(QualType vecType, unsigned nElements, QualType canonType,
3258 VectorKind vecKind);
3259
3260 VectorType(TypeClass tc, QualType vecType, unsigned nElements,
3261 QualType canonType, VectorKind vecKind);
3262
3263public:
3264 QualType getElementType() const { return ElementType; }
3265 unsigned getNumElements() const { return VectorTypeBits.NumElements; }
3266
3267 bool isSugared() const { return false; }
3268 QualType desugar() const { return QualType(this, 0); }
3269
3270 VectorKind getVectorKind() const {
3271 return VectorKind(VectorTypeBits.VecKind);
3272 }
3273
3274 void Profile(llvm::FoldingSetNodeID &ID) {
3275 Profile(ID, getElementType(), getNumElements(),
3276 getTypeClass(), getVectorKind());
3277 }
3278
3279 static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
3280 unsigned NumElements, TypeClass TypeClass,
3281 VectorKind VecKind) {
3282 ID.AddPointer(ElementType.getAsOpaquePtr());
3283 ID.AddInteger(NumElements);
3284 ID.AddInteger(TypeClass);
3285 ID.AddInteger(VecKind);
3286 }
3287
3288 static bool classof(const Type *T) {
3289 return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector;
3290 }
3291};
3292
3293/// Represents a vector type where either the type or size is dependent.
3294///
3295/// For example:
3296/// \code
3297/// template<typename T, int Size>
3298/// class vector {
3299/// typedef T __attribute__((vector_size(Size))) type;
3300/// }
3301/// \endcode
3302class DependentVectorType : public Type, public llvm::FoldingSetNode {
3303 friend class ASTContext;
3304
3305 const ASTContext &Context;
3306 QualType ElementType;
3307 Expr *SizeExpr;
3308 SourceLocation Loc;
3309
3310 DependentVectorType(const ASTContext &Context, QualType ElementType,
3311 QualType CanonType, Expr *SizeExpr,
3312 SourceLocation Loc, VectorType::VectorKind vecKind);
3313
3314public:
3315 Expr *getSizeExpr() const { return SizeExpr; }
3316 QualType getElementType() const { return ElementType; }
3317 SourceLocation getAttributeLoc() const { return Loc; }
3318 VectorType::VectorKind getVectorKind() const {
3319 return VectorType::VectorKind(VectorTypeBits.VecKind);
3320 }
3321
3322 bool isSugared() const { return false; }
3323 QualType desugar() const { return QualType(this, 0); }
3324
3325 static bool classof(const Type *T) {
3326 return T->getTypeClass() == DependentVector;
3327 }
3328
3329 void Profile(llvm::FoldingSetNodeID &ID) {
3330 Profile(ID, Context, getElementType(), getSizeExpr(), getVectorKind());
3331 }
3332
3333 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3334 QualType ElementType, const Expr *SizeExpr,
3335 VectorType::VectorKind VecKind);
3336};
3337
3338/// ExtVectorType - Extended vector type. This type is created using
3339/// __attribute__((ext_vector_type(n))), where "n" is the number of elements.
3340/// Unlike vector_size, ext_vector_type is only allowed on typedefs. This
3341/// class enables syntactic extensions, like Vector Components for accessing
3342/// points (as .xyzw), colors (as .rgba), and textures (modeled after OpenGL
3343/// Shading Language).
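///
/// For illustration (a sketch, not part of this header):
/// \code
/// typedef float float4 __attribute__((ext_vector_type(4)));
/// float4 v;
/// v.xy = v.wz; // component accessors; 'x'/'r' name element 0, etc.
/// \endcode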
3344class ExtVectorType : public VectorType {
3345 friend class ASTContext; // ASTContext creates these.
3346
3347 ExtVectorType(QualType vecType, unsigned nElements, QualType canonType)
3348 : VectorType(ExtVector, vecType, nElements, canonType, GenericVector) {}
3349
3350public:
3351 static int getPointAccessorIdx(char c) {
3352 switch (c) {
3353 default: return -1;
3354 case 'x': case 'r': return 0;
3355 case 'y': case 'g': return 1;
3356 case 'z': case 'b': return 2;
3357 case 'w': case 'a': return 3;
3358 }
3359 }
3360
3361 static int getNumericAccessorIdx(char c) {
3362 switch (c) {
3363 default: return -1;
3364 case '0': return 0;
3365 case '1': return 1;
3366 case '2': return 2;
3367 case '3': return 3;
3368 case '4': return 4;
3369 case '5': return 5;
3370 case '6': return 6;
3371 case '7': return 7;
3372 case '8': return 8;
3373 case '9': return 9;
3374 case 'A':
3375 case 'a': return 10;
3376 case 'B':
3377 case 'b': return 11;
3378 case 'C':
3379 case 'c': return 12;
3380 case 'D':
3381 case 'd': return 13;
3382 case 'E':
3383 case 'e': return 14;
3384 case 'F':
3385 case 'f': return 15;
3386 }
3387 }
3388
3389 static int getAccessorIdx(char c, bool isNumericAccessor) {
3390 if (isNumericAccessor)
3391 return getNumericAccessorIdx(c);
3392 else
3393 return getPointAccessorIdx(c);
3394 }
3395
3396 bool isAccessorWithinNumElements(char c, bool isNumericAccessor) const {
3397 if (int idx = getAccessorIdx(c, isNumericAccessor)+1)
3398 return unsigned(idx-1) < getNumElements();
3399 return false;
3400 }
3401
3402 bool isSugared() const { return false; }
3403 QualType desugar() const { return QualType(this, 0); }
3404
3405 static bool classof(const Type *T) {
3406 return T->getTypeClass() == ExtVector;
3407 }
3408};
3409
3410/// Represents a matrix type, as defined in the Matrix Types clang extensions.
3411/// __attribute__((matrix_type(rows, columns))), where "rows" specifies the
3412/// number of rows and "columns" specifies the number of columns.
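///
/// For illustration (a sketch; the extension is gated behind -fenable-matrix):
/// \code
/// typedef float m4x4_t __attribute__((matrix_type(4, 4)));
/// \endcode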
3413class MatrixType : public Type, public llvm::FoldingSetNode {
3414protected:
3415 friend class ASTContext;
3416
3417 /// The element type of the matrix.
3418 QualType ElementType;
3419
3420 MatrixType(QualType ElementTy, QualType CanonElementTy);
3421
3422 MatrixType(TypeClass TypeClass, QualType ElementTy, QualType CanonElementTy,
3423 const Expr *RowExpr = nullptr, const Expr *ColumnExpr = nullptr);
3424
3425public:
3426 /// Returns the type of the elements stored in the matrix.
3427 QualType getElementType() const { return ElementType; }
3428
3429 /// Valid element types are the following:
3430 /// * an integer type (as in C2x 6.2.5p19), but excluding enumerated types
3431 /// and _Bool
3432 /// * the standard floating types float or double
3433 /// * a half-precision floating point type, if one is supported on the target
3434 static bool isValidElementType(QualType T) {
3435 return T->isDependentType() ||
3436 (T->isRealType() && !T->isBooleanType() && !T->isEnumeralType());
3437 }
3438
3439 bool isSugared() const { return false; }
3440 QualType desugar() const { return QualType(this, 0); }
3441
3442 static bool classof(const Type *T) {
3443 return T->getTypeClass() == ConstantMatrix ||
3444 T->getTypeClass() == DependentSizedMatrix;
3445 }
3446};
3447
3448/// Represents a concrete matrix type with a constant number of rows and columns.
3449class ConstantMatrixType final : public MatrixType {
3450protected:
3451 friend class ASTContext;
3452
3453 /// The element type of the matrix.
3454 // FIXME: Appears to be unused? There is also MatrixType::ElementType...
3455 QualType ElementType;
3456
3457 /// Number of rows and columns.
3458 unsigned NumRows;
3459 unsigned NumColumns;
3460
3461 static constexpr unsigned MaxElementsPerDimension = (1 << 20) - 1;
3462
3463 ConstantMatrixType(QualType MatrixElementType, unsigned NRows,
3464 unsigned NColumns, QualType CanonElementType);
3465
3466 ConstantMatrixType(TypeClass typeClass, QualType MatrixType, unsigned NRows,
3467 unsigned NColumns, QualType CanonElementType);
3468
3469public:
3470 /// Returns the number of rows in the matrix.
3471 unsigned getNumRows() const { return NumRows; }
3472
3473 /// Returns the number of columns in the matrix.
3474 unsigned getNumColumns() const { return NumColumns; }
3475
3476 /// Returns the number of elements required to embed the matrix into a vector.
3477 unsigned getNumElementsFlattened() const {
3478 return getNumRows() * getNumColumns();
3479 }
3480
3481 /// Returns true if \p NumElements is a valid matrix dimension.
3482 static constexpr bool isDimensionValid(size_t NumElements) {
3483 return NumElements > 0 && NumElements <= MaxElementsPerDimension;
3484 }
3485
3486 /// Returns the maximum number of elements per dimension.
3487 static constexpr unsigned getMaxElementsPerDimension() {
3488 return MaxElementsPerDimension;
3489 }
3490
3491 void Profile(llvm::FoldingSetNodeID &ID) {
3492 Profile(ID, getElementType(), getNumRows(), getNumColumns(),
3493 getTypeClass());
3494 }
3495
3496 static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
3497 unsigned NumRows, unsigned NumColumns,
3498 TypeClass TypeClass) {
3499 ID.AddPointer(ElementType.getAsOpaquePtr());
3500 ID.AddInteger(NumRows);
3501 ID.AddInteger(NumColumns);
3502 ID.AddInteger(TypeClass);
3503 }
3504
3505 static bool classof(const Type *T) {
3506 return T->getTypeClass() == ConstantMatrix;
3507 }
3508};
3509
3510/// Represents a matrix type where the type and the number of rows and columns
3511/// are dependent on a template.
3512class DependentSizedMatrixType final : public MatrixType {
3513 friend class ASTContext;
3514
3515 const ASTContext &Context;
3516 Expr *RowExpr;
3517 Expr *ColumnExpr;
3518
3519 SourceLocation loc;
3520
3521 DependentSizedMatrixType(const ASTContext &Context, QualType ElementType,
3522 QualType CanonicalType, Expr *RowExpr,
3523 Expr *ColumnExpr, SourceLocation loc);
3524
3525public:
3526 QualType getElementType() const { return ElementType; }
3527 Expr *getRowExpr() const { return RowExpr; }
3528 Expr *getColumnExpr() const { return ColumnExpr; }
3529 SourceLocation getAttributeLoc() const { return loc; }
3530
3531 bool isSugared() const { return false; }
3532 QualType desugar() const { return QualType(this, 0); }
3533
3534 static bool classof(const Type *T) {
3535 return T->getTypeClass() == DependentSizedMatrix;
3536 }
3537
3538 void Profile(llvm::FoldingSetNodeID &ID) {
3539 Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr());
3540 }
3541
3542 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3543 QualType ElementType, Expr *RowExpr, Expr *ColumnExpr);
3544};
3545
3546/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
3547/// class of FunctionNoProtoType and FunctionProtoType.
3548class FunctionType : public Type {
3549 // The type returned by the function.
3550 QualType ResultType;
3551
3552public:
3553 /// Interesting information about a specific parameter that can't simply
3554 /// be reflected in the parameter's type. This is only used by FunctionProtoType
3555 /// but is in FunctionType to make this class available during the
3556 /// specification of the bases of FunctionProtoType.
3557 ///
3558 /// It makes sense to model language features this way when there's some
3559 /// sort of parameter-specific override (such as an attribute) that
3560 /// affects how the function is called. For example, the ARC ns_consumed
3561 /// attribute changes whether a parameter is passed at +0 (the default)
3562 /// or +1 (ns_consumed). This must be reflected in the function type,
3563 /// but isn't really a change to the parameter type.
3564 ///
3565 /// One serious disadvantage of modelling language features this way is
3566 /// that they generally do not work with language features that attempt
3567 /// to destructure types. For example, template argument deduction will
3568 /// not be able to match a parameter declared as
3569 /// T (*)(U)
3570 /// against an argument of type
3571 /// void (*)(__attribute__((ns_consumed)) id)
3572 /// because the substitution of T=void, U=id into the former will
3573 /// not produce the latter.
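  ///
  /// For illustration (a sketch, not part of this header), under
  /// Objective-C ARC:
  /// \code
  /// void consume(__attribute__((ns_consumed)) id x); // passed at +1
  /// void plain(id x); // passed at +0 (the default)
  /// \endcode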
3574 class ExtParameterInfo {
3575 enum {
3576 ABIMask = 0x0F,
3577 IsConsumed = 0x10,
3578 HasPassObjSize = 0x20,
3579 IsNoEscape = 0x40,
3580 };
3581 unsigned char Data = 0;
3582
3583 public:
3584 ExtParameterInfo() = default;
3585
3586 /// Return the ABI treatment of this parameter.
3587 ParameterABI getABI() const { return ParameterABI(Data & ABIMask); }
3588 ExtParameterInfo withABI(ParameterABI kind) const {
3589 ExtParameterInfo copy = *this;
3590 copy.Data = (copy.Data & ~ABIMask) | unsigned(kind);
3591 return copy;
3592 }
3593
3594 /// Is this parameter considered "consumed" by Objective-C ARC?
3595 /// Consumed parameters must have retainable object type.
3596 bool isConsumed() const { return (Data & IsConsumed); }
3597 ExtParameterInfo withIsConsumed(bool consumed) const {
3598 ExtParameterInfo copy = *this;
3599 if (consumed)
3600 copy.Data |= IsConsumed;
3601 else
3602 copy.Data &= ~IsConsumed;
3603 return copy;
3604 }
3605
3606 bool hasPassObjectSize() const { return Data & HasPassObjSize; }
3607 ExtParameterInfo withHasPassObjectSize() const {
3608 ExtParameterInfo Copy = *this;
3609 Copy.Data |= HasPassObjSize;
3610 return Copy;
3611 }
3612
3613 bool isNoEscape() const { return Data & IsNoEscape; }
3614 ExtParameterInfo withIsNoEscape(bool NoEscape) const {
3615 ExtParameterInfo Copy = *this;
3616 if (NoEscape)
3617 Copy.Data |= IsNoEscape;
3618 else
3619 Copy.Data &= ~IsNoEscape;
3620 return Copy;
3621 }
3622
3623 unsigned char getOpaqueValue() const { return Data; }
3624 static ExtParameterInfo getFromOpaqueValue(unsigned char data) {
3625 ExtParameterInfo result;
3626 result.Data = data;
3627 return result;
3628 }
3629
3630 friend bool operator==(ExtParameterInfo lhs, ExtParameterInfo rhs) {
3631 return lhs.Data == rhs.Data;
3632 }
3633
3634 friend bool operator!=(ExtParameterInfo lhs, ExtParameterInfo rhs) {
3635 return lhs.Data != rhs.Data;
3636 }
3637 };
3638
3639 /// A class which abstracts out some details necessary for
3640 /// making a call.
3641 ///
3642 /// It is not actually used directly for storing this information in
3643 /// a FunctionType, although FunctionType does currently use the
3644 /// same bit-pattern.
3645 ///
3646 // If you add a field (say Foo), then besides the obvious places (both
3647 // constructors, compile failures), you need to update:
3648 // * Operator==
3649 // * getFoo
3650 // * withFoo
3651 // * FunctionType: add Foo, getFoo.
3652 // * ASTContext::getFooType
3653 // * ASTContext::mergeFunctionTypes
3654 // * FunctionNoProtoType::Profile
3655 // * FunctionProtoType::Profile
3656 // * TypePrinter::PrintFunctionProto
3657 // * AST read and write
3658 // * Codegen
3659 class ExtInfo {
3660 friend class FunctionType;
3661
3662 // Feel free to rearrange or add bits, but if you go over 16, you'll need to
3663 // adjust the Bits field below, and if you add bits, you'll need to adjust
3664 // Type::FunctionTypeBitfields::ExtInfo as well.
3665
3666 // | CC |noreturn|produces|nocallersavedregs|regparm|nocfcheck|cmsenscall|
3667 // |0 .. 4| 5 | 6 | 7 |8 .. 10| 11 | 12 |
3668 //
3669 // regparm is either 0 (no regparm attribute) or the regparm value+1.
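    // For example, __attribute__((regparm(2))) is stored as 3 in bits 8..10,
    // and getRegParm() below undoes the +1 bias before returning.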
3670 enum { CallConvMask = 0x1F };
3671 enum { NoReturnMask = 0x20 };
3672 enum { ProducesResultMask = 0x40 };
3673 enum { NoCallerSavedRegsMask = 0x80 };
3674 enum {
3675 RegParmMask = 0x700,
3676 RegParmOffset = 8
3677 };
3678 enum { NoCfCheckMask = 0x800 };
3679 enum { CmseNSCallMask = 0x1000 };
3680 uint16_t Bits = CC_C;
3681
3682 ExtInfo(unsigned Bits) : Bits(static_cast<uint16_t>(Bits)) {}
3683
3684 public:
3685 // Constructor with no defaults. Use this when you know that you
3686 // have all the elements (when reading an AST file for example).
3687 ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
3688 bool producesResult, bool noCallerSavedRegs, bool NoCfCheck,
3689 bool cmseNSCall) {
3690 assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
3691 Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) |
3692 (producesResult ? ProducesResultMask : 0) |
3693 (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) |
3694 (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0) |
3695 (NoCfCheck ? NoCfCheckMask : 0) |
3696 (cmseNSCall ? CmseNSCallMask : 0);
3697 }
3698
3699 // Constructor with all defaults. Use when, for example, creating a
3700 // function known to use defaults.
3701 ExtInfo() = default;
3702
3703 // Constructor with just the calling convention, which is an important part
3704 // of the canonical type.
3705 ExtInfo(CallingConv CC) : Bits(CC) {}
3706
3707 bool getNoReturn() const { return Bits & NoReturnMask; }
3708 bool getProducesResult() const { return Bits & ProducesResultMask; }
3709 bool getCmseNSCall() const { return Bits & CmseNSCallMask; }
3710 bool getNoCallerSavedRegs() const { return Bits & NoCallerSavedRegsMask; }
3711 bool getNoCfCheck() const { return Bits & NoCfCheckMask; }
3712 bool getHasRegParm() const { return ((Bits & RegParmMask) >> RegParmOffset) != 0; }
3713
3714 unsigned getRegParm() const {
3715 unsigned RegParm = (Bits & RegParmMask) >> RegParmOffset;
3716 if (RegParm > 0)
3717 --RegParm;
3718 return RegParm;
3719 }
3720
3721 CallingConv getCC() const { return CallingConv(Bits & CallConvMask); }
3722
3723 bool operator==(ExtInfo Other) const {
3724 return Bits == Other.Bits;
3725 }
3726 bool operator!=(ExtInfo Other) const {
3727 return Bits != Other.Bits;
3728 }
3729
3730 // Note that we don't have setters. That is by design: use the
3731 // following with* methods instead of mutating these objects.
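    //
    // For example (a sketch, not from this header):
    //   ExtInfo EI = ExtInfo(CC_C).withNoReturn(true).withRegParm(2);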
3732
3733 ExtInfo withNoReturn(bool noReturn) const {
3734 if (noReturn)
3735 return ExtInfo(Bits | NoReturnMask);
3736 else
3737 return ExtInfo(Bits & ~NoReturnMask);
3738 }
3739
3740 ExtInfo withProducesResult(bool producesResult) const {
3741 if (producesResult)
3742 return ExtInfo(Bits | ProducesResultMask);
3743 else
3744 return ExtInfo(Bits & ~ProducesResultMask);
3745 }
3746
3747 ExtInfo withCmseNSCall(bool cmseNSCall) const {
3748 if (cmseNSCall)
3749 return ExtInfo(Bits | CmseNSCallMask);
3750 else
3751 return ExtInfo(Bits & ~CmseNSCallMask);
3752 }
3753
3754 ExtInfo withNoCallerSavedRegs(bool noCallerSavedRegs) const {
3755 if (noCallerSavedRegs)
3756 return ExtInfo(Bits | NoCallerSavedRegsMask);
3757 else
3758 return ExtInfo(Bits & ~NoCallerSavedRegsMask);
3759 }
3760
3761 ExtInfo withNoCfCheck(bool noCfCheck) const {
3762 if (noCfCheck)
3763 return ExtInfo(Bits | NoCfCheckMask);
3764 else
3765 return ExtInfo(Bits & ~NoCfCheckMask);
3766 }
3767
3768 ExtInfo withRegParm(unsigned RegParm) const {
3769 assert(RegParm < 7 && "Invalid regparm value");
3770 return ExtInfo((Bits & ~RegParmMask) |
3771 ((RegParm + 1) << RegParmOffset));
3772 }
3773
3774 ExtInfo withCallingConv(CallingConv cc) const {
3775 return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc);
3776 }
3777
3778 void Profile(llvm::FoldingSetNodeID &ID) const {
3779 ID.AddInteger(Bits);
3780 }
3781 };
3782
3783 /// A simple holder for a QualType representing a type in an
3784 /// exception specification. Unfortunately needed by FunctionProtoType
3785 /// because TrailingObjects cannot handle repeated types.
3786 struct ExceptionType { QualType Type; };
3787
3788 /// A simple holder for various uncommon bits which do not fit in
3789 /// FunctionTypeBitfields. Aligned to alignof(void *) to maintain the
3790 /// alignment of subsequent objects in TrailingObjects. You must update
3791 /// hasExtraBitfields in FunctionProtoType after adding extra data here.
3792 struct alignas(void *) FunctionTypeExtraBitfields {
3793 /// The number of types in the exception specification.
3794 /// A whole unsigned is not needed here; according to
3795 /// [implimits], 8 bits would be enough.
3796 unsigned NumExceptionType;
3797 };
3798
3799protected:
3800 FunctionType(TypeClass tc, QualType res, QualType Canonical,
3801 TypeDependence Dependence, ExtInfo Info)
3802 : Type(tc, Canonical, Dependence), ResultType(res) {
3803 FunctionTypeBits.ExtInfo = Info.Bits;
3804 }
3805
3806 Qualifiers getFastTypeQuals() const {
3807 return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals);
3808 }
3809
3810public:
3811 QualType getReturnType() const { return ResultType; }
3812
3813 bool getHasRegParm() const { return getExtInfo().getHasRegParm(); }
3814 unsigned getRegParmType() const { return getExtInfo().getRegParm(); }
3815
3816 /// Determine whether this function type includes the GNU noreturn
3817 /// attribute. The C++11 [[noreturn]] attribute does not affect the function
3818 /// type.
3819 bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
3820
3821 bool getCmseNSCallAttr() const { return getExtInfo().getCmseNSCall(); }
3822 CallingConv getCallConv() const { return getExtInfo().getCC(); }
3823 ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
3824
3825 static_assert((~Qualifiers::FastMask & Qualifiers::CVRMask) == 0,
3826 "Const, volatile and restrict are assumed to be a subset of "
3827 "the fast qualifiers.");
3828
3829 bool isConst() const { return getFastTypeQuals().hasConst(); }
3830 bool isVolatile() const { return getFastTypeQuals().hasVolatile(); }
3831 bool isRestrict() const { return getFastTypeQuals().hasRestrict(); }
3832
3833 /// Determine the type of an expression that calls a function of
3834 /// this type.
3835 QualType getCallResultType(const ASTContext &Context) const {
3836 return getReturnType().getNonLValueExprType(Context);
3837 }
3838
3839 static StringRef getNameForCallConv(CallingConv CC);
3840
3841 static bool classof(const Type *T) {
3842 return T->getTypeClass() == FunctionNoProto ||
3843 T->getTypeClass() == FunctionProto;
3844 }
3845};
3846
3847/// Represents a K&R-style 'int foo()' function, which has
3848/// no information available about its arguments.
3849class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
3850 friend class ASTContext; // ASTContext creates these.
3851
3852 FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
3853 : FunctionType(FunctionNoProto, Result, Canonical,
3854 Result->getDependence() &
3855 ~(TypeDependence::DependentInstantiation |
3856 TypeDependence::UnexpandedPack),
3857 Info) {}
3858
3859public:
3860 // No additional state past what FunctionType provides.
3861
3862 bool isSugared() const { return false; }
3863 QualType desugar() const { return QualType(this, 0); }
3864
3865 void Profile(llvm::FoldingSetNodeID &ID) {
3866 Profile(ID, getReturnType(), getExtInfo());
3867 }
3868
3869 static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType,
3870 ExtInfo Info) {
3871 Info.Profile(ID);
3872 ID.AddPointer(ResultType.getAsOpaquePtr());
3873 }
3874
3875 static bool classof(const Type *T) {
3876 return T->getTypeClass() == FunctionNoProto;
3877 }
3878};
3879
3880/// Represents a prototype with parameter type info, e.g.
3881/// 'int foo(int)' or 'int foo(void)'. 'void' is represented as having no
3882/// parameters, not as having a single void parameter. Such a type can have
3883/// an exception specification, but this specification is not part of the
3884/// canonical type. FunctionProtoType has several trailing objects, some of
3885/// which are optional. For more information about the trailing objects, see
3886/// the first comment inside FunctionProtoType.
3887class FunctionProtoType final
3888 : public FunctionType,
3889 public llvm::FoldingSetNode,
3890 private llvm::TrailingObjects<
3891 FunctionProtoType, QualType, SourceLocation,
3892 FunctionType::FunctionTypeExtraBitfields, FunctionType::ExceptionType,
3893 Expr *, FunctionDecl *, FunctionType::ExtParameterInfo, Qualifiers> {
3894 friend class ASTContext; // ASTContext creates these.
3895 friend TrailingObjects;
3896
3897 // FunctionProtoType is followed by several trailing objects, some of
3898 // which are optional. They are, in order:
3899 //
3900 // * An array of getNumParams() QualType holding the parameter types.
3901 // Always present. Note that for the vast majority of FunctionProtoType,
3902 // these will be the only trailing objects.
3903 //
3904 // * Optionally if the function is variadic, the SourceLocation of the
3905 // ellipsis.
3906 //
3907 // * Optionally if some extra data is stored in FunctionTypeExtraBitfields
3908 // (see FunctionTypeExtraBitfields and FunctionTypeBitfields):
3909 // a single FunctionTypeExtraBitfields. Present if and only if
3910 // hasExtraBitfields() is true.
3911 //
3912 // * Optionally exactly one of:
3913 // * an array of getNumExceptions() ExceptionType,
3914 // * a single Expr *,
3915 // * a pair of FunctionDecl *,
3916 // * a single FunctionDecl *
3917 // used to store information about the various types of exception
3918 // specification. See getExceptionSpecSize for the details.
3919 //
3920 // * Optionally an array of getNumParams() ExtParameterInfo holding
3921 // an ExtParameterInfo for each of the parameters. Present if and
3922 // only if hasExtParameterInfos() is true.
3923 //
3924 // * Optionally a Qualifiers object to represent extra qualifiers that can't
3925 // be represented by FunctionTypeBitfields.FastTypeQuals. Present if and only
3926 // if hasExtQualifiers() is true.
3927 //
3928 // The optional FunctionTypeExtraBitfields has to be before the data
3929 // related to the exception specification since it contains the number
3930 // of exception types.
3931 //
3932 // We put the ExtParameterInfos last. If all were equal, it would make
3933 // more sense to put these before the exception specification, because
3934 // it's much easier to skip past them compared to the elaborate switch
3935 // required to skip the exception specification. However, all is not
3936 // equal; ExtParameterInfos are used to model very uncommon features,
3937 // and it's better not to burden the more common paths.
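  //
  // For illustration (a sketch): for 'void f(int, ...) noexcept(B)' the
  // trailing objects are one QualType (for 'int'), the SourceLocation of
  // the ellipsis, and one Expr* for the computed noexcept condition.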
3938
3939public:
3940 /// Holds information about the various types of exception specification.
3941 /// ExceptionSpecInfo is not stored as such in FunctionProtoType but is
3942 /// used to group together the various bits of information about the
3943 /// exception specification.
3944 struct ExceptionSpecInfo {
3945 /// The kind of exception specification this is.
3946 ExceptionSpecificationType Type = EST_None;
3947
3948 /// Explicitly-specified list of exception types.
3949 ArrayRef<QualType> Exceptions;
3950
3951 /// Noexcept expression, if this is a computed noexcept specification.
3952 Expr *NoexceptExpr = nullptr;
3953
3954 /// The function whose exception specification this is, for
3955 /// EST_Unevaluated and EST_Uninstantiated.
3956 FunctionDecl *SourceDecl = nullptr;
3957
3958 /// The function template whose exception specification this is instantiated
3959 /// from, for EST_Uninstantiated.
3960 FunctionDecl *SourceTemplate = nullptr;
3961
3962 ExceptionSpecInfo() = default;
3963
3964 ExceptionSpecInfo(ExceptionSpecificationType EST) : Type(EST) {}
3965 };
3966
3967 /// Extra information about a function prototype. ExtProtoInfo is not
3968 /// stored as such in FunctionProtoType but is used to group together
3969 /// the various bits of extra information about a function prototype.
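  ///
  /// For illustration, a common pattern is to copy a prototype's info and
  /// rebuild the type with one field changed (a sketch; 'Ctx' is an assumed
  /// ASTContext &):
  /// \code
  /// FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
  /// EPI.Variadic = true;
  /// QualType NewTy = Ctx.getFunctionType(FPT->getReturnType(),
  ///                                      FPT->getParamTypes(), EPI);
  /// \endcode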
3970 struct ExtProtoInfo {
3971 FunctionType::ExtInfo ExtInfo;
3972 bool Variadic : 1;
3973 bool HasTrailingReturn : 1;
3974 Qualifiers TypeQuals;
3975 RefQualifierKind RefQualifier = RQ_None;
3976 ExceptionSpecInfo ExceptionSpec;
3977 const ExtParameterInfo *ExtParameterInfos = nullptr;
3978 SourceLocation EllipsisLoc;
3979
3980 ExtProtoInfo() : Variadic(false), HasTrailingReturn(false) {}
3981
3982 ExtProtoInfo(CallingConv CC)
3983 : ExtInfo(CC), Variadic(false), HasTrailingReturn(false) {}
3984
3985 ExtProtoInfo withExceptionSpec(const ExceptionSpecInfo &ESI) {
3986 ExtProtoInfo Result(*this);
3987 Result.ExceptionSpec = ESI;
3988 return Result;
3989 }
3990 };
3991
3992private:
3993 unsigned numTrailingObjects(OverloadToken<QualType>) const {
3994 return getNumParams();
3995 }
3996
3997 unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
3998 return isVariadic();
3999 }
4000
4001 unsigned numTrailingObjects(OverloadToken<FunctionTypeExtraBitfields>) const {
4002 return hasExtraBitfields();
4003 }
4004
4005 unsigned numTrailingObjects(OverloadToken<ExceptionType>) const {
4006 return getExceptionSpecSize().NumExceptionType;
4007 }
4008
4009 unsigned numTrailingObjects(OverloadToken<Expr *>) const {
4010 return getExceptionSpecSize().NumExprPtr;
4011 }
4012
4013 unsigned numTrailingObjects(OverloadToken<FunctionDecl *>) const {
4014 return getExceptionSpecSize().NumFunctionDeclPtr;
4015 }
4016
4017 unsigned numTrailingObjects(OverloadToken<ExtParameterInfo>) const {
4018 return hasExtParameterInfos() ? getNumParams() : 0;
4019 }
4020
4021 /// Determine whether there are any argument types that
4022 /// contain an unexpanded parameter pack.
4023 static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
4024 unsigned numArgs) {
4025 for (unsigned Idx = 0; Idx < numArgs; ++Idx)
4026 if (ArgArray[Idx]->containsUnexpandedParameterPack())
4027 return true;
4028
4029 return false;
4030 }
4031
4032 FunctionProtoType(QualType result, ArrayRef<QualType> params,
4033 QualType canonical, const ExtProtoInfo &epi);
4034
4035 /// This struct is returned by getExceptionSpecSize and is used to
4036 /// translate an ExceptionSpecificationType to the number and kind
4037 /// of trailing objects related to the exception specification.
4038 struct ExceptionSpecSizeHolder {
4039 unsigned NumExceptionType;
4040 unsigned NumExprPtr;
4041 unsigned NumFunctionDeclPtr;
4042 };
4043
4044 /// Return the number and kind of trailing objects
4045 /// related to the exception specification.
4046 static ExceptionSpecSizeHolder
4047 getExceptionSpecSize(ExceptionSpecificationType EST, unsigned NumExceptions) {
4048 switch (EST) {
4049 case EST_None:
4050 case EST_DynamicNone:
4051 case EST_MSAny:
4052 case EST_BasicNoexcept:
4053 case EST_Unparsed:
4054 case EST_NoThrow:
4055 return {0, 0, 0};
4056
4057 case EST_Dynamic:
4058 return {NumExceptions, 0, 0};
4059
4060 case EST_DependentNoexcept:
4061 case EST_NoexceptFalse:
4062 case EST_NoexceptTrue:
4063 return {0, 1, 0};
4064
4065 case EST_Uninstantiated:
4066 return {0, 0, 2};
4067
4068 case EST_Unevaluated:
4069 return {0, 0, 1};
4070 }
4071 llvm_unreachable("bad exception specification kind");
4072 }
4073
4074 /// Return the number and kind of trailing objects
4075 /// related to the exception specification.
4076 ExceptionSpecSizeHolder getExceptionSpecSize() const {
4077 return getExceptionSpecSize(getExceptionSpecType(), getNumExceptions());
4078 }
4079
4080 /// Whether the trailing FunctionTypeExtraBitfields is present.
4081 static bool hasExtraBitfields(ExceptionSpecificationType EST) {
4082 // If the exception spec type is EST_Dynamic then we have > 0 exception
4083 // types and the exact number is stored in FunctionTypeExtraBitfields.
4084 return EST == EST_Dynamic;
4085 }
4086
4087 /// Whether the trailing FunctionTypeExtraBitfields is present.
4088 bool hasExtraBitfields() const {
4089 return hasExtraBitfields(getExceptionSpecType());
4090 }
4091
4092 bool hasExtQualifiers() const {
4093 return FunctionTypeBits.HasExtQuals;
4094 }
4095
4096public:
4097 unsigned getNumParams() const { return FunctionTypeBits.NumParams; }
4098
4099 QualType getParamType(unsigned i) const {
4100 assert(i < getNumParams() && "invalid parameter index");
4101 return param_type_begin()[i];
4102 }
4103
4104 ArrayRef<QualType> getParamTypes() const {
4105 return llvm::makeArrayRef(param_type_begin(), param_type_end());
4106 }
4107
4108 ExtProtoInfo getExtProtoInfo() const {
4109 ExtProtoInfo EPI;
4110 EPI.ExtInfo = getExtInfo();
4111 EPI.Variadic = isVariadic();
4112 EPI.EllipsisLoc = getEllipsisLoc();
4113 EPI.HasTrailingReturn = hasTrailingReturn();
4114 EPI.ExceptionSpec = getExceptionSpecInfo();
4115 EPI.TypeQuals = getMethodQuals();
4116 EPI.RefQualifier = getRefQualifier();
4117 EPI.ExtParameterInfos = getExtParameterInfosOrNull();
4118 return EPI;
4119 }
4120
4121 /// Get the kind of exception specification on this function.
4122 ExceptionSpecificationType getExceptionSpecType() const {
4123 return static_cast<ExceptionSpecificationType>(
4124 FunctionTypeBits.ExceptionSpecType);
4125 }
4126
4127 /// Return whether this function has any kind of exception spec.
4128 bool hasExceptionSpec() const { return getExceptionSpecType() != EST_None; }
4129
4130 /// Return whether this function has a dynamic (throw) exception spec.
4131 bool hasDynamicExceptionSpec() const {
4132 return isDynamicExceptionSpec(getExceptionSpecType());
4133 }
4134
4135 /// Return whether this function has a noexcept exception spec.
4136 bool hasNoexceptExceptionSpec() const {
4137 return isNoexceptExceptionSpec(getExceptionSpecType());
4138 }
4139
4140 /// Return whether this function has a dependent exception spec.
4141 bool hasDependentExceptionSpec() const;
4142
4143 /// Return whether this function has an instantiation-dependent exception
4144 /// spec.
4145 bool hasInstantiationDependentExceptionSpec() const;
4146
4147 /// Return all the available information about this type's exception spec.
4148 ExceptionSpecInfo getExceptionSpecInfo() const {
4149 ExceptionSpecInfo Result;
4150 Result.Type = getExceptionSpecType();
4151 if (Result.Type == EST_Dynamic) {
4152 Result.Exceptions = exceptions();
4153 } else if (isComputedNoexcept(Result.Type)) {
4154 Result.NoexceptExpr = getNoexceptExpr();
4155 } else if (Result.Type == EST_Uninstantiated) {
4156 Result.SourceDecl = getExceptionSpecDecl();
4157 Result.SourceTemplate = getExceptionSpecTemplate();
4158 } else if (Result.Type == EST_Unevaluated) {
4159 Result.SourceDecl = getExceptionSpecDecl();
4160 }
4161 return Result;
4162 }
4163
4164 /// Return the number of types in the exception specification.
4165 unsigned getNumExceptions() const {
4166 return getExceptionSpecType() == EST_Dynamic
4167 ? getTrailingObjects<FunctionTypeExtraBitfields>()
4168 ->NumExceptionType
4169 : 0;
4170 }
4171
4172 /// Return the ith exception type, where 0 <= i < getNumExceptions().
4173 QualType getExceptionType(unsigned i) const {
4174 assert(i < getNumExceptions() && "Invalid exception number!");
4175 return exception_begin()[i];
4176 }
4177
4178 /// Return the expression inside noexcept(expression), or a null pointer
4179 /// if there is none (because the exception spec is not of this form).
4180 Expr *getNoexceptExpr() const {
4181 if (!isComputedNoexcept(getExceptionSpecType()))
4182 return nullptr;
4183 return *getTrailingObjects<Expr *>();
4184 }
4185
4186 /// If this function type has an exception specification which hasn't
4187 /// been determined yet (either because it has not been evaluated or because
4188 /// it has not been instantiated), this is the function whose exception
4189 /// specification is represented by this type.
4190 FunctionDecl *getExceptionSpecDecl() const {
4191 if (getExceptionSpecType() != EST_Uninstantiated &&
4192 getExceptionSpecType() != EST_Unevaluated)
4193 return nullptr;
4194 return getTrailingObjects<FunctionDecl *>()[0];
4195 }
4196
4197 /// If this function type has an uninstantiated exception
4198 /// specification, this is the function whose exception specification
4199 /// should be instantiated to find the exception specification for
4200 /// this type.
4201 FunctionDecl *getExceptionSpecTemplate() const {
4202 if (getExceptionSpecType() != EST_Uninstantiated)
4203 return nullptr;
4204 return getTrailingObjects<FunctionDecl *>()[1];
4205 }
4206
4207 /// Determine whether this function type has a non-throwing exception
4208 /// specification.
4209 CanThrowResult canThrow() const;
4210
4211 /// Determine whether this function type has a non-throwing exception
4212 /// specification. If this depends on template arguments, returns
4213 /// \c ResultIfDependent.
4214 bool isNothrow(bool ResultIfDependent = false) const {
4215 return ResultIfDependent ? canThrow() != CT_Can : canThrow() == CT_Cannot;
4216 }
4217
4218 /// Whether this function prototype is variadic.
4219 bool isVariadic() const { return FunctionTypeBits.Variadic; }
4220
4221 SourceLocation getEllipsisLoc() const {
4222 return isVariadic() ? *getTrailingObjects<SourceLocation>()
4223 : SourceLocation();
4224 }
4225
4226 /// Determines whether this function prototype contains a
4227 /// parameter pack at the end.
4228 ///
4229 /// A function template whose last parameter is a parameter pack can be
4230 /// called with an arbitrary number of arguments, much like a variadic
4231 /// function.
4232 bool isTemplateVariadic() const;
4233
4234 /// Whether this function prototype has a trailing return type.
4235 bool hasTrailingReturn() const { return FunctionTypeBits.HasTrailingReturn; }
4236
4237 Qualifiers getMethodQuals() const {
4238 if (hasExtQualifiers())
4239 return *getTrailingObjects<Qualifiers>();
4240 else
4241 return getFastTypeQuals();
4242 }
4243
4244 /// Retrieve the ref-qualifier associated with this function type.
4245 RefQualifierKind getRefQualifier() const {
4246 return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
4247 }
4248
4249 using param_type_iterator = const QualType *;
4250 using param_type_range = llvm::iterator_range<param_type_iterator>;
4251
4252 param_type_range param_types() const {
4253 return param_type_range(param_type_begin(), param_type_end());
4254 }
4255
4256 param_type_iterator param_type_begin() const {
4257 return getTrailingObjects<QualType>();
4258 }
4259
4260 param_type_iterator param_type_end() const {
4261 return param_type_begin() + getNumParams();
4262 }
4263
4264 using exception_iterator = const QualType *;
4265
4266 ArrayRef<QualType> exceptions() const {
4267 return llvm::makeArrayRef(exception_begin(), exception_end());
4268 }
4269
4270 exception_iterator exception_begin() const {
4271 return reinterpret_cast<exception_iterator>(
4272 getTrailingObjects<ExceptionType>());
4273 }
4274
4275 exception_iterator exception_end() const {
4276 return exception_begin() + getNumExceptions();
4277 }
4278
4279 /// Is there any interesting extra information for any of the parameters
4280 /// of this function type?
4281 bool hasExtParameterInfos() const {
4282 return FunctionTypeBits.HasExtParameterInfos;
4283 }
4284
4285 ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
4286 assert(hasExtParameterInfos());
4287 return ArrayRef<ExtParameterInfo>(getTrailingObjects<ExtParameterInfo>(),
4288 getNumParams());
4289 }
4290
4291 /// Return a pointer to the beginning of the array of extra parameter
4292 /// information, if present, or else null if none of the parameters
4293 /// carry it. This is equivalent to getExtProtoInfo().ExtParameterInfos.
4294 const ExtParameterInfo *getExtParameterInfosOrNull() const {
4295 if (!hasExtParameterInfos())
4296 return nullptr;
4297 return getTrailingObjects<ExtParameterInfo>();
4298 }
4299
4300 ExtParameterInfo getExtParameterInfo(unsigned I) const {
4301 assert(I < getNumParams() && "parameter index out of range");
4302 if (hasExtParameterInfos())
4303 return getTrailingObjects<ExtParameterInfo>()[I];
4304 return ExtParameterInfo();
4305 }
4306
4307 ParameterABI getParameterABI(unsigned I) const {
4308 assert(I < getNumParams() && "parameter index out of range");
4309 if (hasExtParameterInfos())
4310 return getTrailingObjects<ExtParameterInfo>()[I].getABI();
4311 return ParameterABI::Ordinary;
4312 }
4313
4314 bool isParamConsumed(unsigned I) const {
4315 assert(I < getNumParams() && "parameter index out of range");
4316 if (hasExtParameterInfos())
4317 return getTrailingObjects<ExtParameterInfo>()[I].isConsumed();
4318 return false;
4319 }
4320
4321 bool isSugared() const { return false; }
4322 QualType desugar() const { return QualType(this, 0); }
4323
4324 void printExceptionSpecification(raw_ostream &OS,
4325 const PrintingPolicy &Policy) const;
4326
4327 static bool classof(const Type *T) {
4328 return T->getTypeClass() == FunctionProto;
4329 }
4330
4331 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
4332 static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
4333 param_type_iterator ArgTys, unsigned NumArgs,
4334 const ExtProtoInfo &EPI, const ASTContext &Context,
4335 bool Canonical);
4336};
4337
4338/// Represents the dependent type named by a dependently-scoped
4339/// typename using declaration, e.g.
4340/// using typename Base<T>::foo;
4341///
4342/// Template instantiation turns these into the underlying type.
4343class UnresolvedUsingType : public Type {
4344 friend class ASTContext; // ASTContext creates these.
4345
4346 UnresolvedUsingTypenameDecl *Decl;
4347
4348 UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
4349 : Type(UnresolvedUsing, QualType(),
4350 TypeDependence::DependentInstantiation),
4351 Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {}
4352
4353public:
4354 UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
4355
4356 bool isSugared() const { return false; }
4357 QualType desugar() const { return QualType(this, 0); }
4358
4359 static bool classof(const Type *T) {
4360 return T->getTypeClass() == UnresolvedUsing;
4361 }
4362
4363 void Profile(llvm::FoldingSetNodeID &ID) {
4364 return Profile(ID, Decl);
4365 }
4366
4367 static void Profile(llvm::FoldingSetNodeID &ID,
4368 UnresolvedUsingTypenameDecl *D) {
4369 ID.AddPointer(D);
4370 }
4371};
4372
4373class TypedefType : public Type {
4374 TypedefNameDecl *Decl;
4375
4376private:
4377 friend class ASTContext; // ASTContext creates these.
4378
4379 TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType underlying,
4380 QualType can);
4381
4382public:
4383 TypedefNameDecl *getDecl() const { return Decl; }
4384
4385 bool isSugared() const { return true; }
4386 QualType desugar() const;
4387
4388 static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
4389};
4390
4391/// Sugar type that represents a type that was qualified by a qualifier written
4392/// as a macro invocation.
4393class MacroQualifiedType : public Type {
4394 friend class ASTContext; // ASTContext creates these.
4395
4396 QualType UnderlyingTy;
4397 const IdentifierInfo *MacroII;
4398
4399 MacroQualifiedType(QualType UnderlyingTy, QualType CanonTy,
4400 const IdentifierInfo *MacroII)
4401 : Type(MacroQualified, CanonTy, UnderlyingTy->getDependence()),
4402 UnderlyingTy(UnderlyingTy), MacroII(MacroII) {
4403 assert(isa<AttributedType>(UnderlyingTy) &&
4404 "Expected a macro qualified type to only wrap attributed types.");
4405 }
4406
4407public:
4408 const IdentifierInfo *getMacroIdentifier() const { return MacroII; }
4409 QualType getUnderlyingType() const { return UnderlyingTy; }
4410
4411 /// Return this attributed type's modified type with no qualifiers attached to
4412 /// it.
4413 QualType getModifiedType() const;
4414
4415 bool isSugared() const { return true; }
4416 QualType desugar() const;
4417
4418 static bool classof(const Type *T) {
4419 return T->getTypeClass() == MacroQualified;
4420 }
4421};
4422
4423/// Represents a `typeof` (or __typeof__) expression (a GCC extension).
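///
/// For illustration (a sketch, not part of this header):
/// \code
/// int x;
/// __typeof__(x + 1) y; // 'y' has type 'int'
/// \endcode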
4424class TypeOfExprType : public Type {
4425 Expr *TOExpr;
4426
4427protected:
4428 friend class ASTContext; // ASTContext creates these.
4429
4430 TypeOfExprType(Expr *E, QualType can = QualType());
4431
4432public:
4433 Expr *getUnderlyingExpr() const { return TOExpr; }
4434
4435 /// Remove a single level of sugar.
4436 QualType desugar() const;
4437
4438 /// Returns whether this type directly provides sugar.
4439 bool isSugared() const;
4440
4441 static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; }
4442};
4443
4444/// Internal representation of canonical, dependent
4445/// `typeof(expr)` types.
4446///
4447/// This class is used internally by the ASTContext to manage
4448/// canonical, dependent types, only. Clients will only see instances
4449/// of this class via TypeOfExprType nodes.
4450class DependentTypeOfExprType
4451 : public TypeOfExprType, public llvm::FoldingSetNode {
4452 const ASTContext &Context;
4453
4454public:
4455 DependentTypeOfExprType(const ASTContext &Context, Expr *E)
4456 : TypeOfExprType(E), Context(Context) {}
4457
4458 void Profile(llvm::FoldingSetNodeID &ID) {
4459 Profile(ID, Context, getUnderlyingExpr());
4460 }
4461
4462 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
4463 Expr *E);
4464};
4465
4466/// Represents `typeof(type)`, a GCC extension.
4467class TypeOfType : public Type {
4468 friend class ASTContext; // ASTContext creates these.
4469
4470 QualType TOType;
4471
4472 TypeOfType(QualType T, QualType can)
4473 : Type(TypeOf, can, T->getDependence()), TOType(T) {
4474 assert(!isa<TypedefType>(can) && "Invalid canonical type");
4475 }
4476
4477public:
4478 QualType getUnderlyingType() const { return TOType; }
4479
4480 /// Remove a single level of sugar.
4481 QualType desugar() const { return getUnderlyingType(); }
4482
4483 /// Returns whether this type directly provides sugar.
4484 bool isSugared() const { return true; }
4485
4486 static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
4487};
4488
4489/// Represents the type `decltype(expr)` (C++11).
4490class DecltypeType : public Type {
4491 Expr *E;
4492 QualType UnderlyingType;
4493
4494protected:
4495 friend class ASTContext; // ASTContext creates these.
4496
4497 DecltypeType(Expr *E, QualType underlyingType, QualType can = QualType());
4498
4499public:
4500 Expr *getUnderlyingExpr() const { return E; }
4501 QualType getUnderlyingType() const { return UnderlyingType; }
4502
4503 /// Remove a single level of sugar.
4504 QualType desugar() const;
4505
4506 /// Returns whether this type directly provides sugar.
4507 bool isSugared() const;
4508
4509 static bool classof(const Type *T) { return T->getTypeClass() == Decltype; }
4510};
4511
4512/// Internal representation of canonical, dependent
4513/// decltype(expr) types.
4514///
4515/// This class is used internally by the ASTContext to manage
4516/// canonical, dependent types, only. Clients will only see instances
4517/// of this class via DecltypeType nodes.
4518class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
4519 const ASTContext &Context;
4520
4521public:
4522 DependentDecltypeType(const ASTContext &Context, Expr *E);
4523
4524 void Profile(llvm::FoldingSetNodeID &ID) {
4525 Profile(ID, Context, getUnderlyingExpr());
4526 }
4527
4528 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
4529 Expr *E);
4530};
4531
4532/// A unary type transform, which is a type constructed from another.
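///
/// The only kind currently is EnumUnderlyingType, which models the
/// __underlying_type builtin. For illustration (a sketch, not part of this
/// header):
/// \code
/// enum E : unsigned char { e };
/// __underlying_type(E) x; // 'x' has type 'unsigned char'
/// \endcode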
4533class UnaryTransformType : public Type {
4534public:
4535 enum UTTKind {
4536 EnumUnderlyingType
4537 };
4538
4539private:
4540 /// The untransformed type.
4541 QualType BaseType;
4542
4543 /// The transformed type if not dependent, otherwise the same as BaseType.
4544 QualType UnderlyingType;
4545
4546 UTTKind UKind;
4547
4548protected:
4549 friend class ASTContext;
4550
4551 UnaryTransformType(QualType BaseTy, QualType UnderlyingTy, UTTKind UKind,
4552 QualType CanonicalTy);
4553
4554public:
4555 bool isSugared() const { return !isDependentType(); }
4556 QualType desugar() const { return UnderlyingType; }
4557
4558 QualType getUnderlyingType() const { return UnderlyingType; }
4559 QualType getBaseType() const { return BaseType; }
4560
4561 UTTKind getUTTKind() const { return UKind; }
4562
4563 static bool classof(const Type *T) {
4564 return T->getTypeClass() == UnaryTransform;
4565 }
4566};
4567
4568/// Internal representation of canonical, dependent
4569/// __underlying_type(type) types.
4570///
4571/// This class is used internally by the ASTContext to manage
4572/// canonical, dependent types, only. Clients will only see instances
4573/// of this class via UnaryTransformType nodes.
4574class DependentUnaryTransformType : public UnaryTransformType,
4575 public llvm::FoldingSetNode {
4576public:
4577 DependentUnaryTransformType(const ASTContext &C, QualType BaseType,
4578 UTTKind UKind);
4579
4580 void Profile(llvm::FoldingSetNodeID &ID) {
4581 Profile(ID, getBaseType(), getUTTKind());
4582 }
4583
4584 static void Profile(llvm::FoldingSetNodeID &ID, QualType BaseType,
4585 UTTKind UKind) {
4586 ID.AddPointer(BaseType.getAsOpaquePtr());
4587 ID.AddInteger((unsigned)UKind);
4588 }
4589};
4590
4591class TagType : public Type {
4592 friend class ASTReader;
4593 template <class T> friend class serialization::AbstractTypeReader;
4594
4595 /// Stores the TagDecl associated with this type. The decl may point to any
4596 /// TagDecl that declares the entity.
4597 TagDecl *decl;
4598
4599protected:
4600 TagType(TypeClass TC, const TagDecl *D, QualType can);
4601
4602public:
4603 TagDecl *getDecl() const;
4604
4605 /// Determines whether this type is in the process of being defined.
4606 bool isBeingDefined() const;
4607
4608 static bool classof(const Type *T) {
4609 return T->getTypeClass() == Enum || T->getTypeClass() == Record;
4610 }
4611};
4612
4613/// A helper class that allows the use of isa/cast/dyncast
4614/// to detect TagType objects of structs/unions/classes.
4615class RecordType : public TagType {
4616protected:
4617 friend class ASTContext; // ASTContext creates these.
4618
4619 explicit RecordType(const RecordDecl *D)
4620 : TagType(Record, reinterpret_cast<const TagDecl*>(D), QualType()) {}
4621 explicit RecordType(TypeClass TC, RecordDecl *D)
4622 : TagType(TC, reinterpret_cast<const TagDecl*>(D), QualType()) {}
4623
4624public:
4625 RecordDecl *getDecl() const {
4626 return reinterpret_cast<RecordDecl*>(TagType::getDecl());
4627 }
4628
4629 /// Recursively check all fields in the record for const-ness. If any field
4630 /// is declared const, return true. Otherwise, return false.
4631 bool hasConstFields() const;
4632
4633 bool isSugared() const { return false; }
4634 QualType desugar() const { return QualType(this, 0); }
4635
4636 static bool classof(const Type *T) { return T->getTypeClass() == Record; }
4637};
4638
4639/// A helper class that allows the use of isa/cast/dyncast
4640/// to detect TagType objects of enums.
4641class EnumType : public TagType {
4642 friend class ASTContext; // ASTContext creates these.
4643
4644 explicit EnumType(const EnumDecl *D)
4645 : TagType(Enum, reinterpret_cast<const TagDecl*>(D), QualType()) {}
4646
4647public:
4648 EnumDecl *getDecl() const {
4649 return reinterpret_cast<EnumDecl*>(TagType::getDecl());
4650 }
4651
4652 bool isSugared() const { return false; }
4653 QualType desugar() const { return QualType(this, 0); }
4654
4655 static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
4656};
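
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// The classof() hooks above are what let llvm::isa/dyn_cast discriminate
// tag types, as the helper-class comments promise. Names hypothetical.
inline void inspectTagTypeSketch(const Type *T) {
  if (const auto *RT = dyn_cast<RecordType>(T))
    (void)RT->getDecl(); // struct/union/class declaration
  else if (const auto *ET = dyn_cast<EnumType>(T))
    (void)ET->getDecl(); // enum declaration
}
// --- end sketch ---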
4657
4658/// An attributed type is a type to which a type attribute has been applied.
4659///
4660/// The "modified type" is the fully-sugared type to which the attributed
4661/// type was applied; generally it is not canonically equivalent to the
4662/// attributed type. The "equivalent type" is the minimally-desugared type
4663/// to which the attributed type is canonically equivalent.
4664///
4665/// For example, in the following attributed type:
4666/// int32_t __attribute__((vector_size(16)))
4667/// - the modified type is the TypedefType for int32_t
4668/// - the equivalent type is VectorType(16, int32_t)
4669/// - the canonical type is VectorType(16, int)
4670class AttributedType : public Type, public llvm::FoldingSetNode {
4671public:
4672 using Kind = attr::Kind;
4673
4674private:
4675 friend class ASTContext; // ASTContext creates these
4676
4677 QualType ModifiedType;
4678 QualType EquivalentType;
4679
4680 AttributedType(QualType canon, attr::Kind attrKind, QualType modified,
4681 QualType equivalent)
4682 : Type(Attributed, canon, equivalent->getDependence()),
4683 ModifiedType(modified), EquivalentType(equivalent) {
4684 AttributedTypeBits.AttrKind = attrKind;
4685 }
4686
4687public:
4688 Kind getAttrKind() const {
4689 return static_cast<Kind>(AttributedTypeBits.AttrKind);
4690 }
4691
4692 QualType getModifiedType() const { return ModifiedType; }
4693 QualType getEquivalentType() const { return EquivalentType; }
4694
4695 bool isSugared() const { return true; }
4696 QualType desugar() const { return getEquivalentType(); }
4697
4698 /// Does this attribute behave like a type qualifier?
4699 ///
4700 /// A type qualifier adjusts a type to provide specialized rules for
4701 /// a specific object, like the standard const and volatile qualifiers.
4702 /// This includes attributes controlling things like nullability,
4703 /// address spaces, and ARC ownership. The value of the object is still
4704 /// largely described by the modified type.
4705 ///
4706 /// In contrast, many type attributes "rewrite" their modified type to
4707 /// produce a fundamentally different type, not necessarily related in any
4708 /// formalizable way to the original type. For example, calling convention
4709 /// and vector attributes are not simple type qualifiers.
4710 ///
4711 /// Type qualifiers are often, but not always, reflected in the canonical
4712 /// type.
4713 bool isQualifier() const;
4714
4715 bool isMSTypeSpec() const;
4716
4717 bool isCallingConv() const;
4718
4719 llvm::Optional<NullabilityKind> getImmediateNullability() const;
4720
4721 /// Retrieve the attribute kind corresponding to the given
4722 /// nullability kind.
4723 static Kind getNullabilityAttrKind(NullabilityKind kind) {
4724 switch (kind) {
4725 case NullabilityKind::NonNull:
4726 return attr::TypeNonNull;
4727
4728 case NullabilityKind::Nullable:
4729 return attr::TypeNullable;
4730
4731 case NullabilityKind::NullableResult:
4732 return attr::TypeNullableResult;
4733
4734 case NullabilityKind::Unspecified:
4735 return attr::TypeNullUnspecified;
4736 }
4737    llvm_unreachable("Unknown nullability kind.");
4738 }
4739
4740 /// Strip off the top-level nullability annotation on the given
4741 /// type, if it's there.
4742 ///
4743 /// \param T The type to strip. If the type is exactly an
4744 /// AttributedType specifying nullability (without looking through
4745 /// type sugar), the nullability is returned and this type changed
4746 /// to the underlying modified type.
4747 ///
4748 /// \returns the top-level nullability, if present.
4749 static Optional<NullabilityKind> stripOuterNullability(QualType &T);
4750
4751 void Profile(llvm::FoldingSetNodeID &ID) {
4752 Profile(ID, getAttrKind(), ModifiedType, EquivalentType);
4753 }
4754
4755 static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind,
4756 QualType modified, QualType equivalent) {
4757 ID.AddInteger(attrKind);
4758 ID.AddPointer(modified.getAsOpaquePtr());
4759 ID.AddPointer(equivalent.getAsOpaquePtr());
4760 }
4761
4762 static bool classof(const Type *T) {
4763 return T->getTypeClass() == Attributed;
4764 }
4765};
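
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// stripOuterNullability both reports and removes a top-level nullability
// attribute. Assumes QT holds e.g. 'int * _Nonnull'; names hypothetical.
inline void stripNullabilitySketch(QualType QT) {
  if (Optional<NullabilityKind> NK =
          AttributedType::stripOuterNullability(QT)) {
    // *NK is NullabilityKind::NonNull here, and QT has been rewritten to
    // the modified type (plain 'int *').
    (void)NK;
  }
}
// --- end sketch ---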
4766
4767class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
4768 friend class ASTContext; // ASTContext creates these
4769
4770 // Helper data collector for canonical types.
4771 struct CanonicalTTPTInfo {
4772 unsigned Depth : 15;
4773 unsigned ParameterPack : 1;
4774 unsigned Index : 16;
4775 };
4776
4777 union {
4778 // Info for the canonical type.
4779 CanonicalTTPTInfo CanTTPTInfo;
4780
4781 // Info for the non-canonical type.
4782 TemplateTypeParmDecl *TTPDecl;
4783 };
4784
4785 /// Build a non-canonical type.
4786 TemplateTypeParmType(TemplateTypeParmDecl *TTPDecl, QualType Canon)
4787 : Type(TemplateTypeParm, Canon,
4788 TypeDependence::DependentInstantiation |
4789 (Canon->getDependence() & TypeDependence::UnexpandedPack)),
4790 TTPDecl(TTPDecl) {}
4791
4792 /// Build the canonical type.
4793 TemplateTypeParmType(unsigned D, unsigned I, bool PP)
4794 : Type(TemplateTypeParm, QualType(this, 0),
4795 TypeDependence::DependentInstantiation |
4796 (PP ? TypeDependence::UnexpandedPack : TypeDependence::None)) {
4797 CanTTPTInfo.Depth = D;
4798 CanTTPTInfo.Index = I;
4799 CanTTPTInfo.ParameterPack = PP;
4800 }
4801
4802 const CanonicalTTPTInfo& getCanTTPTInfo() const {
4803 QualType Can = getCanonicalTypeInternal();
4804 return Can->castAs<TemplateTypeParmType>()->CanTTPTInfo;
4805 }
4806
4807public:
4808 unsigned getDepth() const { return getCanTTPTInfo().Depth; }
4809 unsigned getIndex() const { return getCanTTPTInfo().Index; }
4810 bool isParameterPack() const { return getCanTTPTInfo().ParameterPack; }
4811
4812 TemplateTypeParmDecl *getDecl() const {
4813 return isCanonicalUnqualified() ? nullptr : TTPDecl;
4814 }
4815
4816 IdentifierInfo *getIdentifier() const;
4817
4818 bool isSugared() const { return false; }
4819 QualType desugar() const { return QualType(this, 0); }
4820
4821 void Profile(llvm::FoldingSetNodeID &ID) {
4822 Profile(ID, getDepth(), getIndex(), isParameterPack(), getDecl());
4823 }
4824
4825 static void Profile(llvm::FoldingSetNodeID &ID, unsigned Depth,
4826 unsigned Index, bool ParameterPack,
4827 TemplateTypeParmDecl *TTPDecl) {
4828 ID.AddInteger(Depth);
4829 ID.AddInteger(Index);
4830 ID.AddBoolean(ParameterPack);
4831 ID.AddPointer(TTPDecl);
4832 }
4833
4834 static bool classof(const Type *T) {
4835 return T->getTypeClass() == TemplateTypeParm;
4836 }
4837};
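
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// For 'template<class T> struct A { template<class U> void f(U); };' the
// type of f's parameter has depth 1, index 0. Names hypothetical.
inline void inspectTemplateParamSketch(QualType QT) {
  if (const auto *TTP = QT->getAs<TemplateTypeParmType>()) {
    unsigned Depth = TTP->getDepth(); // nesting level of the parameter list
    unsigned Index = TTP->getIndex(); // position within that list
    bool Pack = TTP->isParameterPack();
    (void)Depth; (void)Index; (void)Pack;
  }
}
// --- end sketch ---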
4838
4839/// Represents the result of substituting a type for a template
4840/// type parameter.
4841///
4842/// Within an instantiated template, all template type parameters have
4843/// been replaced with these. They are used solely to record that a
4844/// type was originally written as a template type parameter;
4845/// therefore they are never canonical.
4846class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode {
4847 friend class ASTContext;
4848
4849 // The original type parameter.
4850 const TemplateTypeParmType *Replaced;
4851
4852 SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon)
4853 : Type(SubstTemplateTypeParm, Canon, Canon->getDependence()),
4854 Replaced(Param) {}
4855
4856public:
4857 /// Gets the template parameter that was substituted for.
4858 const TemplateTypeParmType *getReplacedParameter() const {
4859 return Replaced;
4860 }
4861
4862 /// Gets the type that was substituted for the template
4863 /// parameter.
4864 QualType getReplacementType() const {
4865 return getCanonicalTypeInternal();
4866 }
4867
4868 bool isSugared() const { return true; }
4869 QualType desugar() const { return getReplacementType(); }
4870
4871 void Profile(llvm::FoldingSetNodeID &ID) {
4872 Profile(ID, getReplacedParameter(), getReplacementType());
4873 }
4874
4875 static void Profile(llvm::FoldingSetNodeID &ID,
4876 const TemplateTypeParmType *Replaced,
4877 QualType Replacement) {
4878 ID.AddPointer(Replaced);
4879 ID.AddPointer(Replacement.getAsOpaquePtr());
4880 }
4881
4882 static bool classof(const Type *T) {
4883 return T->getTypeClass() == SubstTemplateTypeParm;
4884 }
4885};
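
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// Inside an instantiation of 'template<class T> void f(T);' with T = int,
// the parameter's type records both sides of the substitution. Names
// hypothetical.
inline void inspectSubstSketch(QualType QT) {
  if (const auto *ST = QT->getAs<SubstTemplateTypeParmType>()) {
    const TemplateTypeParmType *Orig = ST->getReplacedParameter(); // 'T'
    QualType Repl = ST->getReplacementType();                      // 'int'
    (void)Orig; (void)Repl;
  }
}
// --- end sketch ---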
4886
4887/// Represents the result of substituting a set of types for a template
4888/// type parameter pack.
4889///
4890/// When a pack expansion in the source code contains multiple parameter packs
4891/// and those parameter packs correspond to different levels of template
4892/// parameter lists, this type node is used to represent a template type
4893/// parameter pack from an outer level, which has already had its argument pack
4894/// substituted but that still lives within a pack expansion that itself
4895/// could not be instantiated. When actually performing a substitution into
4896/// that pack expansion (e.g., when all template parameters have corresponding
4897/// arguments), this type will be replaced with the \c SubstTemplateTypeParmType
4898/// at the current pack substitution index.
4899class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
4900 friend class ASTContext;
4901
4902 /// The original type parameter.
4903 const TemplateTypeParmType *Replaced;
4904
4905 /// A pointer to the set of template arguments that this
4906 /// parameter pack is instantiated with.
4907 const TemplateArgument *Arguments;
4908
4909 SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
4910 QualType Canon,
4911 const TemplateArgument &ArgPack);
4912
4913public:
4914 IdentifierInfo *getIdentifier() const { return Replaced->getIdentifier(); }
4915
4916 /// Gets the template parameter that was substituted for.
4917 const TemplateTypeParmType *getReplacedParameter() const {
4918 return Replaced;
4919 }
4920
4921 unsigned getNumArgs() const {
4922 return SubstTemplateTypeParmPackTypeBits.NumArgs;
4923 }
4924
4925 bool isSugared() const { return false; }
4926 QualType desugar() const { return QualType(this, 0); }
4927
4928 TemplateArgument getArgumentPack() const;
4929
4930 void Profile(llvm::FoldingSetNodeID &ID);
4931 static void Profile(llvm::FoldingSetNodeID &ID,
4932 const TemplateTypeParmType *Replaced,
4933 const TemplateArgument &ArgPack);
4934
4935 static bool classof(const Type *T) {
4936 return T->getTypeClass() == SubstTemplateTypeParmPack;
4937 }
4938};
4939
4940/// Common base class for placeholders for types that get replaced by
4941/// placeholder type deduction: C++11 auto, C++14 decltype(auto), C++17 deduced
4942/// class template types, and constrained type names.
4943///
4944/// These types are usually a placeholder for a deduced type. However, before
4945/// the initializer is attached, or (usually) if the initializer is
4946/// type-dependent, there is no deduced type and the type is canonical. In
4947/// the latter case, it is also a dependent type.
4948class DeducedType : public Type {
4949protected:
4950 DeducedType(TypeClass TC, QualType DeducedAsType,
4951 TypeDependence ExtraDependence)
4952 : Type(TC,
4953 // FIXME: Retain the sugared deduced type?
4954 DeducedAsType.isNull() ? QualType(this, 0)
4955 : DeducedAsType.getCanonicalType(),
4956 ExtraDependence | (DeducedAsType.isNull()
4957 ? TypeDependence::None
4958 : DeducedAsType->getDependence() &
4959 ~TypeDependence::VariablyModified)) {}
4960
4961public:
4962 bool isSugared() const { return !isCanonicalUnqualified(); }
4963 QualType desugar() const { return getCanonicalTypeInternal(); }
4964
4965 /// Get the type deduced for this placeholder type, or null if it's
4966 /// either not been deduced or was deduced to a dependent type.
4967 QualType getDeducedType() const {
4968 return !isCanonicalUnqualified() ? getCanonicalTypeInternal() : QualType();
4969 }
4970 bool isDeduced() const {
4971 return !isCanonicalUnqualified() || isDependentType();
4972 }
4973
4974 static bool classof(const Type *T) {
4975 return T->getTypeClass() == Auto ||
4976 T->getTypeClass() == DeducedTemplateSpecialization;
4977 }
4978};
4979
4980/// Represents a C++11 auto or C++14 decltype(auto) type, possibly constrained
4981/// by a type-constraint.
4982class alignas(8) AutoType : public DeducedType, public llvm::FoldingSetNode {
4983 friend class ASTContext; // ASTContext creates these
4984
4985 ConceptDecl *TypeConstraintConcept;
4986
4987 AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
4988 TypeDependence ExtraDependence, ConceptDecl *CD,
4989 ArrayRef<TemplateArgument> TypeConstraintArgs);
4990
4991 const TemplateArgument *getArgBuffer() const {
4992 return reinterpret_cast<const TemplateArgument*>(this+1);
4993 }
4994
4995 TemplateArgument *getArgBuffer() {
4996 return reinterpret_cast<TemplateArgument*>(this+1);
4997 }
4998
4999public:
5000 /// Retrieve the template arguments.
5001 const TemplateArgument *getArgs() const {
5002 return getArgBuffer();
5003 }
5004
5005 /// Retrieve the number of template arguments.
5006 unsigned getNumArgs() const {
5007 return AutoTypeBits.NumArgs;
5008 }
5009
5010 const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
5011
5012 ArrayRef<TemplateArgument> getTypeConstraintArguments() const {
5013 return {getArgs(), getNumArgs()};
5014 }
5015
5016 ConceptDecl *getTypeConstraintConcept() const {
5017 return TypeConstraintConcept;
5018 }
5019
5020 bool isConstrained() const {
5021 return TypeConstraintConcept != nullptr;
5022 }
5023
5024 bool isDecltypeAuto() const {
5025 return getKeyword() == AutoTypeKeyword::DecltypeAuto;
5026 }
5027
5028 AutoTypeKeyword getKeyword() const {
5029 return (AutoTypeKeyword)AutoTypeBits.Keyword;
5030 }
5031
5032 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
5033 Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(),
5034 getTypeConstraintConcept(), getTypeConstraintArguments());
5035 }
5036
5037 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
5038 QualType Deduced, AutoTypeKeyword Keyword,
5039 bool IsDependent, ConceptDecl *CD,
5040 ArrayRef<TemplateArgument> Arguments);
5041
5042 static bool classof(const Type *T) {
5043 return T->getTypeClass() == Auto;
5044 }
5045};
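
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// Querying what an 'auto' was deduced to, per the DeducedType contract
// above. Names hypothetical.
inline void inspectAutoSketch(QualType QT) {
  if (const auto *AT = QT->getAs<AutoType>()) {
    if (AT->isDeduced() && !AT->getDeducedType().isNull())
      (void)AT->getDeducedType(); // e.g. 'int' for 'auto x = 0;'
    if (AT->isConstrained())
      (void)AT->getTypeConstraintConcept(); // the constraining concept
  }
}
// --- end sketch ---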
5046
5047/// Represents a C++17 deduced template specialization type.
5048class DeducedTemplateSpecializationType : public DeducedType,
5049 public llvm::FoldingSetNode {
5050 friend class ASTContext; // ASTContext creates these
5051
5052 /// The name of the template whose arguments will be deduced.
5053 TemplateName Template;
5054
5055 DeducedTemplateSpecializationType(TemplateName Template,
5056 QualType DeducedAsType,
5057 bool IsDeducedAsDependent)
5058 : DeducedType(DeducedTemplateSpecialization, DeducedAsType,
5059 toTypeDependence(Template.getDependence()) |
5060 (IsDeducedAsDependent
5061 ? TypeDependence::DependentInstantiation
5062 : TypeDependence::None)),
5063 Template(Template) {}
5064
5065public:
5066 /// Retrieve the name of the template that we are deducing.
5067  TemplateName getTemplateName() const { return Template; }
5068
5069 void Profile(llvm::FoldingSetNodeID &ID) {
5070 Profile(ID, getTemplateName(), getDeducedType(), isDependentType());
5071 }
5072
5073 static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Template,
5074 QualType Deduced, bool IsDependent) {
5075 Template.Profile(ID);
5076 ID.AddPointer(Deduced.getAsOpaquePtr());
5077 ID.AddBoolean(IsDependent);
5078 }
5079
5080 static bool classof(const Type *T) {
5081 return T->getTypeClass() == DeducedTemplateSpecialization;
5082 }
5083};
5084
5085/// Represents a type template specialization; the template
5086/// must be a class template, a type alias template, or a template
5087/// template parameter. A template which cannot be resolved to one of
5088/// these, e.g. because it is written with a dependent scope
5089/// specifier, is instead represented as a
5090/// @c DependentTemplateSpecializationType.
5091///
5092/// A non-dependent template specialization type is always "sugar",
5093/// typically for a \c RecordType. For example, a class template
5094/// specialization type of \c vector<int> will refer to a tag type for
5095/// the instantiation \c std::vector<int, std::allocator<int>>
5096///
5097/// Template specializations are dependent if either the template or
5098/// any of the template arguments are dependent, in which case the
5099/// type may also be canonical.
5100///
5101/// Instances of this type are allocated with a trailing array of
5102/// TemplateArguments, followed by a QualType representing the
5103/// non-canonical aliased type when the template is a type alias
5104/// template.
5105class alignas(8) TemplateSpecializationType
5106 : public Type,
5107 public llvm::FoldingSetNode {
5108 friend class ASTContext; // ASTContext creates these
5109
5110 /// The name of the template being specialized. This is
5111 /// either a TemplateName::Template (in which case it is a
5112 /// ClassTemplateDecl*, a TemplateTemplateParmDecl*, or a
5113 /// TypeAliasTemplateDecl*), a
5114 /// TemplateName::SubstTemplateTemplateParmPack, or a
5115 /// TemplateName::SubstTemplateTemplateParm (in which case the
5116 /// replacement must, recursively, be one of these).
5117 TemplateName Template;
5118
5119 TemplateSpecializationType(TemplateName T,
5120 ArrayRef<TemplateArgument> Args,
5121 QualType Canon,
5122 QualType Aliased);
5123
5124public:
5125 /// Determine whether any of the given template arguments are dependent.
5126 ///
5127 /// The converted arguments should be supplied when known; whether an
5128 /// argument is dependent can depend on the conversions performed on it
5129 /// (for example, a 'const int' passed as a template argument might be
5130 /// dependent if the parameter is a reference but non-dependent if the
5131 /// parameter is an int).
5132 ///
5133 /// Note that the \p Args parameter is unused: this is intentional, to remind
5134 /// the caller that they need to pass in the converted arguments, not the
5135 /// specified arguments.
5136 static bool
5137 anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
5138 ArrayRef<TemplateArgument> Converted);
5139 static bool
5140 anyDependentTemplateArguments(const TemplateArgumentListInfo &,
5141 ArrayRef<TemplateArgument> Converted);
5142 static bool anyInstantiationDependentTemplateArguments(
5143 ArrayRef<TemplateArgumentLoc> Args);
5144
5145 /// True if this template specialization type matches a current
5146 /// instantiation in the context in which it is found.
5147 bool isCurrentInstantiation() const {
5148 return isa<InjectedClassNameType>(getCanonicalTypeInternal());
5149 }
5150
5151 /// Determine if this template specialization type is for a type alias
5152 /// template that has been substituted.
5153 ///
5154 /// Nearly every template specialization type whose template is an alias
5155 /// template will be substituted. However, this is not the case when
5156 /// the specialization contains a pack expansion but the template alias
5157 /// does not have a corresponding parameter pack, e.g.,
5158 ///
5159 /// \code
5160 /// template<typename T, typename U, typename V> struct S;
5161 /// template<typename T, typename U> using A = S<T, int, U>;
5162 /// template<typename... Ts> struct X {
5163 /// typedef A<Ts...> type; // not a type alias
5164 /// };
5165 /// \endcode
5166 bool isTypeAlias() const { return TemplateSpecializationTypeBits.TypeAlias; }
5167
5168 /// Get the aliased type, if this is a specialization of a type alias
5169 /// template.
5170 QualType getAliasedType() const {
5171    assert(isTypeAlias() && "not a type alias template specialization");
5172 return *reinterpret_cast<const QualType*>(end());
5173 }
5174
5175 using iterator = const TemplateArgument *;
5176
5177 iterator begin() const { return getArgs(); }
5178 iterator end() const; // defined inline in TemplateBase.h
5179
5180 /// Retrieve the name of the template that we are specializing.
5181 TemplateName getTemplateName() const { return Template; }
5182
5183 /// Retrieve the template arguments.
5184 const TemplateArgument *getArgs() const {
5185 return reinterpret_cast<const TemplateArgument *>(this + 1);
5186 }
5187
5188 /// Retrieve the number of template arguments.
5189 unsigned getNumArgs() const {
5190 return TemplateSpecializationTypeBits.NumArgs;
5191 }
5192
5193 /// Retrieve a specific template argument as a type.
5194 /// \pre \c isArgType(Arg)
5195 const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
5196
5197 ArrayRef<TemplateArgument> template_arguments() const {
5198 return {getArgs(), getNumArgs()};
5199 }
5200
5201 bool isSugared() const {
5202 return !isDependentType() || isCurrentInstantiation() || isTypeAlias();
5203 }
5204
5205 QualType desugar() const {
5206 return isTypeAlias() ? getAliasedType() : getCanonicalTypeInternal();
5207 }
5208
5209 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
5210 Profile(ID, Template, template_arguments(), Ctx);
5211 if (isTypeAlias())
5212 getAliasedType().Profile(ID);
5213 }
5214
5215 static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
5216 ArrayRef<TemplateArgument> Args,
5217 const ASTContext &Context);
5218
5219 static bool classof(const Type *T) {
5220 return T->getTypeClass() == TemplateSpecialization;
5221 }
5222};
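
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// Walking the written arguments of a specialization such as
// 'std::vector<int>'. Assumes the caller also includes
// clang/AST/TemplateBase.h so TemplateArgument is complete; names
// hypothetical.
inline void walkSpecializationSketch(QualType QT) {
  if (const auto *TST = QT->getAs<TemplateSpecializationType>()) {
    for (const TemplateArgument &Arg : TST->template_arguments())
      if (Arg.getKind() == TemplateArgument::Type)
        (void)Arg.getAsType(); // e.g. 'int'
    if (TST->isTypeAlias())
      (void)TST->getAliasedType(); // alias-template specializations only
  }
}
// --- end sketch ---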
5223
5224/// Print a template argument list, including the '<' and '>'
5225/// enclosing the template arguments.
5226void printTemplateArgumentList(raw_ostream &OS,
5227 ArrayRef<TemplateArgument> Args,
5228 const PrintingPolicy &Policy,
5229 const TemplateParameterList *TPL = nullptr);
5230
5231void printTemplateArgumentList(raw_ostream &OS,
5232 ArrayRef<TemplateArgumentLoc> Args,
5233 const PrintingPolicy &Policy,
5234 const TemplateParameterList *TPL = nullptr);
5235
5236void printTemplateArgumentList(raw_ostream &OS,
5237 const TemplateArgumentListInfo &Args,
5238 const PrintingPolicy &Policy,
5239 const TemplateParameterList *TPL = nullptr);
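
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// A typical call site for the printers declared above. Assumes complete
// ASTContext and raw_ostream definitions at the call site; names
// hypothetical.
inline void printArgsSketch(raw_ostream &OS, ArrayRef<TemplateArgument> Args,
                            const ASTContext &Ctx) {
  printTemplateArgumentList(OS, Args, Ctx.getPrintingPolicy()); // '<...>'
}
// --- end sketch ---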
5240
5241/// The injected class name of a C++ class template or class
5242/// template partial specialization. Used to record that a type was
5243/// spelled with a bare identifier rather than as a template-id; the
5244/// equivalent for non-templated classes is just RecordType.
5245///
5246/// Injected class name types are always dependent. Template
5247/// instantiation turns these into RecordTypes.
5248///
5249/// Injected class name types are always canonical. This works
5250/// because it is impossible to compare an injected class name type
5251/// with the corresponding non-injected template type, for the same
5252/// reason that it is impossible to directly compare template
5253/// parameters from different dependent contexts: injected class name
5254/// types can only occur within the scope of a particular templated
5255/// declaration, and within that scope every template specialization
5256/// will canonicalize to the injected class name (when appropriate
5257/// according to the rules of the language).
5258class InjectedClassNameType : public Type {
5259 friend class ASTContext; // ASTContext creates these.
5260 friend class ASTNodeImporter;
5261 friend class ASTReader; // FIXME: ASTContext::getInjectedClassNameType is not
5262                          // currently suitable for AST reading; too many
5263 // interdependencies.
5264 template <class T> friend class serialization::AbstractTypeReader;
5265
5266 CXXRecordDecl *Decl;
5267
5268 /// The template specialization which this type represents.
5269 /// For example, in
5270 /// template <class T> class A { ... };
5271 /// this is A<T>, whereas in
5272 /// template <class X, class Y> class A<B<X,Y> > { ... };
5273 /// this is A<B<X,Y> >.
5274 ///
5275 /// It is always unqualified, always a template specialization type,
5276 /// and always dependent.
5277 QualType InjectedType;
5278
5279 InjectedClassNameType(CXXRecordDecl *D, QualType TST)
5280 : Type(InjectedClassName, QualType(),
5281 TypeDependence::DependentInstantiation),
5282 Decl(D), InjectedType(TST) {
5283    assert(isa<TemplateSpecializationType>(TST));
5284    assert(!TST.hasQualifiers());
5285    assert(TST->isDependentType());
5286 }
5287
5288public:
5289 QualType getInjectedSpecializationType() const { return InjectedType; }
5290
5291 const TemplateSpecializationType *getInjectedTST() const {
5292 return cast<TemplateSpecializationType>(InjectedType.getTypePtr());
5293 }
5294
5295 TemplateName getTemplateName() const {
5296 return getInjectedTST()->getTemplateName();
5297 }
5298
5299 CXXRecordDecl *getDecl() const;
5300
5301 bool isSugared() const { return false; }
5302 QualType desugar() const { return QualType(this, 0); }
5303
5304 static bool classof(const Type *T) {
5305 return T->getTypeClass() == InjectedClassName;
5306 }
5307};
5308
5309/// The kind of a tag type.
5310enum TagTypeKind {
5311 /// The "struct" keyword.
5312 TTK_Struct,
5313
5314 /// The "__interface" keyword.
5315 TTK_Interface,
5316
5317 /// The "union" keyword.
5318 TTK_Union,
5319
5320 /// The "class" keyword.
5321 TTK_Class,
5322
5323 /// The "enum" keyword.
5324 TTK_Enum
5325};
5326
5327/// The elaboration keyword that precedes a qualified type name or
5328/// introduces an elaborated-type-specifier.
5329enum ElaboratedTypeKeyword {
5330 /// The "struct" keyword introduces the elaborated-type-specifier.
5331 ETK_Struct,
5332
5333 /// The "__interface" keyword introduces the elaborated-type-specifier.
5334 ETK_Interface,
5335
5336 /// The "union" keyword introduces the elaborated-type-specifier.
5337 ETK_Union,
5338
5339 /// The "class" keyword introduces the elaborated-type-specifier.
5340 ETK_Class,
5341
5342 /// The "enum" keyword introduces the elaborated-type-specifier.
5343 ETK_Enum,
5344
5345 /// The "typename" keyword precedes the qualified type name, e.g.,
5346 /// \c typename T::type.
5347 ETK_Typename,
5348
5349 /// No keyword precedes the qualified type name.
5350 ETK_None
5351};
5352
5353/// A helper class for Type nodes having an ElaboratedTypeKeyword.
5354/// The keyword is stored in the free bits of the base class.
5355/// Also provides a few static helpers for converting and printing
5356/// elaborated type keyword and tag type kind enumerations.
5357class TypeWithKeyword : public Type {
5358protected:
5359 TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
5360 QualType Canonical, TypeDependence Dependence)
5361 : Type(tc, Canonical, Dependence) {
5362 TypeWithKeywordBits.Keyword = Keyword;
5363 }
5364
5365public:
5366 ElaboratedTypeKeyword getKeyword() const {
5367 return static_cast<ElaboratedTypeKeyword>(TypeWithKeywordBits.Keyword);
5368 }
5369
5370 /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword.
5371 static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);
5372
5373 /// Converts a type specifier (DeclSpec::TST) into a tag type kind.
5374 /// It is an error to provide a type specifier which *isn't* a tag kind here.
5375 static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);
5376
5377 /// Converts a TagTypeKind into an elaborated type keyword.
5378 static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);
5379
5380 /// Converts an elaborated type keyword into a TagTypeKind.
5381 /// It is an error to provide an elaborated type keyword
5382 /// which *isn't* a tag kind here.
5383 static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);
5384
5385 static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);
5386
5387 static StringRef getKeywordName(ElaboratedTypeKeyword Keyword);
5388
5389 static StringRef getTagTypeKindName(TagTypeKind Kind) {
5390 return getKeywordName(getKeywordForTagTypeKind(Kind));
5391 }
5392
5393 class CannotCastToThisType {};
5394 static CannotCastToThisType classof(const Type *);
5395};
5396
5397/// Represents a type that was referred to using an elaborated type
5398/// keyword, e.g., struct S, or via a qualified name, e.g., N::M::type,
5399/// or both.
5400///
5401/// This type is used to keep track of a type name as written in the
5402/// source code, including tag keywords and any nested-name-specifiers.
5403/// The type itself is always "sugar", used to express what was written
5404/// in the source code but containing no additional semantic information.
5405class ElaboratedType final
5406 : public TypeWithKeyword,
5407 public llvm::FoldingSetNode,
5408 private llvm::TrailingObjects<ElaboratedType, TagDecl *> {
5409 friend class ASTContext; // ASTContext creates these
5410 friend TrailingObjects;
5411
5412 /// The nested name specifier containing the qualifier.
5413 NestedNameSpecifier *NNS;
5414
5415 /// The type that this qualified name refers to.
5416 QualType NamedType;
5417
5418 /// The (re)declaration of this tag type owned by this occurrence is stored
5419 /// as a trailing object if there is one. Use getOwnedTagDecl to obtain
5420 /// it, or obtain a null pointer if there is none.
5421
5422 ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
5423 QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl)
5424 : TypeWithKeyword(Keyword, Elaborated, CanonType,
5425 // Any semantic dependence on the qualifier will have
5426 // been incorporated into NamedType. We still need to
5427 // track syntactic (instantiation / error / pack)
5428 // dependence on the qualifier.
5429 NamedType->getDependence() |
5430 (NNS ? toSyntacticDependence(
5431 toTypeDependence(NNS->getDependence()))
5432 : TypeDependence::None)),
5433 NNS(NNS), NamedType(NamedType) {
5434 ElaboratedTypeBits.HasOwnedTagDecl = false;
5435 if (OwnedTagDecl) {
5436 ElaboratedTypeBits.HasOwnedTagDecl = true;
5437 *getTrailingObjects<TagDecl *>() = OwnedTagDecl;
5438 }
5439    assert(!(Keyword == ETK_None && NNS == nullptr) &&
5440           "ElaboratedType cannot have elaborated type keyword "
5441           "and name qualifier both null.");
5442 }
5443
5444public:
5445 /// Retrieve the qualification on this type.
5446 NestedNameSpecifier *getQualifier() const { return NNS; }
5447
5448 /// Retrieve the type named by the qualified-id.
5449 QualType getNamedType() const { return NamedType; }
5450
5451 /// Remove a single level of sugar.
5452 QualType desugar() const { return getNamedType(); }
5453
5454 /// Returns whether this type directly provides sugar.
5455 bool isSugared() const { return true; }
5456
5457 /// Return the (re)declaration of this type owned by this occurrence of this
5458 /// type, or nullptr if there is none.
5459 TagDecl *getOwnedTagDecl() const {
5460 return ElaboratedTypeBits.HasOwnedTagDecl ? *getTrailingObjects<TagDecl *>()
5461 : nullptr;
5462 }
5463
5464 void Profile(llvm::FoldingSetNodeID &ID) {
5465 Profile(ID, getKeyword(), NNS, NamedType, getOwnedTagDecl());
5466 }
5467
5468 static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
5469 NestedNameSpecifier *NNS, QualType NamedType,
5470 TagDecl *OwnedTagDecl) {
5471 ID.AddInteger(Keyword);
5472 ID.AddPointer(NNS);
5473 NamedType.Profile(ID);
5474 ID.AddPointer(OwnedTagDecl);
5475 }
5476
5477 static bool classof(const Type *T) { return T->getTypeClass() == Elaborated; }
5478};
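
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// For a use written as 'struct S' or 'N::M::type', the sugar keeps the
// keyword and qualifier while desugar() drops both. Names hypothetical.
inline void inspectElaboratedSketch(QualType QT) {
  if (const auto *ElabT = QT->getAs<ElaboratedType>()) {
    ElaboratedTypeKeyword K = ElabT->getKeyword();     // e.g. ETK_Struct
    NestedNameSpecifier *Qual = ElabT->getQualifier(); // e.g. 'N::M::'
    QualType Named = ElabT->getNamedType();            // == desugar()
    (void)K; (void)Qual; (void)Named;
  }
}
// --- end sketch ---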
5479
5480/// Represents a qualified type name for which the type name is
5481/// dependent.
5482///
5483/// DependentNameType represents a class of dependent types that involve a
5484/// possibly dependent nested-name-specifier (e.g., "T::") followed by a
5485/// name of a type. The DependentNameType may start with a "typename" (for a
5486/// typename-specifier), "class", "struct", "union", or "enum" (for a
5487/// dependent elaborated-type-specifier), or nothing (in contexts where we
5488/// know that we must be referring to a type, e.g., in a base class specifier).
5489/// Typically the nested-name-specifier is dependent, but in MSVC compatibility
5490/// mode, this type is used with non-dependent names to delay name lookup until
5491/// instantiation.
5492class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
5493 friend class ASTContext; // ASTContext creates these
5494
5495 /// The nested name specifier containing the qualifier.
5496 NestedNameSpecifier *NNS;
5497
5498 /// The type that this typename specifier refers to.
5499 const IdentifierInfo *Name;
5500
5501 DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
5502 const IdentifierInfo *Name, QualType CanonType)
5503 : TypeWithKeyword(Keyword, DependentName, CanonType,
5504 TypeDependence::DependentInstantiation |
5505 toTypeDependence(NNS->getDependence())),
5506 NNS(NNS), Name(Name) {}
5507
5508public:
5509 /// Retrieve the qualification on this type.
5510 NestedNameSpecifier *getQualifier() const { return NNS; }
5511
5512 /// Retrieve the type named by the typename specifier as an identifier.
5513 ///
5514 /// This routine will return a non-NULL identifier pointer when the
5515 /// form of the original typename was terminated by an identifier,
5516 /// e.g., "typename T::type".
5517 const IdentifierInfo *getIdentifier() const {
5518 return Name;
5519 }
5520
5521 bool isSugared() const { return false; }
5522 QualType desugar() const { return QualType(this, 0); }
5523
5524 void Profile(llvm::FoldingSetNodeID &ID) {
5525 Profile(ID, getKeyword(), NNS, Name);
5526 }
5527
5528 static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
5529 NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
5530 ID.AddInteger(Keyword);
5531 ID.AddPointer(NNS);
5532 ID.AddPointer(Name);
5533 }
5534
5535 static bool classof(const Type *T) {
5536 return T->getTypeClass() == DependentName;
5537 }
5538};
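
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// For 'typename T::type' the keyword is ETK_Typename, the qualifier is
// 'T::', and the identifier is 'type'. Names hypothetical.
inline void inspectDependentNameSketch(QualType QT) {
  if (const auto *DNT = QT->getAs<DependentNameType>()) {
    (void)DNT->getKeyword();
    (void)DNT->getQualifier();
    (void)DNT->getIdentifier();
  }
}
// --- end sketch ---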
5539
5540/// Represents a template specialization type whose template cannot be
5541/// resolved, e.g.
5542/// A<T>::template B<T>
5543class alignas(8) DependentTemplateSpecializationType
5544 : public TypeWithKeyword,
5545 public llvm::FoldingSetNode {
5546 friend class ASTContext; // ASTContext creates these
5547
5548 /// The nested name specifier containing the qualifier.
5549 NestedNameSpecifier *NNS;
5550
5551 /// The identifier of the template.
5552 const IdentifierInfo *Name;
5553
5554 DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
5555 NestedNameSpecifier *NNS,
5556 const IdentifierInfo *Name,
5557 ArrayRef<TemplateArgument> Args,
5558 QualType Canon);
5559
5560 const TemplateArgument *getArgBuffer() const {
5561 return reinterpret_cast<const TemplateArgument*>(this+1);
5562 }
5563
5564 TemplateArgument *getArgBuffer() {
5565 return reinterpret_cast<TemplateArgument*>(this+1);
5566 }
5567
5568public:
5569 NestedNameSpecifier *getQualifier() const { return NNS; }
5570 const IdentifierInfo *getIdentifier() const { return Name; }
5571
5572 /// Retrieve the template arguments.
5573 const TemplateArgument *getArgs() const {
5574 return getArgBuffer();
5575 }
5576
5577 /// Retrieve the number of template arguments.
5578 unsigned getNumArgs() const {
5579 return DependentTemplateSpecializationTypeBits.NumArgs;
5580 }
5581
5582 const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
5583
5584 ArrayRef<TemplateArgument> template_arguments() const {
5585 return {getArgs(), getNumArgs()};
5586 }
5587
5588 using iterator = const TemplateArgument *;
5589
5590 iterator begin() const { return getArgs(); }
5591 iterator end() const; // inline in TemplateBase.h
5592
5593 bool isSugared() const { return false; }
5594 QualType desugar() const { return QualType(this, 0); }
5595
5596 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
5597 Profile(ID, Context, getKeyword(), NNS, Name, {getArgs(), getNumArgs()});
5598 }
5599
5600 static void Profile(llvm::FoldingSetNodeID &ID,
5601 const ASTContext &Context,
5602 ElaboratedTypeKeyword Keyword,
5603 NestedNameSpecifier *Qualifier,
5604 const IdentifierInfo *Name,
5605 ArrayRef<TemplateArgument> Args);
5606
5607 static bool classof(const Type *T) {
5608 return T->getTypeClass() == DependentTemplateSpecialization;
5609 }
5610};
5611
5612/// Represents a pack expansion of types.
5613///
5614/// Pack expansions are part of C++11 variadic templates. A pack
5615/// expansion contains a pattern, which itself contains one or more
5616/// "unexpanded" parameter packs. When instantiated, a pack expansion
5617/// produces a series of types, each instantiated from the pattern of
5618/// the expansion, where the Ith instantiation of the pattern uses the
5619/// Ith arguments bound to each of the unexpanded parameter packs. The
5620/// pack expansion is considered to "expand" these unexpanded
5621/// parameter packs.
5622///
5623/// \code
5624/// template<typename ...Types> struct tuple;
5625///
5626/// template<typename ...Types>
5627/// struct tuple_of_references {
5628/// typedef tuple<Types&...> type;
5629/// };
5630/// \endcode
5631///
5632/// Here, the pack expansion \c Types&... is represented via a
5633/// PackExpansionType whose pattern is Types&.
5634class PackExpansionType : public Type, public llvm::FoldingSetNode {
5635 friend class ASTContext; // ASTContext creates these
5636
5637 /// The pattern of the pack expansion.
5638 QualType Pattern;
5639
5640 PackExpansionType(QualType Pattern, QualType Canon,
5641 Optional<unsigned> NumExpansions)
5642 : Type(PackExpansion, Canon,
5643 (Pattern->getDependence() | TypeDependence::Dependent |
5644 TypeDependence::Instantiation) &
5645 ~TypeDependence::UnexpandedPack),
5646 Pattern(Pattern) {
5647 PackExpansionTypeBits.NumExpansions =
5648 NumExpansions ? *NumExpansions + 1 : 0;
5649 }
5650
5651public:
5652 /// Retrieve the pattern of this pack expansion, which is the
5653 /// type that will be repeatedly instantiated when instantiating the
5654 /// pack expansion itself.
5655 QualType getPattern() const { return Pattern; }
5656
5657 /// Retrieve the number of expansions that this pack expansion will
5658 /// generate, if known.
5659 Optional<unsigned> getNumExpansions() const {
5660 if (PackExpansionTypeBits.NumExpansions)
5661 return PackExpansionTypeBits.NumExpansions - 1;
5662 return None;
5663 }
5664
5665 bool isSugared() const { return false; }
5666 QualType desugar() const { return QualType(this, 0); }
5667
5668 void Profile(llvm::FoldingSetNodeID &ID) {
5669 Profile(ID, getPattern(), getNumExpansions());
5670 }
5671
5672 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
5673 Optional<unsigned> NumExpansions) {
5674 ID.AddPointer(Pattern.getAsOpaquePtr());
5675 ID.AddBoolean(NumExpansions.hasValue());
5676 if (NumExpansions)
5677 ID.AddInteger(*NumExpansions);
5678 }
5679
5680 static bool classof(const Type *T) {
5681 return T->getTypeClass() == PackExpansion;
5682 }
5683};
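
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// NumExpansions is stored biased by one so that 0 can mean "unknown";
// getNumExpansions() above undoes the bias. Names hypothetical.
inline void inspectPackExpansionSketch(QualType QT) {
  if (const auto *PET = QT->getAs<PackExpansionType>()) {
    QualType Pattern = PET->getPattern();           // e.g. 'Types&'
    Optional<unsigned> N = PET->getNumExpansions(); // None until fixed
    (void)Pattern; (void)N;
  }
}
// --- end sketch ---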
5684
5685/// This class wraps the list of protocol qualifiers. Types that can take
5686/// ObjC protocol qualifiers can subclass this class.
5687template <class T>
5688class ObjCProtocolQualifiers {
5689protected:
5690 ObjCProtocolQualifiers() = default;
5691
5692 ObjCProtocolDecl * const *getProtocolStorage() const {
5693 return const_cast<ObjCProtocolQualifiers*>(this)->getProtocolStorage();
5694 }
5695
5696 ObjCProtocolDecl **getProtocolStorage() {
5697 return static_cast<T*>(this)->getProtocolStorageImpl();
5698 }
5699
5700 void setNumProtocols(unsigned N) {
5701 static_cast<T*>(this)->setNumProtocolsImpl(N);
5702 }
5703
5704 void initialize(ArrayRef<ObjCProtocolDecl *> protocols) {
5705 setNumProtocols(protocols.size());
5706    assert(getNumProtocols() == protocols.size() &&
5707           "bitfield overflow in protocol count");
5708 if (!protocols.empty())
5709 memcpy(getProtocolStorage(), protocols.data(),
5710 protocols.size() * sizeof(ObjCProtocolDecl*));
5711 }
5712
5713public:
5714 using qual_iterator = ObjCProtocolDecl * const *;
5715 using qual_range = llvm::iterator_range<qual_iterator>;
5716
5717 qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
5718 qual_iterator qual_begin() const { return getProtocolStorage(); }
5719 qual_iterator qual_end() const { return qual_begin() + getNumProtocols(); }
5720
5721 bool qual_empty() const { return getNumProtocols() == 0; }
5722
5723 /// Return the number of qualifying protocols in this type, or 0 if
5724 /// there are none.
5725 unsigned getNumProtocols() const {
5726 return static_cast<const T*>(this)->getNumProtocolsImpl();
5727 }
5728
5729 /// Fetch a protocol by index.
5730 ObjCProtocolDecl *getProtocol(unsigned I) const {
5731    assert(I < getNumProtocols() && "Out-of-range protocol access");
5732 return qual_begin()[I];
5733 }
5734
5735 /// Retrieve all of the protocol qualifiers.
5736 ArrayRef<ObjCProtocolDecl *> getProtocols() const {
5737 return ArrayRef<ObjCProtocolDecl *>(qual_begin(), getNumProtocols());
5738 }
5739};
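
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// Iterating protocols through the CRTP mixin; written as a template so it
// works for any subclass (they are defined further below). Names
// hypothetical.
template <class QualifiedT>
inline void listProtocolsSketch(const QualifiedT *T) {
  for (ObjCProtocolDecl *P : T->quals()) // qual_range from the mixin
    (void)P; // e.g. the decl for 'NSCopying'
}
// --- end sketch ---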
5740
5741/// Represents a type parameter type in Objective C. It can take
5742/// a list of protocols.
5743class ObjCTypeParamType : public Type,
5744 public ObjCProtocolQualifiers<ObjCTypeParamType>,
5745 public llvm::FoldingSetNode {
5746 friend class ASTContext;
5747 friend class ObjCProtocolQualifiers<ObjCTypeParamType>;
5748
5749 /// The number of protocols stored on this type.
5750 unsigned NumProtocols : 6;
5751
5752 ObjCTypeParamDecl *OTPDecl;
5753
5754 /// The protocols are stored after the ObjCTypeParamType node. In the
5755  /// canonical type, the list of protocols is sorted alphabetically
5756 /// and uniqued.
5757 ObjCProtocolDecl **getProtocolStorageImpl();
5758
5759 /// Return the number of qualifying protocols in this interface type,
5760 /// or 0 if there are none.
5761 unsigned getNumProtocolsImpl() const {
5762 return NumProtocols;
5763 }
5764
5765 void setNumProtocolsImpl(unsigned N) {
5766 NumProtocols = N;
5767 }
5768
5769 ObjCTypeParamType(const ObjCTypeParamDecl *D,
5770 QualType can,
5771 ArrayRef<ObjCProtocolDecl *> protocols);
5772
5773public:
5774 bool isSugared() const { return true; }
5775 QualType desugar() const { return getCanonicalTypeInternal(); }
5776
5777 static bool classof(const Type *T) {
5778 return T->getTypeClass() == ObjCTypeParam;
5779 }
5780
5781 void Profile(llvm::FoldingSetNodeID &ID);
5782 static void Profile(llvm::FoldingSetNodeID &ID,
5783 const ObjCTypeParamDecl *OTPDecl,
5784 QualType CanonicalType,
5785 ArrayRef<ObjCProtocolDecl *> protocols);
5786
5787 ObjCTypeParamDecl *getDecl() const { return OTPDecl; }
5788};
5789
5790/// Represents a class type in Objective C.
5791///
5792/// Every Objective C type is a combination of a base type, a set of
5793/// type arguments (optional, for parameterized classes) and a list of
5794/// protocols.
5795///
5796/// Given the following declarations:
5797/// \code
5798/// \@class C<T>;
5799/// \@protocol P;
5800/// \endcode
5801///
5802/// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType
5803/// with base C and no protocols.
5804///
5805/// 'C<P>' is an unspecialized ObjCObjectType with base C and protocol list [P].
5806/// 'C<C*>' is a specialized ObjCObjectType with type arguments 'C*' and no
5807/// protocol list.
5808/// 'C<C*><P>' is a specialized ObjCObjectType with base C, type arguments 'C*',
5809/// and protocol list [P].
5810///
5811/// 'id' is a TypedefType which is sugar for an ObjCObjectPointerType whose
5812/// pointee is an ObjCObjectType with base BuiltinType::ObjCIdType
5813/// and no protocols.
5814///
5815/// 'id<P>' is an ObjCObjectPointerType whose pointee is an ObjCObjectType
5816/// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually
5817/// this should get its own sugar class to better represent the source.
5818class ObjCObjectType : public Type,
5819 public ObjCProtocolQualifiers<ObjCObjectType> {
5820 friend class ObjCProtocolQualifiers<ObjCObjectType>;
5821
5822  // ObjCObjectType.NumTypeArgs - the number of type arguments stored
5823  // after the ObjCObjectType node.
5824  // ObjCObjectType.NumProtocols - the number of protocols stored
5825  // after the type arguments of the ObjCObjectType node.
5826 //
5827 // These protocols are those written directly on the type. If
5828 // protocol qualifiers ever become additive, the iterators will need
5829 // to get kindof complicated.
5830 //
5831 // In the canonical object type, these are sorted alphabetically
5832 // and uniqued.
5833
5834 /// Either a BuiltinType or an InterfaceType or sugar for either.
5835 QualType BaseType;
5836
5837 /// Cached superclass type.
5838 mutable llvm::PointerIntPair<const ObjCObjectType *, 1, bool>
5839 CachedSuperClassType;
5840
5841 QualType *getTypeArgStorage();
5842 const QualType *getTypeArgStorage() const {
5843 return const_cast<ObjCObjectType *>(this)->getTypeArgStorage();
5844 }
5845
5846 ObjCProtocolDecl **getProtocolStorageImpl();
5847 /// Return the number of qualifying protocols in this interface type,
5848 /// or 0 if there are none.
5849 unsigned getNumProtocolsImpl() const {
5850 return ObjCObjectTypeBits.NumProtocols;
5851 }
5852 void setNumProtocolsImpl(unsigned N) {
5853 ObjCObjectTypeBits.NumProtocols = N;
5854 }
5855
5856protected:
5857 enum Nonce_ObjCInterface { Nonce_ObjCInterface };
5858
5859 ObjCObjectType(QualType Canonical, QualType Base,
5860 ArrayRef<QualType> typeArgs,
5861 ArrayRef<ObjCProtocolDecl *> protocols,
5862 bool isKindOf);
5863
5864 ObjCObjectType(enum Nonce_ObjCInterface)
5865 : Type(ObjCInterface, QualType(), TypeDependence::None),
5866 BaseType(QualType(this_(), 0)) {
5867 ObjCObjectTypeBits.NumProtocols = 0;
5868 ObjCObjectTypeBits.NumTypeArgs = 0;
5869 ObjCObjectTypeBits.IsKindOf = 0;
5870 }
5871
5872 void computeSuperClassTypeSlow() const;
5873
5874public:
5875 /// Gets the base type of this object type. This is always (possibly
5876 /// sugar for) one of:
5877 /// - the 'id' builtin type (as opposed to the 'id' type visible to the
5878 /// user, which is a typedef for an ObjCObjectPointerType)
5879 /// - the 'Class' builtin type (same caveat)
5880 /// - an ObjCObjectType (currently always an ObjCInterfaceType)
5881 QualType getBaseType() const { return BaseType; }
5882
5883 bool isObjCId() const {
5884 return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCId);
5885 }
5886
5887 bool isObjCClass() const {
5888 return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCClass);
5889 }
5890
5891 bool isObjCUnqualifiedId() const { return qual_empty() && isObjCId(); }
5892 bool isObjCUnqualifiedClass() const { return qual_empty() && isObjCClass(); }
5893 bool isObjCUnqualifiedIdOrClass() const {
5894 if (!qual_empty()) return false;
5895 if (const BuiltinType *T = getBaseType()->getAs<BuiltinType>())
5896 return T->getKind() == BuiltinType::ObjCId ||
5897 T->getKind() == BuiltinType::ObjCClass;
5898 return false;
5899 }
5900 bool isObjCQualifiedId() const { return !qual_empty() && isObjCId(); }
5901 bool isObjCQualifiedClass() const { return !qual_empty() && isObjCClass(); }
5902
5903 /// Gets the interface declaration for this object type, if the base type
5904 /// really is an interface.
5905 ObjCInterfaceDecl *getInterface() const;
5906
5907 /// Determine whether this object type is "specialized", meaning
5908 /// that it has type arguments.
5909 bool isSpecialized() const;
5910
5911 /// Determine whether this object type was written with type arguments.
5912 bool isSpecializedAsWritten() const {
5913 return ObjCObjectTypeBits.NumTypeArgs > 0;
5914 }
5915
5916 /// Determine whether this object type is "unspecialized", meaning
5917 /// that it has no type arguments.
5918 bool isUnspecialized() const { return !isSpecialized(); }
5919
5920 /// Determine whether this object type is "unspecialized" as
5921 /// written, meaning that it has no type arguments.
5922 bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
5923
5924 /// Retrieve the type arguments of this object type (semantically).
5925 ArrayRef<QualType> getTypeArgs() const;
5926
5927 /// Retrieve the type arguments of this object type as they were
5928 /// written.
5929 ArrayRef<QualType> getTypeArgsAsWritten() const {
5930 return llvm::makeArrayRef(getTypeArgStorage(),
5931 ObjCObjectTypeBits.NumTypeArgs);
5932 }
5933
5934 /// Whether this is a "__kindof" type as written.
5935 bool isKindOfTypeAsWritten() const { return ObjCObjectTypeBits.IsKindOf; }
5936
5937  /// Whether this is a "__kindof" type (semantically).
5938 bool isKindOfType() const;
5939
5940 /// Retrieve the type of the superclass of this object type.
5941 ///
5942 /// This operation substitutes any type arguments into the
5943 /// superclass of the current class type, potentially producing a
5944 /// specialization of the superclass type. Produces a null type if
5945 /// there is no superclass.
5946 QualType getSuperClassType() const {
5947 if (!CachedSuperClassType.getInt())
5948 computeSuperClassTypeSlow();
5949
5950    assert(CachedSuperClassType.getInt() && "Superclass not set?");
5951 return QualType(CachedSuperClassType.getPointer(), 0);
5952 }
5953
5954 /// Strip off the Objective-C "kindof" type and (with it) any
5955 /// protocol qualifiers.
5956 QualType stripObjCKindOfTypeAndQuals(const ASTContext &ctx) const;
5957
5958 bool isSugared() const { return false; }
5959 QualType desugar() const { return QualType(this, 0); }
5960
5961 static bool classof(const Type *T) {
5962 return T->getTypeClass() == ObjCObject ||
5963 T->getTypeClass() == ObjCInterface;
5964 }
5965};
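
// --- Illustrative sketch (editor's addition; not part of Type.h) ---
// Distinguishing 'id', 'id<P>', and interface object types via the
// predicates above. Names hypothetical.
inline void classifyObjCObjectSketch(const ObjCObjectType *OT) {
  if (OT->isObjCUnqualifiedId()) {
    // plain 'id'
  } else if (OT->isObjCQualifiedId()) {
    (void)OT->getProtocols(); // the '<P, ...>' list
  } else if (ObjCInterfaceDecl *IFace = OT->getInterface()) {
    (void)IFace; // a class type such as 'NSString'
  }
}
// --- end sketch ---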
5966
5967/// A class providing a concrete implementation
5968/// of ObjCObjectType, so as to not increase the footprint of
5969/// ObjCInterfaceType. Code outside of ASTContext and the core type
5970/// system should not reference this type.
5971class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode {
5972 friend class ASTContext;
5973
5974 // If anyone adds fields here, ObjCObjectType::getProtocolStorage()
5975 // will need to be modified.
5976
5977 ObjCObjectTypeImpl(QualType Canonical, QualType Base,
5978 ArrayRef<QualType> typeArgs,
5979 ArrayRef<ObjCProtocolDecl *> protocols,
5980 bool isKindOf)
5981 : ObjCObjectType(Canonical, Base, typeArgs, protocols, isKindOf) {}
5982
5983public:
5984 void Profile(llvm::FoldingSetNodeID &ID);
5985 static void Profile(llvm::FoldingSetNodeID &ID,
5986 QualType Base,
5987 ArrayRef<QualType> typeArgs,
5988 ArrayRef<ObjCProtocolDecl *> protocols,
5989 bool isKindOf);
5990};
5991
5992inline QualType *ObjCObjectType::getTypeArgStorage() {
5993 return reinterpret_cast<QualType *>(static_cast<ObjCObjectTypeImpl*>(this)+1);
5994}
5995
5996inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorageImpl() {
5997 return reinterpret_cast<ObjCProtocolDecl**>(
5998 getTypeArgStorage() + ObjCObjectTypeBits.NumTypeArgs);
5999}
6000
6001inline ObjCProtocolDecl **ObjCTypeParamType::getProtocolStorageImpl() {
6002 return reinterpret_cast<ObjCProtocolDecl**>(
6003 static_cast<ObjCTypeParamType*>(this)+1);
6004}
6005
6006/// Interfaces are the core concept in Objective-C for object oriented design.
6007/// They basically correspond to C++ classes. There are two kinds of interface
6008/// types: normal interfaces like `NSString`, and qualified interfaces, which
6009/// are qualified with a protocol list like `NSString<NSCopyable, NSAmazing>`.
6010///
6011/// ObjCInterfaceType guarantees the following properties when considered
6012/// as a subtype of its superclass, ObjCObjectType:
6013/// - There are no protocol qualifiers. To reinforce this, code which
6014/// tries to invoke the protocol methods via an ObjCInterfaceType will
6015/// fail to compile.
6016/// - It is its own base type. That is, if T is an ObjCInterfaceType*,
6017/// T->getBaseType() == QualType(T, 0).
6018class ObjCInterfaceType : public ObjCObjectType {
6019 friend class ASTContext; // ASTContext creates these.
6020 friend class ASTReader;
6021 friend class ObjCInterfaceDecl;
6022 template <class T> friend class serialization::AbstractTypeReader;
6023
6024 mutable ObjCInterfaceDecl *Decl;
6025
6026 ObjCInterfaceType(const ObjCInterfaceDecl *D)
6027 : ObjCObjectType(Nonce_ObjCInterface),
6028 Decl(const_cast<ObjCInterfaceDecl*>(D)) {}
6029
6030public:
6031 /// Get the declaration of this interface.
6032 ObjCInterfaceDecl *getDecl() const { return Decl; }
6033
6034 bool isSugared() const { return false; }
6035 QualType desugar() const { return QualType(this, 0); }
6036
6037 static bool classof(const Type *T) {
6038 return T->getTypeClass() == ObjCInterface;
6039 }
6040
6041 // Nonsense to "hide" certain members of ObjCObjectType within this
6042 // class. People asking for protocols on an ObjCInterfaceType are
6043 // not going to get what they want: ObjCInterfaceTypes are
6044 // guaranteed to have no protocols.
6045 enum {
6046 qual_iterator,
6047 qual_begin,
6048 qual_end,
6049 getNumProtocols,
6050 getProtocol
6051 };
6052};
6053
6054inline ObjCInterfaceDecl *ObjCObjectType::getInterface() const {
6055 QualType baseType = getBaseType();
6056 while (const auto *ObjT = baseType->getAs<ObjCObjectType>()) {
6057 if (const auto *T = dyn_cast<ObjCInterfaceType>(ObjT))
6058 return T->getDecl();
6059
6060 baseType = ObjT->getBaseType();
6061 }
6062
6063 return nullptr;
6064}
6065
6066/// Represents a pointer to an Objective C object.
6067///
6068/// These are constructed from pointer declarators when the pointee type is
6069/// an ObjCObjectType (or sugar for one). In addition, the 'id' and 'Class'
6070/// types are typedefs for these, and the protocol-qualified types 'id<P>'
6071/// and 'Class<P>' are translated into these.
6072///
6073/// Pointers to pointers to Objective C objects are still PointerTypes;
6074/// only the first level of pointer gets its own type implementation.
6075class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
6076 friend class ASTContext; // ASTContext creates these.
6077
6078 QualType PointeeType;
6079
6080 ObjCObjectPointerType(QualType Canonical, QualType Pointee)
6081 : Type(ObjCObjectPointer, Canonical, Pointee->getDependence()),
6082 PointeeType(Pointee) {}
6083
6084public:
6085 /// Gets the type pointed to by this ObjC pointer.
6086 /// The result will always be an ObjCObjectType or sugar thereof.
6087 QualType getPointeeType() const { return PointeeType; }
6088
6089 /// Gets the type pointed to by this ObjC pointer. Always returns non-null.
6090 ///
6091 /// This method is equivalent to getPointeeType() except that
6092 /// it discards any typedefs (or other sugar) between this
6093 /// type and the "outermost" object type. So for:
6094 /// \code
6095 /// \@class A; \@protocol P; \@protocol Q;
6096 /// typedef A<P> AP;
6097 /// typedef A A1;
6098 /// typedef A1<P> A1P;
6099 /// typedef A1P<Q> A1PQ;
6100 /// \endcode
6101 /// For 'A*', getObjectType() will return 'A'.
6102 /// For 'A<P>*', getObjectType() will return 'A<P>'.
6103 /// For 'AP*', getObjectType() will return 'A<P>'.
6104 /// For 'A1*', getObjectType() will return 'A'.
6105 /// For 'A1<P>*', getObjectType() will return 'A1<P>'.
6106 /// For 'A1P*', getObjectType() will return 'A1<P>'.
6107 /// For 'A1PQ*', getObjectType() will return 'A1<Q>', because
6108 /// adding protocols to a protocol-qualified base discards the
6109 /// old qualifiers (for now). But if it didn't, getObjectType()
6110 /// would return 'A1P<Q>' (and we'd have to make iterating over
6111 /// qualifiers more complicated).
6112 const ObjCObjectType *getObjectType() const {
6113 return PointeeType->castAs<ObjCObjectType>();
6114 }
6115
6116 /// If this pointer points to an Objective C
6117 /// \@interface type, gets the type for that interface. Any protocol
6118 /// qualifiers on the interface are ignored.
6119 ///
6120 /// \return null if the base type for this pointer is 'id' or 'Class'
6121 const ObjCInterfaceType *getInterfaceType() const;
6122
6123 /// If this pointer points to an Objective C \@interface
6124 /// type, gets the declaration for that interface.
6125 ///
6126 /// \return null if the base type for this pointer is 'id' or 'Class'
6127 ObjCInterfaceDecl *getInterfaceDecl() const {
6128 return getObjectType()->getInterface();
6129 }
6130
6131 /// True if this is equivalent to the 'id' type, i.e. if
6132 /// its object type is the primitive 'id' type with no protocols.
6133 bool isObjCIdType() const {
6134 return getObjectType()->isObjCUnqualifiedId();
6135 }
6136
6137 /// True if this is equivalent to the 'Class' type,
6138 /// i.e. if its object type is the primitive 'Class' type with no protocols.
6139 bool isObjCClassType() const {
6140 return getObjectType()->isObjCUnqualifiedClass();
6141 }
6142
6143 /// True if this is equivalent to the 'id' or 'Class' type.
6144 bool isObjCIdOrClassType() const {
6145 return getObjectType()->isObjCUnqualifiedIdOrClass();
6146 }
6147
6148 /// True if this is equivalent to 'id<P>' for some non-empty set of
6149 /// protocols.
6150 bool isObjCQualifiedIdType() const {
6151 return getObjectType()->isObjCQualifiedId();
6152 }
6153
6154 /// True if this is equivalent to 'Class<P>' for some non-empty set of
6155 /// protocols.
6156 bool isObjCQualifiedClassType() const {
6157 return getObjectType()->isObjCQualifiedClass();
6158 }
6159
6160 /// Whether this is a "__kindof" type.
6161 bool isKindOfType() const { return getObjectType()->isKindOfType(); }
6162
6163 /// Whether this type is specialized, meaning that it has type arguments.
6164 bool isSpecialized() const { return getObjectType()->isSpecialized(); }
6165
6166 /// Whether this type is specialized as written, meaning that it has type arguments as written.
6167 bool isSpecializedAsWritten() const {
6168 return getObjectType()->isSpecializedAsWritten();
6169 }
6170
6171 /// Whether this type is unspecialized, meaning that it has no type arguments.
6172 bool isUnspecialized() const { return getObjectType()->isUnspecialized(); }
6173
6174 /// Determine whether this object type is "unspecialized" as
6175 /// written, meaning that it has no type arguments.
6176 bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
6177
6178 /// Retrieve the type arguments for this type.
6179 ArrayRef<QualType> getTypeArgs() const {
6180 return getObjectType()->getTypeArgs();
6181 }
6182
6183 /// Retrieve the type arguments for this type.
6184 ArrayRef<QualType> getTypeArgsAsWritten() const {
6185 return getObjectType()->getTypeArgsAsWritten();
6186 }
6187
6188 /// An iterator over the qualifiers on the object type. Provided
6189 /// for convenience. This will always iterate over the full set of
6190 /// protocols on a type, not just those provided directly.
6191 using qual_iterator = ObjCObjectType::qual_iterator;
6192 using qual_range = llvm::iterator_range<qual_iterator>;
6193
6194 qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
6195
6196 qual_iterator qual_begin() const {
6197 return getObjectType()->qual_begin();
6198 }
6199
6200 qual_iterator qual_end() const {
6201 return getObjectType()->qual_end();
6202 }
6203
6204 bool qual_empty() const { return getObjectType()->qual_empty(); }
6205
6206 /// Return the number of qualifying protocols on the object type.
6207 unsigned getNumProtocols() const {
6208 return getObjectType()->getNumProtocols();
6209 }
6210
6211 /// Retrieve a qualifying protocol by index on the object type.
6212 ObjCProtocolDecl *getProtocol(unsigned I) const {
6213 return getObjectType()->getProtocol(I);
6214 }
6215
6216 bool isSugared() const { return false; }
6217 QualType desugar() const { return QualType(this, 0); }
6218
6219 /// Retrieve the type of the superclass of this object pointer type.
6220 ///
6221 /// This operation substitutes any type arguments into the
6222 /// superclass of the current class type, potentially producing a
6223 /// pointer to a specialization of the superclass type. Produces a
6224 /// null type if there is no superclass.
6225 QualType getSuperClassType() const;
6226
6227 /// Strip off the Objective-C "kindof" type and (with it) any
6228 /// protocol qualifiers.
6229 const ObjCObjectPointerType *stripObjCKindOfTypeAndQuals(
6230 const ASTContext &ctx) const;
6231
6232 void Profile(llvm::FoldingSetNodeID &ID) {
6233 Profile(ID, getPointeeType());
6234 }
6235
6236 static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
6237 ID.AddPointer(T.getAsOpaquePtr());
6238 }
6239
6240 static bool classof(const Type *T) {
6241 return T->getTypeClass() == ObjCObjectPointer;
6242 }
6243};
6244
6245class AtomicType : public Type, public llvm::FoldingSetNode {
6246 friend class ASTContext; // ASTContext creates these.
6247
6248 QualType ValueType;
6249
6250 AtomicType(QualType ValTy, QualType Canonical)
6251 : Type(Atomic, Canonical, ValTy->getDependence()), ValueType(ValTy) {}
6252
6253public:
6254 /// Gets the type contained by this atomic type, i.e.
6255 /// the type returned by performing an atomic load of this atomic type.
6256 QualType getValueType() const { return ValueType; }
6257
6258 bool isSugared() const { return false; }
6259 QualType desugar() const { return QualType(this, 0); }
6260
6261 void Profile(llvm::FoldingSetNodeID &ID) {
6262 Profile(ID, getValueType());
6263 }
6264
6265 static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
6266 ID.AddPointer(T.getAsOpaquePtr());
6267 }
6268
6269 static bool classof(const Type *T) {
6270 return T->getTypeClass() == Atomic;
6271 }
6272};
6273
6274/// PipeType - the OpenCL 2.0 pipe type.
6275class PipeType : public Type, public llvm::FoldingSetNode {
6276 friend class ASTContext; // ASTContext creates these.
6277
6278 QualType ElementType;
6279 bool isRead;
6280
6281 PipeType(QualType elemType, QualType CanonicalPtr, bool isRead)
6282 : Type(Pipe, CanonicalPtr, elemType->getDependence()),
6283 ElementType(elemType), isRead(isRead) {}
6284
6285public:
6286 QualType getElementType() const { return ElementType; }
6287
6288 bool isSugared() const { return false; }
6289
6290 QualType desugar() const { return QualType(this, 0); }
6291
6292 void Profile(llvm::FoldingSetNodeID &ID) {
6293 Profile(ID, getElementType(), isReadOnly());
6294 }
6295
6296 static void Profile(llvm::FoldingSetNodeID &ID, QualType T, bool isRead) {
6297 ID.AddPointer(T.getAsOpaquePtr());
6298 ID.AddBoolean(isRead);
6299 }
6300
6301 static bool classof(const Type *T) {
6302 return T->getTypeClass() == Pipe;
6303 }
6304
6305 bool isReadOnly() const { return isRead; }
6306};
6307
6308/// A fixed-width integer type of a specified bitwidth.
6309class ExtIntType final : public Type, public llvm::FoldingSetNode {
6310 friend class ASTContext;
6311 unsigned IsUnsigned : 1;
6312 unsigned NumBits : 24;
6313
6314protected:
6315 ExtIntType(bool isUnsigned, unsigned NumBits);
6316
6317public:
6318 bool isUnsigned() const { return IsUnsigned; }
6319 bool isSigned() const { return !IsUnsigned; }
6320 unsigned getNumBits() const { return NumBits; }
6321
6322 bool isSugared() const { return false; }
6323 QualType desugar() const { return QualType(this, 0); }
6324
6325 void Profile(llvm::FoldingSetNodeID &ID) {
6326 Profile(ID, isUnsigned(), getNumBits());
6327 }
6328
6329 static void Profile(llvm::FoldingSetNodeID &ID, bool IsUnsigned,
6330 unsigned NumBits) {
6331 ID.AddBoolean(IsUnsigned);
6332 ID.AddInteger(NumBits);
6333 }
6334
6335 static bool classof(const Type *T) { return T->getTypeClass() == ExtInt; }
6336};
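As a concrete example (a sketch, not from this header), for the C type 'unsigned _ExtInt(13)' the corresponding node would answer as follows; 'T' is a hypothetical ExtIntType pointer.

void checkExtInt13(const clang::ExtIntType *T) { // T: type of 'unsigned _ExtInt(13)'
  assert(T->isUnsigned());       // declared unsigned
  assert(T->getNumBits() == 13); // the requested bit width
}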
6337
6338class DependentExtIntType final : public Type, public llvm::FoldingSetNode {
6339 friend class ASTContext;
6340 const ASTContext &Context;
6341 llvm::PointerIntPair<Expr*, 1, bool> ExprAndUnsigned;
6342
6343protected:
6344 DependentExtIntType(const ASTContext &Context, bool IsUnsigned,
6345 Expr *NumBits);
6346
6347public:
6348 bool isUnsigned() const;
6349 bool isSigned() const { return !isUnsigned(); }
6350 Expr *getNumBitsExpr() const;
6351
6352 bool isSugared() const { return false; }
6353 QualType desugar() const { return QualType(this, 0); }
6354
6355 void Profile(llvm::FoldingSetNodeID &ID) {
6356 Profile(ID, Context, isUnsigned(), getNumBitsExpr());
6357 }
6358 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
6359 bool IsUnsigned, Expr *NumBitsExpr);
6360
6361 static bool classof(const Type *T) {
6362 return T->getTypeClass() == DependentExtInt;
6363 }
6364};
6365
6366/// A qualifier collector is used to build up a set of qualifiers.
6367class QualifierCollector : public Qualifiers {
6368public:
6369 QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {}
6370
6371 /// Collect any qualifiers on the given type and return an
6372 /// unqualified type. The qualifiers are assumed to be consistent
6373 /// with those already in the type.
6374 const Type *strip(QualType type) {
6375 addFastQualifiers(type.getLocalFastQualifiers());
6376 if (!type.hasLocalNonFastQualifiers())
6377 return type.getTypePtrUnsafe();
6378
6379 const ExtQuals *extQuals = type.getExtQualsUnsafe();
6380 addConsistentQualifiers(extQuals->getQualifiers());
6381 return extQuals->getBaseType();
6382 }
6383
6384 /// Apply the collected qualifiers to the given type.
6385 QualType apply(const ASTContext &Context, QualType QT) const;
6386
6387 /// Apply the collected qualifiers to the given type.
6388 QualType apply(const ASTContext &Context, const Type* T) const;
6389};
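A minimal usage sketch, not part of this header; 'Ctx' (an ASTContext) and 'T' (a QualType) are assumed to be in scope. strip() peels T's local qualifiers into the collector and returns the bare type; apply() then rebuilds a qualified type, possibly on a different base.

clang::QualifierCollector Qc;
const clang::Type *Bare = Qc.strip(T);             // collect T's local qualifiers
clang::QualType Requalified = Qc.apply(Ctx, Bare); // re-apply the collected set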
6390
6391/// A container of type source information.
6392///
6393/// A client can read the relevant info using TypeLoc wrappers, e.g:
6394/// @code
6395/// TypeLoc TL = TypeSourceInfo->getTypeLoc();
6396/// TL.getBeginLoc().print(OS, SrcMgr);
6397/// @endcode
6398class alignas(8) TypeSourceInfo {
6399 // Contains a memory block after the class, used for type source information,
6400 // allocated by ASTContext.
6401 friend class ASTContext;
6402
6403 QualType Ty;
6404
6405 TypeSourceInfo(QualType ty) : Ty(ty) {}
6406
6407public:
6408 /// Return the type wrapped by this type source info.
6409 QualType getType() const { return Ty; }
6410
6411 /// Return the TypeLoc wrapper for the type source info.
6412 TypeLoc getTypeLoc() const; // implemented in TypeLoc.h
6413
6414 /// Override the type stored in this TypeSourceInfo. Use with caution!
6415 void overrideType(QualType T) { Ty = T; }
6416};
6417
6418// Inline function definitions.
6419
6420inline SplitQualType SplitQualType::getSingleStepDesugaredType() const {
6421 SplitQualType desugar =
6422 Ty->getLocallyUnqualifiedSingleStepDesugaredType().split();
6423 desugar.Quals.addConsistentQualifiers(Quals);
6424 return desugar;
6425}
6426
6427inline const Type *QualType::getTypePtr() const {
6428 return getCommonPtr()->BaseType;
6429}
6430
6431inline const Type *QualType::getTypePtrOrNull() const {
6432 return (isNull() ? nullptr : getCommonPtr()->BaseType);
6433}
6434
6435inline SplitQualType QualType::split() const {
6436 if (!hasLocalNonFastQualifiers())
6437 return SplitQualType(getTypePtrUnsafe(),
6438 Qualifiers::fromFastMask(getLocalFastQualifiers()));
6439
6440 const ExtQuals *eq = getExtQualsUnsafe();
6441 Qualifiers qs = eq->getQualifiers();
6442 qs.addFastQualifiers(getLocalFastQualifiers());
6443 return SplitQualType(eq->getBaseType(), qs);
6444}
6445
6446inline Qualifiers QualType::getLocalQualifiers() const {
6447 Qualifiers Quals;
6448 if (hasLocalNonFastQualifiers())
6449 Quals = getExtQualsUnsafe()->getQualifiers();
6450 Quals.addFastQualifiers(getLocalFastQualifiers());
6451 return Quals;
6452}
6453
6454inline Qualifiers QualType::getQualifiers() const {
6455 Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers();
6456 quals.addFastQualifiers(getLocalFastQualifiers());
6457 return quals;
6458}
6459
6460inline unsigned QualType::getCVRQualifiers() const {
6461 unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers();
6462 cvr |= getLocalCVRQualifiers();
6463 return cvr;
6464}
6465
6466inline QualType QualType::getCanonicalType() const {
6467 QualType canon = getCommonPtr()->CanonicalType;
6468 return canon.withFastQualifiers(getLocalFastQualifiers());
6469}
6470
6471inline bool QualType::isCanonical() const {
6472 return getTypePtr()->isCanonicalUnqualified();
6473}
6474
6475inline bool QualType::isCanonicalAsParam() const {
6476 if (!isCanonical()) return false;
6477 if (hasLocalQualifiers()) return false;
6478
6479 const Type *T = getTypePtr();
6480 if (T->isVariablyModifiedType() && T->hasSizedVLAType())
6481 return false;
6482
6483 return !isa<FunctionType>(T) && !isa<ArrayType>(T);
6484}
6485
6486inline bool QualType::isConstQualified() const {
6487 return isLocalConstQualified() ||
6488 getCommonPtr()->CanonicalType.isLocalConstQualified();
6489}
6490
6491inline bool QualType::isRestrictQualified() const {
6492 return isLocalRestrictQualified() ||
6493 getCommonPtr()->CanonicalType.isLocalRestrictQualified();
6494}
6495
6496
6497inline bool QualType::isVolatileQualified() const {
6498 return isLocalVolatileQualified() ||
6499 getCommonPtr()->CanonicalType.isLocalVolatileQualified();
6500}
6501
6502inline bool QualType::hasQualifiers() const {
6503 return hasLocalQualifiers() ||
6504 getCommonPtr()->CanonicalType.hasLocalQualifiers();
6505}
6506
6507inline QualType QualType::getUnqualifiedType() const {
6508 if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
6509 return QualType(getTypePtr(), 0);
6510
6511 return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0);
6512}
6513
6514inline SplitQualType QualType::getSplitUnqualifiedType() const {
6515 if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
6516 return split();
6517
6518 return getSplitUnqualifiedTypeImpl(*this);
6519}
6520
6521inline void QualType::removeLocalConst() {
6522 removeLocalFastQualifiers(Qualifiers::Const);
6523}
6524
6525inline void QualType::removeLocalRestrict() {
6526 removeLocalFastQualifiers(Qualifiers::Restrict);
6527}
6528
6529inline void QualType::removeLocalVolatile() {
6530 removeLocalFastQualifiers(Qualifiers::Volatile);
6531}
6532
6533inline void QualType::removeLocalCVRQualifiers(unsigned Mask) {
6534 assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits");
6535 static_assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask,
6536 "Fast bits differ from CVR bits!");
6537
6538 // Fast path: we don't need to touch the slow qualifiers.
6539 removeLocalFastQualifiers(Mask);
6540}
6541
6542/// Check if this type has any address space qualifier.
6543inline bool QualType::hasAddressSpace() const {
6544 return getQualifiers().hasAddressSpace();
6545}
6546
6547/// Return the address space of this type.
6548inline LangAS QualType::getAddressSpace() const {
6549 return getQualifiers().getAddressSpace();
6550}
6551
6552/// Return the gc attribute of this type.
6553inline Qualifiers::GC QualType::getObjCGCAttr() const {
6554 return getQualifiers().getObjCGCAttr();
6555}
6556
6557inline bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion() const {
6558 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6559 return hasNonTrivialToPrimitiveDefaultInitializeCUnion(RD);
6560 return false;
6561}
6562
6563inline bool QualType::hasNonTrivialToPrimitiveDestructCUnion() const {
6564 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6565 return hasNonTrivialToPrimitiveDestructCUnion(RD);
6566 return false;
6567}
6568
6569inline bool QualType::hasNonTrivialToPrimitiveCopyCUnion() const {
6570 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6571 return hasNonTrivialToPrimitiveCopyCUnion(RD);
6572 return false;
6573}
6574
6575inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) {
6576 if (const auto *PT = t.getAs<PointerType>()) {
6577 if (const auto *FT = PT->getPointeeType()->getAs<FunctionType>())
6578 return FT->getExtInfo();
6579 } else if (const auto *FT = t.getAs<FunctionType>())
6580 return FT->getExtInfo();
6581
6582 return FunctionType::ExtInfo();
6583}
6584
6585inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) {
6586 return getFunctionExtInfo(*t);
6587}
6588
6589/// Determine whether this type is more
6590/// qualified than the Other type. For example, "const volatile int"
6591/// is more qualified than "const int", "volatile int", and
6592/// "int". However, it is not more qualified than "const volatile
6593/// int".
6594inline bool QualType::isMoreQualifiedThan(QualType other) const {
6595 Qualifiers MyQuals = getQualifiers();
6596 Qualifiers OtherQuals = other.getQualifiers();
6597 return (MyQuals != OtherQuals && MyQuals.compatiblyIncludes(OtherQuals));
6598}
6599
6600/// Determine whether this type is at least
6601/// as qualified as the Other type. For example, "const volatile
6602/// int" is at least as qualified as "const int", "volatile int",
6603/// "int", and "const volatile int".
6604inline bool QualType::isAtLeastAsQualifiedAs(QualType other) const {
6605 Qualifiers OtherQuals = other.getQualifiers();
6606
6607 // Ignore __unaligned qualifier if this type is void.
6608 if (getUnqualifiedType()->isVoidType())
6609 OtherQuals.removeUnaligned();
6610
6611 return getQualifiers().compatiblyIncludes(OtherQuals);
6612}
6613
6614/// If Type is a reference type (e.g., const
6615/// int&), returns the type that the reference refers to ("const
6616/// int"). Otherwise, returns the type itself. This routine is used
6617/// throughout Sema to implement C++ 5p6:
6618///
6619/// If an expression initially has the type "reference to T" (8.3.2,
6620/// 8.5.3), the type is adjusted to "T" prior to any further
6621/// analysis, the expression designates the object or function
6622/// denoted by the reference, and the expression is an lvalue.
6623inline QualType QualType::getNonReferenceType() const {
6624 if (const auto *RefType = (*this)->getAs<ReferenceType>())
6625 return RefType->getPointeeType();
6626 else
6627 return *this;
6628}
6629
6630inline bool QualType::isCForbiddenLValueType() const {
6631 return ((getTypePtr()->isVoidType() && !hasQualifiers()) ||
6632 getTypePtr()->isFunctionType());
6633}
6634
6635/// Tests whether the type is categorized as a fundamental type.
6636///
6637/// \returns True for types specified in C++0x [basic.fundamental].
6638inline bool Type::isFundamentalType() const {
6639 return isVoidType() ||
6640 isNullPtrType() ||
6641 // FIXME: It's really annoying that we don't have an
6642 // 'isArithmeticType()' which agrees with the standard definition.
6643 (isArithmeticType() && !isEnumeralType());
6644}
6645
6646/// Tests whether the type is categorized as a compound type.
6647///
6648/// \returns True for types specified in C++0x [basic.compound].
6649inline bool Type::isCompoundType() const {
6650 // C++0x [basic.compound]p1:
6651 // Compound types can be constructed in the following ways:
6652 // -- arrays of objects of a given type [...];
6653 return isArrayType() ||
6654 // -- functions, which have parameters of given types [...];
6655 isFunctionType() ||
6656 // -- pointers to void or objects or functions [...];
6657 isPointerType() ||
6658 // -- references to objects or functions of a given type. [...]
6659 isReferenceType() ||
6660 // -- classes containing a sequence of objects of various types, [...];
6661 isRecordType() ||
6662 // -- unions, which are classes capable of containing objects of different
6663 // types at different times;
6664 isUnionType() ||
6665 // -- enumerations, which comprise a set of named constant values. [...];
6666 isEnumeralType() ||
6667 // -- pointers to non-static class members, [...].
6668 isMemberPointerType();
6669}
6670
6671inline bool Type::isFunctionType() const {
6672 return isa<FunctionType>(CanonicalType);
6673}
6674
6675inline bool Type::isPointerType() const {
6676 return isa<PointerType>(CanonicalType);
6677}
6678
6679inline bool Type::isAnyPointerType() const {
6680 return isPointerType() || isObjCObjectPointerType();
6681}
6682
6683inline bool Type::isBlockPointerType() const {
6684 return isa<BlockPointerType>(CanonicalType);
6685}
6686
6687inline bool Type::isReferenceType() const {
6688 return isa<ReferenceType>(CanonicalType);
6689}
6690
6691inline bool Type::isLValueReferenceType() const {
6692 return isa<LValueReferenceType>(CanonicalType);
6693}
6694
6695inline bool Type::isRValueReferenceType() const {
6696 return isa<RValueReferenceType>(CanonicalType);
6697}
6698
6699inline bool Type::isObjectPointerType() const {
6700 // Note: an "object pointer type" is not the same thing as a pointer to an
6701 // object type; rather, it is a pointer to an object type or a pointer to cv
6702 // void.
6703 if (const auto *T = getAs<PointerType>())
6704 return !T->getPointeeType()->isFunctionType();
6705 else
6706 return false;
6707}
6708
6709inline bool Type::isFunctionPointerType() const {
6710 if (const auto *T = getAs<PointerType>())
6711 return T->getPointeeType()->isFunctionType();
6712 else
6713 return false;
6714}
6715
6716inline bool Type::isFunctionReferenceType() const {
6717 if (const auto *T = getAs<ReferenceType>())
6718 return T->getPointeeType()->isFunctionType();
6719 else
6720 return false;
6721}
6722
6723inline bool Type::isMemberPointerType() const {
6724 return isa<MemberPointerType>(CanonicalType);
6725}
6726
6727inline bool Type::isMemberFunctionPointerType() const {
6728 if (const auto *T = getAs<MemberPointerType>())
6729 return T->isMemberFunctionPointer();
6730 else
6731 return false;
6732}
6733
6734inline bool Type::isMemberDataPointerType() const {
6735 if (const auto *T = getAs<MemberPointerType>())
6736 return T->isMemberDataPointer();
6737 else
6738 return false;
6739}
6740
6741inline bool Type::isArrayType() const {
6742 return isa<ArrayType>(CanonicalType);
6743}
6744
6745inline bool Type::isConstantArrayType() const {
6746 return isa<ConstantArrayType>(CanonicalType);
6747}
6748
6749inline bool Type::isIncompleteArrayType() const {
6750 return isa<IncompleteArrayType>(CanonicalType);
6751}
6752
6753inline bool Type::isVariableArrayType() const {
6754 return isa<VariableArrayType>(CanonicalType);
6755}
6756
6757inline bool Type::isDependentSizedArrayType() const {
6758 return isa<DependentSizedArrayType>(CanonicalType);
6759}
6760
6761inline bool Type::isBuiltinType() const {
6762 return isa<BuiltinType>(CanonicalType);
6763}
6764
6765inline bool Type::isRecordType() const {
6766 return isa<RecordType>(CanonicalType);
6767}
6768
6769inline bool Type::isEnumeralType() const {
6770 return isa<EnumType>(CanonicalType);
6771}
6772
6773inline bool Type::isAnyComplexType() const {
6774 return isa<ComplexType>(CanonicalType);
6775}
6776
6777inline bool Type::isVectorType() const {
6778 return isa<VectorType>(CanonicalType);
6779}
6780
6781inline bool Type::isExtVectorType() const {
6782 return isa<ExtVectorType>(CanonicalType);
6783}
6784
6785inline bool Type::isMatrixType() const {
6786 return isa<MatrixType>(CanonicalType);
6787}
6788
6789inline bool Type::isConstantMatrixType() const {
6790 return isa<ConstantMatrixType>(CanonicalType);
6791}
6792
6793inline bool Type::isDependentAddressSpaceType() const {
6794 return isa<DependentAddressSpaceType>(CanonicalType);
6795}
6796
6797inline bool Type::isObjCObjectPointerType() const {
6798 return isa<ObjCObjectPointerType>(CanonicalType);
6799}
6800
6801inline bool Type::isObjCObjectType() const {
6802 return isa<ObjCObjectType>(CanonicalType);
6803}
6804
6805inline bool Type::isObjCObjectOrInterfaceType() const {
6806 return isa<ObjCInterfaceType>(CanonicalType) ||
6807 isa<ObjCObjectType>(CanonicalType);
6808}
6809
6810inline bool Type::isAtomicType() const {
6811 return isa<AtomicType>(CanonicalType);
6812}
6813
6814inline bool Type::isUndeducedAutoType() const {
6815 return isa<AutoType>(CanonicalType);
6816}
6817
6818inline bool Type::isObjCQualifiedIdType() const {
6819 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6820 return OPT->isObjCQualifiedIdType();
6821 return false;
6822}
6823
6824inline bool Type::isObjCQualifiedClassType() const {
6825 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6826 return OPT->isObjCQualifiedClassType();
6827 return false;
6828}
6829
6830inline bool Type::isObjCIdType() const {
6831 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6832 return OPT->isObjCIdType();
6833 return false;
6834}
6835
6836inline bool Type::isObjCClassType() const {
6837 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6838 return OPT->isObjCClassType();
6839 return false;
6840}
6841
6842inline bool Type::isObjCSelType() const {
6843 if (const auto *OPT = getAs<PointerType>())
6844 return OPT->getPointeeType()->isSpecificBuiltinType(BuiltinType::ObjCSel);
6845 return false;
6846}
6847
6848inline bool Type::isObjCBuiltinType() const {
6849 return isObjCIdType() || isObjCClassType() || isObjCSelType();
6850}
6851
6852inline bool Type::isDecltypeType() const {
6853 return isa<DecltypeType>(this);
6854}
6855
6856#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
6857 inline bool Type::is##Id##Type() const { \
6858 return isSpecificBuiltinType(BuiltinType::Id); \
6859 }
6860#include "clang/Basic/OpenCLImageTypes.def"
6861
6862inline bool Type::isSamplerT() const {
6863 return isSpecificBuiltinType(BuiltinType::OCLSampler);
6864}
6865
6866inline bool Type::isEventT() const {
6867 return isSpecificBuiltinType(BuiltinType::OCLEvent);
6868}
6869
6870inline bool Type::isClkEventT() const {
6871 return isSpecificBuiltinType(BuiltinType::OCLClkEvent);
6872}
6873
6874inline bool Type::isQueueT() const {
6875 return isSpecificBuiltinType(BuiltinType::OCLQueue);
6876}
6877
6878inline bool Type::isReserveIDT() const {
6879 return isSpecificBuiltinType(BuiltinType::OCLReserveID);
6880}
6881
6882inline bool Type::isImageType() const {
6883#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) is##Id##Type() ||
6884 return
6885#include "clang/Basic/OpenCLImageTypes.def"
6886 false; // end boolean or operation
6887}
6888
6889inline bool Type::isPipeType() const {
6890 return isa<PipeType>(CanonicalType);
6891}
6892
6893inline bool Type::isExtIntType() const {
6894 return isa<ExtIntType>(CanonicalType);
6895}
6896
6897#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
6898 inline bool Type::is##Id##Type() const { \
6899 return isSpecificBuiltinType(BuiltinType::Id); \
6900 }
6901#include "clang/Basic/OpenCLExtensionTypes.def"
6902
6903inline bool Type::isOCLIntelSubgroupAVCType() const {
6904#define INTEL_SUBGROUP_AVC_TYPE(ExtType, Id) \
6905 isOCLIntelSubgroupAVC##Id##Type() ||
6906 return
6907#include "clang/Basic/OpenCLExtensionTypes.def"
6908 false; // end of boolean or operation
6909}
6910
6911inline bool Type::isOCLExtOpaqueType() const {
6912#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) is##Id##Type() ||
6913 return
6914#include "clang/Basic/OpenCLExtensionTypes.def"
6915 false; // end of boolean or operation
6916}
6917
6918inline bool Type::isOpenCLSpecificType() const {
6919 return isSamplerT() || isEventT() || isImageType() || isClkEventT() ||
6920 isQueueT() || isReserveIDT() || isPipeType() || isOCLExtOpaqueType();
6921}
6922
6923inline bool Type::isTemplateTypeParmType() const {
6924 return isa<TemplateTypeParmType>(CanonicalType);
6925}
6926
6927inline bool Type::isSpecificBuiltinType(unsigned K) const {
6928 if (const BuiltinType *BT = getAs<BuiltinType>()) {
6929 return BT->getKind() == static_cast<BuiltinType::Kind>(K);
6930 }
6931 return false;
6932}
6933
6934inline bool Type::isPlaceholderType() const {
6935 if (const auto *BT = dyn_cast<BuiltinType>(this))
6936 return BT->isPlaceholderType();
6937 return false;
6938}
6939
6940inline const BuiltinType *Type::getAsPlaceholderType() const {
6941 if (const auto *BT = dyn_cast<BuiltinType>(this))
6942 if (BT->isPlaceholderType())
6943 return BT;
6944 return nullptr;
6945}
6946
6947inline bool Type::isSpecificPlaceholderType(unsigned K) const {
6948 assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K));
6949 return isSpecificBuiltinType(K);
6950}
6951
6952inline bool Type::isNonOverloadPlaceholderType() const {
6953 if (const auto *BT = dyn_cast<BuiltinType>(this))
6954 return BT->isNonOverloadPlaceholderType();
6955 return false;
6956}
6957
6958inline bool Type::isVoidType() const {
6959 return isSpecificBuiltinType(BuiltinType::Void);
6960}
6961
6962inline bool Type::isHalfType() const {
6963 // FIXME: Should we allow complex __fp16? Probably not.
6964 return isSpecificBuiltinType(BuiltinType::Half);
6965}
6966
6967inline bool Type::isFloat16Type() const {
6968 return isSpecificBuiltinType(BuiltinType::Float16);
6969}
6970
6971inline bool Type::isBFloat16Type() const {
6972 return isSpecificBuiltinType(BuiltinType::BFloat16);
6973}
6974
6975inline bool Type::isFloat128Type() const {
6976 return isSpecificBuiltinType(BuiltinType::Float128);
6977}
6978
6979inline bool Type::isNullPtrType() const {
6980 return isSpecificBuiltinType(BuiltinType::NullPtr);
6981}
6982
6983bool IsEnumDeclComplete(EnumDecl *);
6984bool IsEnumDeclScoped(EnumDecl *);
6985
6986inline bool Type::isIntegerType() const {
6987 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
6988 return BT->getKind() >= BuiltinType::Bool &&
6989 BT->getKind() <= BuiltinType::Int128;
6990 if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
6991 // Incomplete enum types are not treated as integer types.
6992 // FIXME: In C++, enum types are never integer types.
6993 return IsEnumDeclComplete(ET->getDecl()) &&
6994 !IsEnumDeclScoped(ET->getDecl());
6995 }
6996 return isExtIntType();
6997}
6998
6999inline bool Type::isFixedPointType() const {
7000 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
7001 return BT->getKind() >= BuiltinType::ShortAccum &&
7002 BT->getKind() <= BuiltinType::SatULongFract;
7003 }
7004 return false;
7005}
7006
7007inline bool Type::isFixedPointOrIntegerType() const {
7008 return isFixedPointType() || isIntegerType();
7009}
7010
7011inline bool Type::isSaturatedFixedPointType() const {
7012 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
7013 return BT->getKind() >= BuiltinType::SatShortAccum &&
7014 BT->getKind() <= BuiltinType::SatULongFract;
7015 }
7016 return false;
7017}
7018
7019inline bool Type::isUnsaturatedFixedPointType() const {
7020 return isFixedPointType() && !isSaturatedFixedPointType();
7021}
7022
7023inline bool Type::isSignedFixedPointType() const {
7024 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
7025 return ((BT->getKind() >= BuiltinType::ShortAccum &&
7026 BT->getKind() <= BuiltinType::LongAccum) ||
7027 (BT->getKind() >= BuiltinType::ShortFract &&
7028 BT->getKind() <= BuiltinType::LongFract) ||
7029 (BT->getKind() >= BuiltinType::SatShortAccum &&
7030 BT->getKind() <= BuiltinType::SatLongAccum) ||
7031 (BT->getKind() >= BuiltinType::SatShortFract &&
7032 BT->getKind() <= BuiltinType::SatLongFract));
7033 }
7034 return false;
7035}
7036
7037inline bool Type::isUnsignedFixedPointType() const {
7038 return isFixedPointType() && !isSignedFixedPointType();
7039}
7040
7041inline bool Type::isScalarType() const {
7042 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7043 return BT->getKind() > BuiltinType::Void &&
7044 BT->getKind() <= BuiltinType::NullPtr;
7045 if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
7046 // Enums are scalar types, but only if they are defined. Incomplete enums
7047 // are not treated as scalar types.
7048 return IsEnumDeclComplete(ET->getDecl());
7049 return isa<PointerType>(CanonicalType) ||
7050 isa<BlockPointerType>(CanonicalType) ||
7051 isa<MemberPointerType>(CanonicalType) ||
7052 isa<ComplexType>(CanonicalType) ||
7053 isa<ObjCObjectPointerType>(CanonicalType) ||
7054 isExtIntType();
7055}
7056
7057inline bool Type::isIntegralOrEnumerationType() const {
7058 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7059 return BT->getKind() >= BuiltinType::Bool &&
7060 BT->getKind() <= BuiltinType::Int128;
7061
7062 // Check for a complete enum type; incomplete enum types are not properly an
7063 // enumeration type in the sense required here.
7064 if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
7065 return IsEnumDeclComplete(ET->getDecl());
7066
7067 return isExtIntType();
7068}
7069
7070inline bool Type::isBooleanType() const {
7071 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7072 return BT->getKind() == BuiltinType::Bool;
7073 return false;
7074}
7075
7076inline bool Type::isUndeducedType() const {
7077 auto *DT = getContainedDeducedType();
7078 return DT && !DT->isDeduced();
7079}
7080
7081/// Determines whether this is a type for which one can define
7082/// an overloaded operator.
7083inline bool Type::isOverloadableType() const {
7084 return isDependentType() || isRecordType() || isEnumeralType();
7085}
7086
7087/// Determines whether this type is written as a typedef-name.
7088inline bool Type::isTypedefNameType() const {
7089 if (getAs<TypedefType>())
7090 return true;
7091 if (auto *TST = getAs<TemplateSpecializationType>())
7092 return TST->isTypeAlias();
7093 return false;
7094}
7095
7096/// Determines whether this type can decay to a pointer type.
7097inline bool Type::canDecayToPointerType() const {
7098 return isFunctionType() || isArrayType();
7099}
7100
7101inline bool Type::hasPointerRepresentation() const {
7102 return (isPointerType() || isReferenceType() || isBlockPointerType() ||
7103 isObjCObjectPointerType() || isNullPtrType());
7104}
7105
7106inline bool Type::hasObjCPointerRepresentation() const {
7107 return isObjCObjectPointerType();
7108}
7109
7110inline const Type *Type::getBaseElementTypeUnsafe() const {
7111 const Type *type = this;
7112 while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe())
7113 type = arrayType->getElementType().getTypePtr();
7114 return type;
7115}
7116
7117inline const Type *Type::getPointeeOrArrayElementType() const {
7118 const Type *type = this;
7119 if (type->isAnyPointerType())
7120 return type->getPointeeType().getTypePtr();
7121 else if (type->isArrayType())
7122 return type->getBaseElementTypeUnsafe();
7123 return type;
7124}
7125/// Insertion operator for partial diagnostics. This allows sending address
7126/// spaces into a diagnostic with <<.
7127inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7128 LangAS AS) {
7129 PD.AddTaggedVal(static_cast<std::underlying_type_t<LangAS>>(AS),
7130 DiagnosticsEngine::ArgumentKind::ak_addrspace);
7131 return PD;
7132}
7133
7134/// Insertion operator for partial diagnostics. This allows sending Qualifiers
7135/// into a diagnostic with <<.
7136inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7137 Qualifiers Q) {
7138 PD.AddTaggedVal(Q.getAsOpaqueValue(),
7139 DiagnosticsEngine::ArgumentKind::ak_qual);
7140 return PD;
7141}
7142
7143/// Insertion operator for partial diagnostics. This allows sending QualTypes
7144/// into a diagnostic with <<.
7145inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7146 QualType T) {
7147 PD.AddTaggedVal(reinterpret_cast<intptr_t>(T.getAsOpaquePtr()),
7148 DiagnosticsEngine::ak_qualtype);
7149 return PD;
7150}
7151
7152// Helper class template that is used by Type::getAs to ensure that one does
7153// not try to look through a qualified type to get to an array type.
7154template <typename T>
7155using TypeIsArrayType =
7156 std::integral_constant<bool, std::is_same<T, ArrayType>::value ||
7157 std::is_base_of<ArrayType, T>::value>;
7158
7159// Member-template getAs<specific type>.
7160template <typename T> const T *Type::getAs() const {
7161 static_assert(!TypeIsArrayType<T>::value,
7162 "ArrayType cannot be used with getAs!");
7163
7164 // If this is directly a T type, return it.
7165 if (const auto *Ty = dyn_cast<T>(this))
7166 return Ty;
7167
7168 // If the canonical form of this type isn't the right kind, reject it.
7169 if (!isa<T>(CanonicalType))
7170 return nullptr;
7171
7172 // If this is a typedef for the type, strip the typedef off without
7173 // losing all typedef information.
7174 return cast<T>(getUnqualifiedDesugaredType());
7175}
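A small usage sketch, not part of this header ('QT' is a hypothetical QualType): getAs<> returns null when the canonical type is not the requested kind, and otherwise strips just enough sugar to hand back the node, so it works through typedefs.

if (const clang::PointerType *PT = QT->getAs<clang::PointerType>())
  clang::QualType Pointee = PT->getPointeeType(); // found, even behind a typedef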
7176
7177template <typename T> const T *Type::getAsAdjusted() const {
7178 static_assert(!TypeIsArrayType<T>::value, "ArrayType cannot be used with getAsAdjusted!");
7179
7180 // If this is directly a T type, return it.
7181 if (const auto *Ty = dyn_cast<T>(this))
7182 return Ty;
7183
7184 // If the canonical form of this type isn't the right kind, reject it.
7185 if (!isa<T>(CanonicalType))
7186 return nullptr;
7187
7188 // Strip off type adjustments that do not modify the underlying nature of the
7189 // type.
7190 const Type *Ty = this;
7191 while (Ty) {
7192 if (const auto *A = dyn_cast<AttributedType>(Ty))
7193 Ty = A->getModifiedType().getTypePtr();
7194 else if (const auto *E = dyn_cast<ElaboratedType>(Ty))
7195 Ty = E->desugar().getTypePtr();
7196 else if (const auto *P = dyn_cast<ParenType>(Ty))
7197 Ty = P->desugar().getTypePtr();
7198 else if (const auto *A = dyn_cast<AdjustedType>(Ty))
7199 Ty = A->desugar().getTypePtr();
7200 else if (const auto *M = dyn_cast<MacroQualifiedType>(Ty))
7201 Ty = M->desugar().getTypePtr();
7202 else
7203 break;
7204 }
7205
7206 // Just because the canonical type is correct does not mean we can use cast<>,
7207 // since we may not have stripped off all the sugar down to the base type.
7208 return dyn_cast<T>(Ty);
7209}
7210
7211inline const ArrayType *Type::getAsArrayTypeUnsafe() const {
7212 // If this is directly an array type, return it.
7213 if (const auto *arr = dyn_cast<ArrayType>(this))
7214 return arr;
7215
7216 // If the canonical form of this type isn't the right kind, reject it.
7217 if (!isa<ArrayType>(CanonicalType))
7218 return nullptr;
7219
7220 // If this is a typedef for the type, strip the typedef off without
7221 // losing all typedef information.
7222 return cast<ArrayType>(getUnqualifiedDesugaredType());
7223}
7224
7225template <typename T> const T *Type::castAs() const {
7226 static_assert(!TypeIsArrayType<T>::value,
7227 "ArrayType cannot be used with castAs!");
7228
7229 if (const auto *ty = dyn_cast<T>(this)) return ty;
7230 assert(isa<T>(CanonicalType));
7231 return cast<T>(getUnqualifiedDesugaredType());
7232}
7233
7234inline const ArrayType *Type::castAsArrayTypeUnsafe() const {
7235 assert(isa<ArrayType>(CanonicalType));
7236 if (const auto *arr = dyn_cast<ArrayType>(this)) return arr;
7237 return cast<ArrayType>(getUnqualifiedDesugaredType());
7238}
7239
7240DecayedType::DecayedType(QualType OriginalType, QualType DecayedPtr,
7241 QualType CanonicalPtr)
7242 : AdjustedType(Decayed, OriginalType, DecayedPtr, CanonicalPtr) {
7243#ifndef NDEBUG
7244 QualType Adjusted = getAdjustedType();
7245 (void)AttributedType::stripOuterNullability(Adjusted);
7246 assert(isa<PointerType>(Adjusted));
7247#endif
7248}
7249
7250QualType DecayedType::getPointeeType() const {
7251 QualType Decayed = getDecayedType();
7252 (void)AttributedType::stripOuterNullability(Decayed);
7253 return cast<PointerType>(Decayed)->getPointeeType();
7254}
7255
7256// Get the decimal string representation of a fixed point type, represented
7257// as a scaled integer.
7258// TODO: At some point, we should change the arguments to instead just accept an
7259// APFixedPoint instead of APSInt and scale.
7260void FixedPointValueToString(SmallVectorImpl<char> &Str, llvm::APSInt Val,
7261 unsigned Scale);
7262
7263} // namespace clang
7264
7265#endif // LLVM_CLANG_AST_TYPE_H

/usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include/llvm/ADT/PointerUnion.h

1//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the PointerUnion class, which is a discriminated union of
10// pointer types.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_POINTERUNION_H
15#define LLVM_ADT_POINTERUNION_H
16
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/PointerIntPair.h"
19#include "llvm/Support/PointerLikeTypeTraits.h"
20#include <cassert>
21#include <cstddef>
22#include <cstdint>
23
24namespace llvm {
25
26template <typename T> struct PointerUnionTypeSelectorReturn {
27 using Return = T;
28};
29
30/// Get a type based on whether two types are the same or not.
31///
32/// For:
33///
34/// \code
35/// using Ret = typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return;
36/// \endcode
37///
38/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
39template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
40struct PointerUnionTypeSelector {
41 using Return = typename PointerUnionTypeSelectorReturn<RET_NE>::Return;
42};
43
44template <typename T, typename RET_EQ, typename RET_NE>
45struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
46 using Return = typename PointerUnionTypeSelectorReturn<RET_EQ>::Return;
47};
48
49template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
50struct PointerUnionTypeSelectorReturn<
51 PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>> {
52 using Return =
53 typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return;
54};
55
56namespace pointer_union_detail {
57 /// Determine the number of bits required to store integers with values < n.
58 /// This is ceil(log2(n)).
59 constexpr int bitsRequired(unsigned n) {
60 return n > 1 ? 1 + bitsRequired((n + 1) / 2) : 0;
61 }
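A few worked values (illustrative checks, not part of this header): one discriminator bit distinguishes two pointer types, while three or four need two.

static_assert(bitsRequired(2) == 1, "two cases need one tag bit");
static_assert(bitsRequired(3) == 2, "three cases need two tag bits");
static_assert(bitsRequired(4) == 2, "four cases still fit in two bits");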
62
63 template <typename... Ts> constexpr int lowBitsAvailable() {
64 return std::min<int>({PointerLikeTypeTraits<Ts>::NumLowBitsAvailable...});
65 }
66
67 /// Find the index of a type in a list of types. TypeIndex<T, Us...>::Index
68 /// is the index of T in Us, or sizeof...(Us) if T does not appear in the
69 /// list.
70 template <typename T, typename ...Us> struct TypeIndex;
71 template <typename T, typename ...Us> struct TypeIndex<T, T, Us...> {
72 static constexpr int Index = 0;
73 };
74 template <typename T, typename U, typename... Us>
75 struct TypeIndex<T, U, Us...> {
76 static constexpr int Index = 1 + TypeIndex<T, Us...>::Index;
77 };
78 template <typename T> struct TypeIndex<T> {
79 static constexpr int Index = 0;
80 };
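Worked examples (illustrative only): TypeIndex yields the index of the first match, or sizeof...(Us) when the type is absent.

static_assert(TypeIndex<float *, int *, float *>::Index == 1, "second entry");
static_assert(TypeIndex<char *, int *, float *>::Index == 2, "absent -> sizeof...(Us)");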
81
82 /// Find the first type in a list of types.
83 template <typename T, typename...> struct GetFirstType {
84 using type = T;
85 };
86
87 /// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
88 /// for the template arguments.
89 template <typename ...PTs> class PointerUnionUIntTraits {
90 public:
91 static inline void *getAsVoidPointer(void *P) { return P; }
92 static inline void *getFromVoidPointer(void *P) { return P; }
93 static constexpr int NumLowBitsAvailable = lowBitsAvailable<PTs...>();
94 };
95
96 template <typename Derived, typename ValTy, int I, typename ...Types>
97 class PointerUnionMembers;
98
99 template <typename Derived, typename ValTy, int I>
100 class PointerUnionMembers<Derived, ValTy, I> {
101 protected:
102 ValTy Val;
103 PointerUnionMembers() = default;
104 PointerUnionMembers(ValTy Val) : Val(Val) {}
105
106 friend struct PointerLikeTypeTraits<Derived>;
107 };
108
109 template <typename Derived, typename ValTy, int I, typename Type,
110 typename ...Types>
111 class PointerUnionMembers<Derived, ValTy, I, Type, Types...>
112 : public PointerUnionMembers<Derived, ValTy, I + 1, Types...> {
113 using Base = PointerUnionMembers<Derived, ValTy, I + 1, Types...>;
114 public:
115 using Base::Base;
116 PointerUnionMembers() = default;
117 PointerUnionMembers(Type V)
118 : Base(ValTy(const_cast<void *>(
119 PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
120 I)) {}
121
122 using Base::operator=;
123 Derived &operator=(Type V) {
124 this->Val = ValTy(
125 const_cast<void *>(PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
126 I);
127 return static_cast<Derived &>(*this);
128 };
129 };
130}
131
132/// A discriminated union of two or more pointer types, with the discriminator
133/// in the low bit of the pointer.
134///
135/// This implementation is extremely efficient in space due to leveraging the
136/// low bits of the pointer, while exposing a natural and type-safe API.
137///
138/// Common use patterns would be something like this:
139/// PointerUnion<int*, float*> P;
140/// P = (int*)0;
141/// printf("%d %d", P.is<int*>(), P.is<float*>()); // prints "1 0"
142/// X = P.get<int*>(); // ok.
143/// Y = P.get<float*>(); // runtime assertion failure.
144/// Z = P.get<double*>(); // compile time failure.
145/// P = (float*)0;
146/// Y = P.get<float*>(); // ok.
147/// X = P.get<int*>(); // runtime assertion failure.
148template <typename... PTs>
149class PointerUnion
150 : public pointer_union_detail::PointerUnionMembers<
151 PointerUnion<PTs...>,
152 PointerIntPair<
153 void *, pointer_union_detail::bitsRequired(sizeof...(PTs)), int,
154 pointer_union_detail::PointerUnionUIntTraits<PTs...>>,
155 0, PTs...> {
156 // The first type is special because we want to directly cast a pointer to a
157 // default-initialized union to a pointer to the first type. But we don't
158 // want PointerUnion to be a 'template <typename First, typename ...Rest>'
159 // because it's much more convenient to have a name for the whole pack. So
160 // split off the first type here.
161 using First = typename pointer_union_detail::GetFirstType<PTs...>::type;
162 using Base = typename PointerUnion::PointerUnionMembers;
163
164public:
165 PointerUnion() = default;
166
167 PointerUnion(std::nullptr_t) : PointerUnion() {}
168 using Base::Base;
169
170 /// Test if the pointer held in the union is null, regardless of
171 /// which type it is.
172 bool isNull() const { return !this->Val.getPointer(); }
23. Assuming the condition is true
24. Returning the value 1, which participates in a condition later
173
174 explicit operator bool() const { return !isNull(); }
175
176 /// Test if the Union currently holds the type matching T.
177 template <typename T> bool is() const {
178 constexpr int Index = pointer_union_detail::TypeIndex<T, PTs...>::Index;
179 static_assert(Index < sizeof...(PTs),
180 "PointerUnion::is<T> given type not in the union");
181 return this->Val.getInt() == Index;
182 }
183
184 /// Returns the value of the specified pointer type.
185 ///
186 /// If the specified pointer type is incorrect, assert.
187 template <typename T> T get() const {
188 assert(is<T>() && "Invalid accessor called");
189 return PointerLikeTypeTraits<T>::getFromVoidPointer(this->Val.getPointer());
190 }
191
192 /// Returns the current pointer if it is of the specified pointer type,
193 /// otherwise returns null.
194 template <typename T> T dyn_cast() const {
195 if (is<T>())
196 return get<T>();
197 return T();
198 }
199
200 /// If the union is set to the first pointer type get an address pointing to
201 /// it.
202 First const *getAddrOfPtr1() const {
203 return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
204 }
205
206 /// If the union is set to the first pointer type get an address pointing to
207 /// it.
208 First *getAddrOfPtr1() {
209 assert(is<First>() && "Val is not the first pointer");
210 assert(
211 PointerLikeTypeTraits<First>::getAsVoidPointer(get<First>()) ==
212 this->Val.getPointer() &&
213 "Can't get the address because PointerLikeTypeTraits changes the ptr");
214 return const_cast<First *>(
215 reinterpret_cast<const First *>(this->Val.getAddrOfPointer()));
216 }
217
218 /// Assignment from nullptr which just clears the union.
219 const PointerUnion &operator=(std::nullptr_t) {
220 this->Val.initWithPointer(nullptr);
221 return *this;
222 }
223
224 /// Assignment from elements of the union.
225 using Base::operator=;
226
227 void *getOpaqueValue() const { return this->Val.getOpaqueValue(); }
228 static inline PointerUnion getFromOpaqueValue(void *VP) {
229 PointerUnion V;
230 V.Val = decltype(V.Val)::getFromOpaqueValue(VP);
231 return V;
232 }
233};
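A compact usage sketch, not part of this header: the discriminator lives in the pointer's low bits, so the union stays pointer-sized, and dyn_cast<> is the null-safe accessor.

llvm::PointerUnion<int *, float *> P;
static_assert(sizeof(P) == sizeof(void *), "tag is stored in the low bits");
P = static_cast<int *>(nullptr);
bool HoldsInt = P.is<int *>(); // true: the tag says int*, even though the value is null
int *IP = P.dyn_cast<int *>(); // nullptr here, but typed access is safe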
234
235template <typename ...PTs>
236bool operator==(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
237 return lhs.getOpaqueValue() == rhs.getOpaqueValue();
238}
239
240template <typename ...PTs>
241bool operator!=(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
242 return lhs.getOpaqueValue() != rhs.getOpaqueValue();
243}
244
245template <typename ...PTs>
246bool operator<(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
247 return lhs.getOpaqueValue() < rhs.getOpaqueValue();
248}
249
250// Teach SmallPtrSet that PointerUnion is "basically a pointer", that has
251// # low bits available = min(PT1bits,PT2bits)-1.
252template <typename ...PTs>
253struct PointerLikeTypeTraits<PointerUnion<PTs...>> {
254 static inline void *getAsVoidPointer(const PointerUnion<PTs...> &P) {
255 return P.getOpaqueValue();
256 }
257
258 static inline PointerUnion<PTs...> getFromVoidPointer(void *P) {
259 return PointerUnion<PTs...>::getFromOpaqueValue(P);
260 }
261
262 // The number of bits available are the min of the pointer types minus the
263 // bits needed for the discriminator.
264 static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits<decltype(
265 PointerUnion<PTs...>::Val)>::NumLowBitsAvailable;
266};
267
268// Teach DenseMap how to use PointerUnions as keys.
269template <typename ...PTs> struct DenseMapInfo<PointerUnion<PTs...>> {
270 using Union = PointerUnion<PTs...>;
271 using FirstInfo =
272 DenseMapInfo<typename pointer_union_detail::GetFirstType<PTs...>::type>;
273
274 static inline Union getEmptyKey() { return Union(FirstInfo::getEmptyKey()); }
275
276 static inline Union getTombstoneKey() {
277 return Union(FirstInfo::getTombstoneKey());
278 }
279
280 static unsigned getHashValue(const Union &UnionVal) {
281 intptr_t key = (intptr_t)UnionVal.getOpaqueValue();
282 return DenseMapInfo<intptr_t>::getHashValue(key);
283 }
284
285 static bool isEqual(const Union &LHS, const Union &RHS) {
286 return LHS == RHS;
287 }
288};
289
290} // end namespace llvm
291
292#endif // LLVM_ADT_POINTERUNION_H