Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/RegAllocPBQP.h
Warning: line 73, column 7
Use of zero-allocated memory
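For orientation, this diagnostic class is emitted by the clang static analyzer's malloc/new checking when memory obtained from an allocation of size zero is later read or written. A minimal, hypothetical C++ sketch of the pattern (not the LLVM code flagged above, which the analyzer reaches through the path annotated below):

  #include <cstdlib>

  int main() {
    std::size_t N = 0;                                         // some path makes the size 0
    int *P = static_cast<int *>(std::malloc(N * sizeof(int))); // zero-size allocation
    if (P)
      P[0] = 42;                                               // warn: use of zero-allocated memory
    std::free(P);
    return 0;
  }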

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name RegAllocPBQP.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/RegAllocPBQP.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/RegAllocPBQP.cpp

1//===- RegAllocPBQP.cpp ---- PBQP Register Allocator ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a Partitioned Boolean Quadratic Programming (PBQP) based
10// register allocator for LLVM. This allocator works by constructing a PBQP
11// problem representing the register allocation problem under consideration,
12// solving this using a PBQP solver, and mapping the solution back to a
13// register assignment. If any variables are selected for spilling then spill
14// code is inserted and the process repeated.
15//
16// The PBQP solver (pbqp.c) provided for this allocator uses a heuristic tuned
17// for register allocation. For more information on PBQP for register
18// allocation, see the following papers:
19//
20// (1) Hames, L. and Scholz, B. 2006. Nearly optimal register allocation with
21// PBQP. In Proceedings of the 7th Joint Modular Languages Conference
22// (JMLC'06). LNCS, vol. 4228. Springer, New York, NY, USA. 346-361.
23//
24// (2) Scholz, B., Eckstein, E. 2002. Register allocation for irregular
25// architectures. In Proceedings of the Joint Conference on Languages,
26// Compilers and Tools for Embedded Systems (LCTES'02), ACM Press, New York,
27// NY, USA, 139-148.
28//
29//===----------------------------------------------------------------------===//
30
31#include "llvm/CodeGen/RegAllocPBQP.h"
32#include "RegisterCoalescer.h"
33#include "llvm/ADT/ArrayRef.h"
34#include "llvm/ADT/BitVector.h"
35#include "llvm/ADT/DenseMap.h"
36#include "llvm/ADT/DenseSet.h"
37#include "llvm/ADT/STLExtras.h"
38#include "llvm/ADT/SmallPtrSet.h"
39#include "llvm/ADT/SmallVector.h"
40#include "llvm/ADT/StringRef.h"
41#include "llvm/Analysis/AliasAnalysis.h"
42#include "llvm/CodeGen/CalcSpillWeights.h"
43#include "llvm/CodeGen/LiveInterval.h"
44#include "llvm/CodeGen/LiveIntervals.h"
45#include "llvm/CodeGen/LiveRangeEdit.h"
46#include "llvm/CodeGen/LiveStacks.h"
47#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
48#include "llvm/CodeGen/MachineDominators.h"
49#include "llvm/CodeGen/MachineFunction.h"
50#include "llvm/CodeGen/MachineFunctionPass.h"
51#include "llvm/CodeGen/MachineInstr.h"
52#include "llvm/CodeGen/MachineLoopInfo.h"
53#include "llvm/CodeGen/MachineRegisterInfo.h"
54#include "llvm/CodeGen/PBQP/Graph.h"
55#include "llvm/CodeGen/PBQP/Math.h"
56#include "llvm/CodeGen/PBQP/Solution.h"
57#include "llvm/CodeGen/PBQPRAConstraint.h"
58#include "llvm/CodeGen/RegAllocRegistry.h"
59#include "llvm/CodeGen/SlotIndexes.h"
60#include "llvm/CodeGen/Spiller.h"
61#include "llvm/CodeGen/TargetRegisterInfo.h"
62#include "llvm/CodeGen/TargetSubtargetInfo.h"
63#include "llvm/CodeGen/VirtRegMap.h"
64#include "llvm/Config/llvm-config.h"
65#include "llvm/IR/Function.h"
66#include "llvm/IR/Module.h"
67#include "llvm/MC/MCRegisterInfo.h"
68#include "llvm/Pass.h"
69#include "llvm/Support/CommandLine.h"
70#include "llvm/Support/Compiler.h"
71#include "llvm/Support/Debug.h"
72#include "llvm/Support/FileSystem.h"
73#include "llvm/Support/Printable.h"
74#include "llvm/Support/raw_ostream.h"
75#include <algorithm>
76#include <cassert>
77#include <cstddef>
78#include <limits>
79#include <map>
80#include <memory>
81#include <queue>
82#include <set>
83#include <sstream>
84#include <string>
85#include <system_error>
86#include <tuple>
87#include <utility>
88#include <vector>
89
90using namespace llvm;
91
92#define DEBUG_TYPE "regalloc"
93
94static RegisterRegAlloc
95RegisterPBQPRepAlloc("pbqp", "PBQP register allocator",
96 createDefaultPBQPRegisterAllocator);
97
98static cl::opt<bool>
99PBQPCoalescing("pbqp-coalescing",
100 cl::desc("Attempt coalescing during PBQP register allocation."),
101 cl::init(false), cl::Hidden);
102
103#ifndef NDEBUG
104static cl::opt<bool>
105PBQPDumpGraphs("pbqp-dump-graphs",
106 cl::desc("Dump graphs for each function/round in the compilation unit."),
107 cl::init(false), cl::Hidden);
108#endif
109
110namespace {
111
112///
113/// PBQP based allocators solve the register allocation problem by mapping
114/// register allocation problems to Partitioned Boolean Quadratic
115/// Programming problems.
116class RegAllocPBQP : public MachineFunctionPass {
117public:
118 static char ID;
119
120 /// Construct a PBQP register allocator.
121 RegAllocPBQP(char *cPassID = nullptr)
122 : MachineFunctionPass(ID), customPassID(cPassID) {
123 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
124 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
125 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
126 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
127 }
128
129 /// Return the pass name.
130 StringRef getPassName() const override { return "PBQP Register Allocator"; }
131
132 /// PBQP analysis usage.
133 void getAnalysisUsage(AnalysisUsage &au) const override;
134
135 /// Perform register allocation
136 bool runOnMachineFunction(MachineFunction &MF) override;
137
138 MachineFunctionProperties getRequiredProperties() const override {
139 return MachineFunctionProperties().set(
140 MachineFunctionProperties::Property::NoPHIs);
141 }
142
143 MachineFunctionProperties getClearedProperties() const override {
144 return MachineFunctionProperties().set(
145 MachineFunctionProperties::Property::IsSSA);
146 }
147
148private:
149 using RegSet = std::set<Register>;
150
151 char *customPassID;
152
153 RegSet VRegsToAlloc, EmptyIntervalVRegs;
154
155 /// An instruction that defines an original register and whose definitions
156 /// are all dead after rematerialization is saved in DeadRemats. Its deletion
157 /// is postponed until all allocations are done, so its remat expression
158 /// remains available for rematerializing all siblings of the original reg.
159 SmallPtrSet<MachineInstr *, 32> DeadRemats;
160
161 /// Finds the initial set of vreg intervals to allocate.
162 void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS);
163
164 /// Constructs an initial graph.
165 void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller);
166
167 /// Spill the given VReg.
168 void spillVReg(Register VReg, SmallVectorImpl<Register> &NewIntervals,
169 MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
170 Spiller &VRegSpiller);
171
172 /// Given a solved PBQP problem maps this solution back to a register
173 /// assignment.
174 bool mapPBQPToRegAlloc(const PBQPRAGraph &G,
175 const PBQP::Solution &Solution,
176 VirtRegMap &VRM,
177 Spiller &VRegSpiller);
178
179 /// Postprocessing before final spilling. Sets basic block "live in"
180 /// variables.
181 void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS,
182 VirtRegMap &VRM) const;
183
184 void postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS);
185};
186
187char RegAllocPBQP::ID = 0;
188
189/// Set spill costs for each node in the PBQP reg-alloc graph.
190class SpillCosts : public PBQPRAConstraint {
191public:
192 void apply(PBQPRAGraph &G) override {
193 LiveIntervals &LIS = G.getMetadata().LIS;
194
195 // A minimum spill cost, so that register constraints can be set
196 // without normalization in the [0.0:MinSpillCost) interval.
197 const PBQP::PBQPNum MinSpillCost = 10.0;
198
199 for (auto NId : G.nodeIds()) {
200 PBQP::PBQPNum SpillCost =
201 LIS.getInterval(G.getNodeMetadata(NId).getVReg()).weight();
202 if (SpillCost == 0.0)
203 SpillCost = std::numeric_limits<PBQP::PBQPNum>::min();
204 else
205 SpillCost += MinSpillCost;
206 PBQPRAGraph::RawVector NodeCosts(G.getNodeCosts(NId));
207 NodeCosts[PBQP::RegAlloc::getSpillOptionIdx()] = SpillCost;
208 G.setNodeCosts(NId, std::move(NodeCosts));
209 }
210 }
211};
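A quick worked reading of the loop above, with illustrative weights: an interval whose weight() is 2.5 gets SpillCost = 2.5 + 10.0 = 12.5, while a zero-weight interval (e.g. one that is trivially rematerializable) gets the smallest positive normalized PBQPNum, keeping the spill option representable but essentially free. Either way the value lands in the spill entry of the node's cost vector (index 0 by LLVM's convention, matching the AllocOpt - 1 indexing used later in mapPBQPToRegAlloc).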
212
213/// Add interference edges between overlapping vregs.
214class Interference : public PBQPRAConstraint {
215private:
216 using AllowedRegVecPtr = const PBQP::RegAlloc::AllowedRegVector *;
217 using IKey = std::pair<AllowedRegVecPtr, AllowedRegVecPtr>;
218 using IMatrixCache = DenseMap<IKey, PBQPRAGraph::MatrixPtr>;
219 using DisjointAllowedRegsCache = DenseSet<IKey>;
220 using IEdgeKey = std::pair<PBQP::GraphBase::NodeId, PBQP::GraphBase::NodeId>;
221 using IEdgeCache = DenseSet<IEdgeKey>;
222
223 bool haveDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
224 PBQPRAGraph::NodeId MId,
225 const DisjointAllowedRegsCache &D) const {
226 const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
227 const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
228
229 if (NRegs == MRegs)
230 return false;
231
232 if (NRegs < MRegs)
233 return D.contains(IKey(NRegs, MRegs));
234
235 return D.contains(IKey(MRegs, NRegs));
236 }
237
238 void setDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
239 PBQPRAGraph::NodeId MId,
240 DisjointAllowedRegsCache &D) {
241 const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
242 const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
243
244 assert(NRegs != MRegs && "AllowedRegs can not be disjoint with itself");
245
246 if (NRegs < MRegs)
247 D.insert(IKey(NRegs, MRegs));
248 else
249 D.insert(IKey(MRegs, NRegs));
250 }
251
252 // Holds (Interval, CurrentSegmentID, and NodeId). The first two are required
253 // for the fast interference graph construction algorithm. The last is there
254 // to save us from looking up node ids via the VRegToNode map in the graph
255 // metadata.
256 using IntervalInfo =
257 std::tuple<LiveInterval*, size_t, PBQP::GraphBase::NodeId>;
258
259 static SlotIndex getStartPoint(const IntervalInfo &I) {
260 return std::get<0>(I)->segments[std::get<1>(I)].start;
261 }
262
263 static SlotIndex getEndPoint(const IntervalInfo &I) {
264 return std::get<0>(I)->segments[std::get<1>(I)].end;
265 }
266
267 static PBQP::GraphBase::NodeId getNodeId(const IntervalInfo &I) {
268 return std::get<2>(I);
269 }
270
271 static bool lowestStartPoint(const IntervalInfo &I1,
272 const IntervalInfo &I2) {
273 // Condition reversed because priority queue has the *highest* element at
274 // the front, rather than the lowest.
275 return getStartPoint(I1) > getStartPoint(I2);
276 }
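The reversed comparison above is the standard way to turn std::priority_queue (a max-heap by default) into a min-heap. A self-contained sketch, independent of the PBQP types:

  #include <cstdio>
  #include <queue>
  #include <vector>

  int main() {
    // With a greater-than comparator, top() yields the smallest element.
    auto Greater = [](int A, int B) { return A > B; };
    std::priority_queue<int, std::vector<int>, decltype(Greater)> MinHeap(Greater);
    MinHeap.push(5);
    MinHeap.push(1);
    MinHeap.push(3);
    std::printf("%d\n", MinHeap.top()); // prints 1
    return 0;
  }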
277
278 static bool lowestEndPoint(const IntervalInfo &I1,
279 const IntervalInfo &I2) {
280 SlotIndex E1 = getEndPoint(I1);
281 SlotIndex E2 = getEndPoint(I2);
282
283 if (E1 < E2)
284 return true;
285
286 if (E1 > E2)
287 return false;
288
289 // If two intervals end at the same point, we need a way to break the tie or
290 // the set will assume they're actually equal and refuse to insert a
291 // "duplicate". Just compare the vregs - fast and guaranteed unique.
292 return std::get<0>(I1)->reg() < std::get<0>(I2)->reg();
293 }
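The explicit tie-break matters because std::set defines equality through its comparator: if neither element compares less than the other, the second insert is rejected as a duplicate. A minimal illustration with plain pairs:

  #include <cstdio>
  #include <set>
  #include <utility>

  int main() {
    // Comparing only .first makes {7, 1} and {7, 2} "equal" to the set.
    auto ByFirst = [](const std::pair<int, int> &A, const std::pair<int, int> &B) {
      return A.first < B.first;
    };
    std::set<std::pair<int, int>, decltype(ByFirst)> S(ByFirst);
    S.insert({7, 1});
    S.insert({7, 2});               // dropped: equivalent to the existing element
    std::printf("%zu\n", S.size()); // prints 1
    return 0;
  }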
294
295 static bool isAtLastSegment(const IntervalInfo &I) {
296 return std::get<1>(I) == std::get<0>(I)->size() - 1;
297 }
298
299 static IntervalInfo nextSegment(const IntervalInfo &I) {
300 return std::make_tuple(std::get<0>(I), std::get<1>(I) + 1, std::get<2>(I));
301 }
302
303public:
304 void apply(PBQPRAGraph &G) override {
305 // The following is loosely based on the linear scan algorithm introduced in
306 // "Linear Scan Register Allocation" by Poletto and Sarkar. This version
307 // isn't linear, because the size of the active set isn't bound by the
308 // number of registers, but rather the size of the largest clique in the
309 // graph. Still, we expect this to be better than N^2.
310 LiveIntervals &LIS = G.getMetadata().LIS;
311
312 // Interference matrices are incredibly regular - they're only a function of
313 // the allowed sets, so we cache them to avoid the overhead of constructing
314 // and uniquing them.
315 IMatrixCache C;
316
317 // Finding an edge is expensive in the worst case (O(max_clique(G))), so
318 // we locally cache edges we have already seen.
319 IEdgeCache EC;
320
321 // Cache known disjoint allowed-register pairs.
322 DisjointAllowedRegsCache D;
323
324 using IntervalSet = std::set<IntervalInfo, decltype(&lowestEndPoint)>;
325 using IntervalQueue =
326 std::priority_queue<IntervalInfo, std::vector<IntervalInfo>,
327 decltype(&lowestStartPoint)>;
328 IntervalSet Active(lowestEndPoint);
329 IntervalQueue Inactive(lowestStartPoint);
330
331 // Start by building the inactive set.
332 for (auto NId : G.nodeIds()) {
333 Register VReg = G.getNodeMetadata(NId).getVReg();
334 LiveInterval &LI = LIS.getInterval(VReg);
335 assert(!LI.empty() && "PBQP graph contains node for empty interval");
336 Inactive.push(std::make_tuple(&LI, 0, NId));
337 }
338
339 while (!Inactive.empty()) {
340 // Tentatively grab the "next" interval - this choice may be overridden
341 // below.
342 IntervalInfo Cur = Inactive.top();
343
344 // Retire any active intervals that end before Cur starts.
345 IntervalSet::iterator RetireItr = Active.begin();
346 while (RetireItr != Active.end() &&
347 (getEndPoint(*RetireItr) <= getStartPoint(Cur))) {
348 // If this interval has subsequent segments, add the next one to the
349 // inactive list.
350 if (!isAtLastSegment(*RetireItr))
351 Inactive.push(nextSegment(*RetireItr));
352
353 ++RetireItr;
354 }
355 Active.erase(Active.begin(), RetireItr);
356
357 // One of the newly retired segments may actually start before the
358 // Cur segment, so re-grab the front of the inactive list.
359 Cur = Inactive.top();
360 Inactive.pop();
361
362 // At this point we know that Cur overlaps all active intervals. Add the
363 // interference edges.
364 PBQP::GraphBase::NodeId NId = getNodeId(Cur);
365 for (const auto &A : Active) {
366 PBQP::GraphBase::NodeId MId = getNodeId(A);
367
368 // Do not add an edge when the nodes' allowed registers do not
369 // intersect: there is obviously no interference.
370 if (haveDisjointAllowedRegs(G, NId, MId, D))
371 continue;
372
373 // Check that we haven't already added this edge
374 IEdgeKey EK(std::min(NId, MId), std::max(NId, MId));
375 if (EC.count(EK))
376 continue;
377
378 // This is a new edge - add it to the graph.
379 if (!createInterferenceEdge(G, NId, MId, C))
380 setDisjointAllowedRegs(G, NId, MId, D);
381 else
382 EC.insert(EK);
383 }
384
385 // Finally, add Cur to the Active set.
386 Active.insert(Cur);
387 }
388 }
389
390private:
391 // Create an Interference edge and add it to the graph, unless it is
392 // a null matrix, meaning the nodes' allowed registers do not have any
393 // interference. This case occurs frequently between integer and floating
394 // point registers for example.
395 // Returns true iff the two nodes interfere.
396 bool createInterferenceEdge(PBQPRAGraph &G,
397 PBQPRAGraph::NodeId NId, PBQPRAGraph::NodeId MId,
398 IMatrixCache &C) {
399 const TargetRegisterInfo &TRI =
400 *G.getMetadata().MF.getSubtarget().getRegisterInfo();
401 const auto &NRegs = G.getNodeMetadata(NId).getAllowedRegs();
402 const auto &MRegs = G.getNodeMetadata(MId).getAllowedRegs();
403
404 // Try looking the edge costs up in the IMatrixCache first.
405 IKey K(&NRegs, &MRegs);
406 IMatrixCache::iterator I = C.find(K);
407 if (I != C.end()) {
408 G.addEdgeBypassingCostAllocator(NId, MId, I->second);
409 return true;
410 }
411
412 PBQPRAGraph::RawMatrix M(NRegs.size() + 1, MRegs.size() + 1, 0);
413 bool NodesInterfere = false;
414 for (unsigned I = 0; I != NRegs.size(); ++I) {
415 MCRegister PRegN = NRegs[I];
416 for (unsigned J = 0; J != MRegs.size(); ++J) {
417 MCRegister PRegM = MRegs[J];
418 if (TRI.regsOverlap(PRegN, PRegM)) {
419 M[I + 1][J + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
420 NodesInterfere = true;
421 }
422 }
423 }
424
425 if (!NodesInterfere)
426 return false;
427
428 PBQPRAGraph::EdgeId EId = G.addEdge(NId, MId, std::move(M));
429 C[K] = G.getEdgeCostsPtr(EId);
430
431 return true;
432 }
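To make the matrix shape concrete, a small hypothetical case: node N allows {R0, R1}, node M allows {R1}, and the only overlap is R1 with itself. Row and column 0 are the spill options and stay at 0; entry (I+1, J+1) becomes infinity exactly when N's I-th allowed register overlaps M's J-th:

            spill   R1
    spill [   0      0  ]
    R0    [   0      0  ]
    R1    [   0     inf ]

Since at least one entry is infinite, NodesInterfere is true, the matrix is added as an edge, and its cost pointer is cached under the pair of allowed-register sets.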
433};
434
435class Coalescing : public PBQPRAConstraint {
436public:
437 void apply(PBQPRAGraph &G) override {
438 MachineFunction &MF = G.getMetadata().MF;
439 MachineBlockFrequencyInfo &MBFI = G.getMetadata().MBFI;
440 CoalescerPair CP(*MF.getSubtarget().getRegisterInfo());
441
442 // Scan the machine function and add a coalescing cost whenever CoalescerPair
443 // gives the OK.
444 for (const auto &MBB : MF) {
445 for (const auto &MI : MBB) {
446 // Skip not-coalescable or already coalesced copies.
447 if (!CP.setRegisters(&MI) || CP.getSrcReg() == CP.getDstReg())
1. Assuming the condition is false
2. Taking false branch
448 continue;
449
450 Register DstReg = CP.getDstReg();
451 Register SrcReg = CP.getSrcReg();
452
453 PBQP::PBQPNum CBenefit = MBFI.getBlockFreqRelativeToEntryBlock(&MBB);
454
455 if (CP.isPhys()) {
3. Taking false branch
456 if (!MF.getRegInfo().isAllocatable(DstReg))
457 continue;
458
459 PBQPRAGraph::NodeId NId = G.getMetadata().getNodeIdForVReg(SrcReg);
460
461 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed =
462 G.getNodeMetadata(NId).getAllowedRegs();
463
464 unsigned PRegOpt = 0;
465 while (PRegOpt < Allowed.size() && Allowed[PRegOpt].id() != DstReg)
466 ++PRegOpt;
467
468 if (PRegOpt < Allowed.size()) {
469 PBQPRAGraph::RawVector NewCosts(G.getNodeCosts(NId));
470 NewCosts[PRegOpt + 1] -= CBenefit;
471 G.setNodeCosts(NId, std::move(NewCosts));
472 }
473 } else {
474 PBQPRAGraph::NodeId N1Id = G.getMetadata().getNodeIdForVReg(DstReg);
475 PBQPRAGraph::NodeId N2Id = G.getMetadata().getNodeIdForVReg(SrcReg);
476 const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed1 =
477 &G.getNodeMetadata(N1Id).getAllowedRegs();
478 const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed2 =
479 &G.getNodeMetadata(N2Id).getAllowedRegs();
480
481 PBQPRAGraph::EdgeId EId = G.findEdge(N1Id, N2Id);
482 if (EId == G.invalidEdgeId()) {
4. Assuming the condition is true
5. Taking true branch
483 PBQPRAGraph::RawMatrix Costs(Allowed1->size() + 1,
484 Allowed2->size() + 1, 0);
485 addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
486 G.addEdge(N1Id, N2Id, std::move(Costs));
6. Calling 'Graph::addEdge'
487 } else {
488 if (G.getEdgeNode1Id(EId) == N2Id) {
489 std::swap(N1Id, N2Id);
490 std::swap(Allowed1, Allowed2);
491 }
492 PBQPRAGraph::RawMatrix Costs(G.getEdgeCosts(EId));
493 addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
494 G.updateEdgeCosts(EId, std::move(Costs));
495 }
496 }
497 }
498 }
499 }
500
501private:
502 void addVirtRegCoalesce(
503 PBQPRAGraph::RawMatrix &CostMat,
504 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed1,
505 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed2,
506 PBQP::PBQPNum Benefit) {
507 assert(CostMat.getRows() == Allowed1.size() + 1 && "Size mismatch.");
508 assert(CostMat.getCols() == Allowed2.size() + 1 && "Size mismatch.");
509 for (unsigned I = 0; I != Allowed1.size(); ++I) {
510 MCRegister PReg1 = Allowed1[I];
511 for (unsigned J = 0; J != Allowed2.size(); ++J) {
512 MCRegister PReg2 = Allowed2[J];
513 if (PReg1 == PReg2)
514 CostMat[I + 1][J + 1] -= Benefit;
515 }
516 }
517 }
518};
519
520/// PBQP-specific implementation of weight normalization.
521class PBQPVirtRegAuxInfo final : public VirtRegAuxInfo {
522 float normalize(float UseDefFreq, unsigned Size, unsigned NumInstr) override {
523 // All intervals have a spill weight that is mostly proportional to the
524 // number of uses, with uses in loops having a bigger weight.
525 return NumInstr * VirtRegAuxInfo::normalize(UseDefFreq, Size, 1);
526 }
527
528public:
529 PBQPVirtRegAuxInfo(MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
530 const MachineLoopInfo &Loops,
531 const MachineBlockFrequencyInfo &MBFI)
532 : VirtRegAuxInfo(MF, LIS, VRM, Loops, MBFI) {}
533};
534} // end anonymous namespace
535
536// Out-of-line destructor/anchor for PBQPRAConstraint.
537PBQPRAConstraint::~PBQPRAConstraint() = default;
538
539void PBQPRAConstraint::anchor() {}
540
541void PBQPRAConstraintList::anchor() {}
542
543void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
544 au.setPreservesCFG();
545 au.addRequired<AAResultsWrapperPass>();
546 au.addPreserved<AAResultsWrapperPass>();
547 au.addRequired<SlotIndexes>();
548 au.addPreserved<SlotIndexes>();
549 au.addRequired<LiveIntervals>();
550 au.addPreserved<LiveIntervals>();
551 //au.addRequiredID(SplitCriticalEdgesID);
552 if (customPassID)
553 au.addRequiredID(*customPassID);
554 au.addRequired<LiveStacks>();
555 au.addPreserved<LiveStacks>();
556 au.addRequired<MachineBlockFrequencyInfo>();
557 au.addPreserved<MachineBlockFrequencyInfo>();
558 au.addRequired<MachineLoopInfo>();
559 au.addPreserved<MachineLoopInfo>();
560 au.addRequired<MachineDominatorTree>();
561 au.addPreserved<MachineDominatorTree>();
562 au.addRequired<VirtRegMap>();
563 au.addPreserved<VirtRegMap>();
564 MachineFunctionPass::getAnalysisUsage(au);
565}
566
567void RegAllocPBQP::findVRegIntervalsToAlloc(const MachineFunction &MF,
568 LiveIntervals &LIS) {
569 const MachineRegisterInfo &MRI = MF.getRegInfo();
570
571 // Iterate over all live ranges.
572 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
573 Register Reg = Register::index2VirtReg(I);
574 if (MRI.reg_nodbg_empty(Reg))
575 continue;
576 VRegsToAlloc.insert(Reg);
577 }
578}
579
580static bool isACalleeSavedRegister(MCRegister Reg,
581 const TargetRegisterInfo &TRI,
582 const MachineFunction &MF) {
583 const MCPhysReg *CSR = MF.getRegInfo().getCalleeSavedRegs();
584 for (unsigned i = 0; CSR[i] != 0; ++i)
585 if (TRI.regsOverlap(Reg, CSR[i]))
586 return true;
587 return false;
588}
589
590void RegAllocPBQP::initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM,
591 Spiller &VRegSpiller) {
592 MachineFunction &MF = G.getMetadata().MF;
593
594 LiveIntervals &LIS = G.getMetadata().LIS;
595 const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
596 const TargetRegisterInfo &TRI =
597 *G.getMetadata().MF.getSubtarget().getRegisterInfo();
598
599 std::vector<Register> Worklist(VRegsToAlloc.begin(), VRegsToAlloc.end());
600
601 std::map<Register, std::vector<MCRegister>> VRegAllowedMap;
602
603 while (!Worklist.empty()) {
604 Register VReg = Worklist.back();
605 Worklist.pop_back();
606
607 LiveInterval &VRegLI = LIS.getInterval(VReg);
608
609 // If this is an empty interval move it to the EmptyIntervalVRegs set then
610 // continue.
611 if (VRegLI.empty()) {
612 EmptyIntervalVRegs.insert(VRegLI.reg());
613 VRegsToAlloc.erase(VRegLI.reg());
614 continue;
615 }
616
617 const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
618
619 // Record any overlaps with regmask operands.
620 BitVector RegMaskOverlaps;
621 LIS.checkRegMaskInterference(VRegLI, RegMaskOverlaps);
622
623 // Compute an initial allowed set for the current vreg.
624 std::vector<MCRegister> VRegAllowed;
625 ArrayRef<MCPhysReg> RawPRegOrder = TRC->getRawAllocationOrder(MF);
626 for (unsigned I = 0; I != RawPRegOrder.size(); ++I) {
627 MCRegister PReg(RawPRegOrder[I]);
628 if (MRI.isReserved(PReg))
629 continue;
630
631 // vregLI crosses a regmask operand that clobbers preg.
632 if (!RegMaskOverlaps.empty() && !RegMaskOverlaps.test(PReg))
633 continue;
634
635 // vregLI overlaps fixed regunit interference.
636 bool Interference = false;
637 for (MCRegUnitIterator Units(PReg, &TRI); Units.isValid(); ++Units) {
638 if (VRegLI.overlaps(LIS.getRegUnit(*Units))) {
639 Interference = true;
640 break;
641 }
642 }
643 if (Interference)
644 continue;
645
646 // preg is usable for this virtual register.
647 VRegAllowed.push_back(PReg);
648 }
649
650 // Check for vregs that have no allowed registers. These should be
651 // pre-spilled and the new vregs added to the worklist.
652 if (VRegAllowed.empty()) {
653 SmallVector<Register, 8> NewVRegs;
654 spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
655 llvm::append_range(Worklist, NewVRegs);
656 continue;
657 }
658
659 VRegAllowedMap[VReg.id()] = std::move(VRegAllowed);
660 }
661
662 for (auto &KV : VRegAllowedMap) {
663 auto VReg = KV.first;
664
665 // Move empty intervals to the EmptyIntervalVReg set.
666 if (LIS.getInterval(VReg).empty()) {
667 EmptyIntervalVRegs.insert(VReg);
668 VRegsToAlloc.erase(VReg);
669 continue;
670 }
671
672 auto &VRegAllowed = KV.second;
673
674 PBQPRAGraph::RawVector NodeCosts(VRegAllowed.size() + 1, 0);
675
676 // Tweak the cost of callee-saved registers, as using them forces spilling and
677 // restoring them. This would only happen in the prologue / epilogue though.
678 for (unsigned i = 0; i != VRegAllowed.size(); ++i)
679 if (isACalleeSavedRegister(VRegAllowed[i], TRI, MF))
680 NodeCosts[1 + i] += 1.0;
681
682 PBQPRAGraph::NodeId NId = G.addNode(std::move(NodeCosts));
683 G.getNodeMetadata(NId).setVReg(VReg);
684 G.getNodeMetadata(NId).setAllowedRegs(
685 G.getMetadata().getAllowedRegs(std::move(VRegAllowed)));
686 G.getMetadata().setNodeIdForVReg(VReg, NId);
687 }
688}
689
690void RegAllocPBQP::spillVReg(Register VReg,
691 SmallVectorImpl<Register> &NewIntervals,
692 MachineFunction &MF, LiveIntervals &LIS,
693 VirtRegMap &VRM, Spiller &VRegSpiller) {
694 VRegsToAlloc.erase(VReg);
695 LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM,
696 nullptr, &DeadRemats);
697 VRegSpiller.spill(LRE);
698
699 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
700 (void)TRI;
701 LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> SPILLED (Cost: "
702 << LRE.getParent().weight() << ", New vregs: ");
703
704 // Copy any newly inserted live intervals into the list of regs to
705 // allocate.
706 for (const Register &R : LRE) {
707 const LiveInterval &LI = LIS.getInterval(R);
708 assert(!LI.empty() && "Empty spill range.");
709 LLVM_DEBUG(dbgs() << printReg(LI.reg(), &TRI) << " ");
710 VRegsToAlloc.insert(LI.reg());
711 }
712
713 LLVM_DEBUG(dbgs() << ")\n");
714}
715
716bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
717 const PBQP::Solution &Solution,
718 VirtRegMap &VRM,
719 Spiller &VRegSpiller) {
720 MachineFunction &MF = G.getMetadata().MF;
721 LiveIntervals &LIS = G.getMetadata().LIS;
722 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
723 (void)TRI;
724
725 // Set to true if we have any spills
726 bool AnotherRoundNeeded = false;
727
728 // Clear the existing allocation.
729 VRM.clearAllVirt();
730
731 // Iterate over the nodes mapping the PBQP solution to a register
732 // assignment.
733 for (auto NId : G.nodeIds()) {
734 Register VReg = G.getNodeMetadata(NId).getVReg();
735 unsigned AllocOpt = Solution.getSelection(NId);
736
737 if (AllocOpt != PBQP::RegAlloc::getSpillOptionIdx()) {
738 MCRegister PReg = G.getNodeMetadata(NId).getAllowedRegs()[AllocOpt - 1];
739 LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> "
740 << TRI.getName(PReg) << "\n");
741 assert(PReg != 0 && "Invalid preg selected.");
742 VRM.assignVirt2Phys(VReg, PReg);
743 } else {
744 // Spill VReg. If this introduces new intervals we'll need another round
745 // of allocation.
746 SmallVector<Register, 8> NewVRegs;
747 spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
748 AnotherRoundNeeded |= !NewVRegs.empty();
749 }
750 }
751
752 return !AnotherRoundNeeded;
753}
754
755void RegAllocPBQP::finalizeAlloc(MachineFunction &MF,
756 LiveIntervals &LIS,
757 VirtRegMap &VRM) const {
758 MachineRegisterInfo &MRI = MF.getRegInfo();
759
760 // First allocate registers for the empty intervals.
761 for (const Register &R : EmptyIntervalVRegs) {
762 LiveInterval &LI = LIS.getInterval(R);
763
764 Register PReg = MRI.getSimpleHint(LI.reg());
765
766 if (PReg == 0) {
767 const TargetRegisterClass &RC = *MRI.getRegClass(LI.reg());
768 const ArrayRef<MCPhysReg> RawPRegOrder = RC.getRawAllocationOrder(MF);
769 for (MCRegister CandidateReg : RawPRegOrder) {
770 if (!VRM.getRegInfo().isReserved(CandidateReg)) {
771 PReg = CandidateReg;
772 break;
773 }
774 }
775 assert(PReg &&
776 "No un-reserved physical registers in this register class");
777 }
778
779 VRM.assignVirt2Phys(LI.reg(), PReg);
780 }
781}
782
783void RegAllocPBQP::postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS) {
784 VRegSpiller.postOptimization();
785 /// Remove dead defs because of rematerialization.
786 for (auto DeadInst : DeadRemats) {
787 LIS.RemoveMachineInstrFromMaps(*DeadInst);
788 DeadInst->eraseFromParent();
789 }
790 DeadRemats.clear();
791}
792
793bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
794 LiveIntervals &LIS = getAnalysis<LiveIntervals>();
795 MachineBlockFrequencyInfo &MBFI =
796 getAnalysis<MachineBlockFrequencyInfo>();
797
798 VirtRegMap &VRM = getAnalysis<VirtRegMap>();
799
800 PBQPVirtRegAuxInfo VRAI(MF, LIS, VRM, getAnalysis<MachineLoopInfo>(), MBFI);
801 VRAI.calculateSpillWeightsAndHints();
802
803 // FIXME: we create DefaultVRAI here to match existing behavior pre-passing
804 // the VRAI through the spiller to the live range editor. However, it probably
805 // makes more sense to pass the PBQP VRAI. The existing behavior had
806 // LiveRangeEdit make its own VirtRegAuxInfo object.
807 VirtRegAuxInfo DefaultVRAI(MF, LIS, VRM, getAnalysis<MachineLoopInfo>(),
808 MBFI);
809 std::unique_ptr<Spiller> VRegSpiller(
810 createInlineSpiller(*this, MF, VRM, DefaultVRAI));
811
812 MF.getRegInfo().freezeReservedRegs(MF);
813
814 LLVM_DEBUG(dbgs() << "PBQP Register Allocating for " << MF.getName() << "\n");
815
816 // Allocator main loop:
817 //
818 // * Map current regalloc problem to a PBQP problem
819 // * Solve the PBQP problem
820 // * Map the solution back to a register allocation
821 // * Spill if necessary
822 //
823 // This process is continued until no more spills are generated.
824
825 // Find the vreg intervals in need of allocation.
826 findVRegIntervalsToAlloc(MF, LIS);
827
828#ifndef NDEBUG
829 const Function &F = MF.getFunction();
830 std::string FullyQualifiedName =
831 F.getParent()->getModuleIdentifier() + "." + F.getName().str();
832#endif
833
834 // If there are non-empty intervals allocate them using pbqp.
835 if (!VRegsToAlloc.empty()) {
836 const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
837 std::unique_ptr<PBQPRAConstraintList> ConstraintsRoot =
838 std::make_unique<PBQPRAConstraintList>();
839 ConstraintsRoot->addConstraint(std::make_unique<SpillCosts>());
840 ConstraintsRoot->addConstraint(std::make_unique<Interference>());
841 if (PBQPCoalescing)
842 ConstraintsRoot->addConstraint(std::make_unique<Coalescing>());
843 ConstraintsRoot->addConstraint(Subtarget.getCustomPBQPConstraints());
844
845 bool PBQPAllocComplete = false;
846 unsigned Round = 0;
847
848 while (!PBQPAllocComplete) {
849 LLVM_DEBUG(dbgs() << " PBQP Regalloc round " << Round << ":\n");
850
851 PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
852 initializeGraph(G, VRM, *VRegSpiller);
853 ConstraintsRoot->apply(G);
854
855#ifndef NDEBUG
856 if (PBQPDumpGraphs) {
857 std::ostringstream RS;
858 RS << Round;
859 std::string GraphFileName = FullyQualifiedName + "." + RS.str() +
860 ".pbqpgraph";
861 std::error_code EC;
862 raw_fd_ostream OS(GraphFileName, EC, sys::fs::OF_TextWithCRLF);
863 LLVM_DEBUG(dbgs() << "Dumping graph for round " << Round << " to \""
864 << GraphFileName << "\"\n");
865 G.dump(OS);
866 }
867#endif
868
869 PBQP::Solution Solution = PBQP::RegAlloc::solve(G);
870 PBQPAllocComplete = mapPBQPToRegAlloc(G, Solution, VRM, *VRegSpiller);
871 ++Round;
872 }
873 }
874
875 // Finalise allocation, allocate empty ranges.
876 finalizeAlloc(MF, LIS, VRM);
877 postOptimization(*VRegSpiller, LIS);
878 VRegsToAlloc.clear();
879 EmptyIntervalVRegs.clear();
880
881 LLVM_DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << VRM << "\n");
882
883 return true;
884}
885
886/// Create Printable object for node and register info.
887static Printable PrintNodeInfo(PBQP::RegAlloc::PBQPRAGraph::NodeId NId,
888 const PBQP::RegAlloc::PBQPRAGraph &G) {
889 return Printable([NId, &G](raw_ostream &OS) {
890 const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
891 const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
892 Register VReg = G.getNodeMetadata(NId).getVReg();
893 const char *RegClassName = TRI->getRegClassName(MRI.getRegClass(VReg));
894 OS << NId << " (" << RegClassName << ':' << printReg(VReg, TRI) << ')';
895 });
896}
897
898#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
899LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump(raw_ostream &OS) const {
900 for (auto NId : nodeIds()) {
901 const Vector &Costs = getNodeCosts(NId);
902 assert(Costs.getLength() != 0 && "Empty vector in graph.");
903 OS << PrintNodeInfo(NId, *this) << ": " << Costs << '\n';
904 }
905 OS << '\n';
906
907 for (auto EId : edgeIds()) {
908 NodeId N1Id = getEdgeNode1Id(EId);
909 NodeId N2Id = getEdgeNode2Id(EId);
910 assert(N1Id != N2Id && "PBQP graphs should not have self-edges.");
911 const Matrix &M = getEdgeCosts(EId);
912 assert(M.getRows() != 0 && "No rows in matrix.");
913 assert(M.getCols() != 0 && "No cols in matrix.");
914 OS << PrintNodeInfo(N1Id, *this) << ' ' << M.getRows() << " rows / ";
915 OS << PrintNodeInfo(N2Id, *this) << ' ' << M.getCols() << " cols:\n";
916 OS << M << '\n';
917 }
918}
919
920LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump() const {
921 dump(dbgs());
922}
923#endif
924
925void PBQP::RegAlloc::PBQPRAGraph::printDot(raw_ostream &OS) const {
926 OS << "graph {\n";
927 for (auto NId : nodeIds()) {
928 OS << " node" << NId << " [ label=\""
929 << PrintNodeInfo(NId, *this) << "\\n"
930 << getNodeCosts(NId) << "\" ]\n";
931 }
932
933 OS << " edge [ len=" << nodeIds().size() << " ]\n";
934 for (auto EId : edgeIds()) {
935 OS << " node" << getEdgeNode1Id(EId)
936 << " -- node" << getEdgeNode2Id(EId)
937 << " [ label=\"";
938 const Matrix &EdgeCosts = getEdgeCosts(EId);
939 for (unsigned i = 0; i < EdgeCosts.getRows(); ++i) {
940 OS << EdgeCosts.getRowAsVector(i) << "\\n";
941 }
942 OS << "\" ]\n";
943 }
944 OS << "}\n";
945}
946
947FunctionPass *llvm::createPBQPRegisterAllocator(char *customPassID) {
948 return new RegAllocPBQP(customPassID);
949}
950
951FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
952 return createPBQPRegisterAllocator();
953}

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/PBQP/Graph.h

1//===- Graph.h - PBQP Graph -------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// PBQP Graph class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
14#define LLVM_CODEGEN_PBQP_GRAPH_H
15
16#include "llvm/ADT/STLExtras.h"
17#include <algorithm>
18#include <cassert>
19#include <iterator>
20#include <limits>
21#include <vector>
22
23namespace llvm {
24namespace PBQP {
25
26 class GraphBase {
27 public:
28 using NodeId = unsigned;
29 using EdgeId = unsigned;
30
31 /// Returns a value representing an invalid (non-existent) node.
32 static NodeId invalidNodeId() {
33 return std::numeric_limits<NodeId>::max();
34 }
35
36 /// Returns a value representing an invalid (non-existent) edge.
37 static EdgeId invalidEdgeId() {
38 return std::numeric_limits<EdgeId>::max();
39 }
40 };
41
42 /// PBQP Graph class.
43 /// Instances of this class describe PBQP problems.
44 ///
45 template <typename SolverT>
46 class Graph : public GraphBase {
47 private:
48 using CostAllocator = typename SolverT::CostAllocator;
49
50 public:
51 using RawVector = typename SolverT::RawVector;
52 using RawMatrix = typename SolverT::RawMatrix;
53 using Vector = typename SolverT::Vector;
54 using Matrix = typename SolverT::Matrix;
55 using VectorPtr = typename CostAllocator::VectorPtr;
56 using MatrixPtr = typename CostAllocator::MatrixPtr;
57 using NodeMetadata = typename SolverT::NodeMetadata;
58 using EdgeMetadata = typename SolverT::EdgeMetadata;
59 using GraphMetadata = typename SolverT::GraphMetadata;
60
61 private:
62 class NodeEntry {
63 public:
64 using AdjEdgeList = std::vector<EdgeId>;
65 using AdjEdgeIdx = AdjEdgeList::size_type;
66 using AdjEdgeItr = AdjEdgeList::const_iterator;
67
68 NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}
69
70 static AdjEdgeIdx getInvalidAdjEdgeIdx() {
71 return std::numeric_limits<AdjEdgeIdx>::max();
72 }
73
74 AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
75 AdjEdgeIdx Idx = AdjEdgeIds.size();
76 AdjEdgeIds.push_back(EId);
77 return Idx;
78 }
79
80 void removeAdjEdgeId(Graph &G, NodeId ThisNId, AdjEdgeIdx Idx) {
81 // Swap-and-pop for fast removal.
82 // 1) Update the adj index of the edge currently at back().
83 // 2) Move last Edge down to Idx.
84 // 3) pop_back()
85 // If Idx == size() - 1 then the setAdjEdgeIdx and swap are
86 // redundant, but both operations are cheap.
87 G.getEdge(AdjEdgeIds.back()).setAdjEdgeIdx(ThisNId, Idx);
88 AdjEdgeIds[Idx] = AdjEdgeIds.back();
89 AdjEdgeIds.pop_back();
90 }
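The comment above describes the common swap-and-pop idiom for O(1) unordered removal from a vector; a generic sketch outside the graph classes:

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  // Erase V[Idx] in constant time; element order is not preserved.
  static void swapAndPop(std::vector<int> &V, std::size_t Idx) {
    V[Idx] = V.back(); // move the last element into the hole
    V.pop_back();      // drop the now-duplicated tail
  }

  int main() {
    std::vector<int> V{10, 20, 30, 40};
    swapAndPop(V, 1);          // removes 20; 40 takes its slot
    for (int X : V)
      std::printf("%d ", X);   // prints: 10 40 30
    std::printf("\n");
    return 0;
  }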
91
92 const AdjEdgeList& getAdjEdgeIds() const { return AdjEdgeIds; }
93
94 VectorPtr Costs;
95 NodeMetadata Metadata;
96
97 private:
98 AdjEdgeList AdjEdgeIds;
99 };
100
101 class EdgeEntry {
102 public:
103 EdgeEntry(NodeId N1Id, NodeId N2Id, MatrixPtr Costs)
104 : Costs(std::move(Costs)) {
105 NIds[0] = N1Id;
106 NIds[1] = N2Id;
107 ThisEdgeAdjIdxs[0] = NodeEntry::getInvalidAdjEdgeIdx();
108 ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
109 }
110
111 void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
112 assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
113 "Edge already connected to NIds[NIdx].");
114 NodeEntry &N = G.getNode(NIds[NIdx]);
115 ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
116 }
117
118 void connect(Graph &G, EdgeId ThisEdgeId) {
119 connectToN(G, ThisEdgeId, 0);
120 connectToN(G, ThisEdgeId, 1);
121 }
122
123 void setAdjEdgeIdx(NodeId NId, typename NodeEntry::AdjEdgeIdx NewIdx) {
124 if (NId == NIds[0])
125 ThisEdgeAdjIdxs[0] = NewIdx;
126 else {
127 assert(NId == NIds[1] && "Edge not connected to NId");
128 ThisEdgeAdjIdxs[1] = NewIdx;
129 }
130 }
131
132 void disconnectFromN(Graph &G, unsigned NIdx) {
133 assert(ThisEdgeAdjIdxs[NIdx] != NodeEntry::getInvalidAdjEdgeIdx() &&
134 "Edge not connected to NIds[NIdx].");
135 NodeEntry &N = G.getNode(NIds[NIdx]);
136 N.removeAdjEdgeId(G, NIds[NIdx], ThisEdgeAdjIdxs[NIdx]);
137 ThisEdgeAdjIdxs[NIdx] = NodeEntry::getInvalidAdjEdgeIdx();
138 }
139
140 void disconnectFrom(Graph &G, NodeId NId) {
141 if (NId == NIds[0])
142 disconnectFromN(G, 0);
143 else {
144 assert(NId == NIds[1] && "Edge does not connect NId");
145 disconnectFromN(G, 1);
146 }
147 }
148
149 NodeId getN1Id() const { return NIds[0]; }
150 NodeId getN2Id() const { return NIds[1]; }
151
152 MatrixPtr Costs;
153 EdgeMetadata Metadata;
154
155 private:
156 NodeId NIds[2];
157 typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
158 };
159
160 // ----- MEMBERS -----
161
162 GraphMetadata Metadata;
163 CostAllocator CostAlloc;
164 SolverT *Solver = nullptr;
165
166 using NodeVector = std::vector<NodeEntry>;
167 using FreeNodeVector = std::vector<NodeId>;
168 NodeVector Nodes;
169 FreeNodeVector FreeNodeIds;
170
171 using EdgeVector = std::vector<EdgeEntry>;
172 using FreeEdgeVector = std::vector<EdgeId>;
173 EdgeVector Edges;
174 FreeEdgeVector FreeEdgeIds;
175
176 Graph(const Graph &Other) {}
177
178 // ----- INTERNAL METHODS -----
179
180 NodeEntry &getNode(NodeId NId) {
181 assert(NId < Nodes.size() && "Out of bound NodeId");
182 return Nodes[NId];
183 }
184 const NodeEntry &getNode(NodeId NId) const {
185 assert(NId < Nodes.size() && "Out of bound NodeId");
186 return Nodes[NId];
187 }
188
189 EdgeEntry& getEdge(EdgeId EId) { return Edges[EId]; }
190 const EdgeEntry& getEdge(EdgeId EId) const { return Edges[EId]; }
191
192 NodeId addConstructedNode(NodeEntry N) {
193 NodeId NId = 0;
194 if (!FreeNodeIds.empty()) {
195 NId = FreeNodeIds.back();
196 FreeNodeIds.pop_back();
197 Nodes[NId] = std::move(N);
198 } else {
199 NId = Nodes.size();
200 Nodes.push_back(std::move(N));
201 }
202 return NId;
203 }
204
205 EdgeId addConstructedEdge(EdgeEntry E) {
206 assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() &&
207 "Attempt to add duplicate edge.");
208 EdgeId EId = 0;
209 if (!FreeEdgeIds.empty()) {
210 EId = FreeEdgeIds.back();
211 FreeEdgeIds.pop_back();
212 Edges[EId] = std::move(E);
213 } else {
214 EId = Edges.size();
215 Edges.push_back(std::move(E));
216 }
217
218 EdgeEntry &NE = getEdge(EId);
219
220 // Add the edge to the adjacency sets of its nodes.
221 NE.connect(*this, EId);
222 return EId;
223 }
224
225 void operator=(const Graph &Other) {}
226
227 public:
228 using AdjEdgeItr = typename NodeEntry::AdjEdgeItr;
229
230 class NodeItr {
231 public:
232 using iterator_category = std::forward_iterator_tag;
233 using value_type = NodeId;
234 using difference_type = int;
235 using pointer = NodeId *;
236 using reference = NodeId &;
237
238 NodeItr(NodeId CurNId, const Graph &G)
239 : CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
240 this->CurNId = findNextInUse(CurNId); // Move to first in-use node id
241 }
242
243 bool operator==(const NodeItr &O) const { return CurNId == O.CurNId; }
244 bool operator!=(const NodeItr &O) const { return !(*this == O); }
245 NodeItr& operator++() { CurNId = findNextInUse(++CurNId); return *this; }
246 NodeId operator*() const { return CurNId; }
247
248 private:
249 NodeId findNextInUse(NodeId NId) const {
250 while (NId < EndNId && is_contained(FreeNodeIds, NId)) {
251 ++NId;
252 }
253 return NId;
254 }
255
256 NodeId CurNId, EndNId;
257 const FreeNodeVector &FreeNodeIds;
258 };
259
260 class EdgeItr {
261 public:
262 EdgeItr(EdgeId CurEId, const Graph &G)
263 : CurEId(CurEId), EndEId(G.Edges.size()), FreeEdgeIds(G.FreeEdgeIds) {
264 this->CurEId = findNextInUse(CurEId); // Move to first in-use edge id
265 }
266
267 bool operator==(const EdgeItr &O) const { return CurEId == O.CurEId; }
268 bool operator!=(const EdgeItr &O) const { return !(*this == O); }
269 EdgeItr& operator++() { CurEId = findNextInUse(++CurEId); return *this; }
270 EdgeId operator*() const { return CurEId; }
271
272 private:
273 EdgeId findNextInUse(EdgeId EId) const {
274 while (EId < EndEId && is_contained(FreeEdgeIds, EId)) {
275 ++EId;
276 }
277 return EId;
278 }
279
280 EdgeId CurEId, EndEId;
281 const FreeEdgeVector &FreeEdgeIds;
282 };
283
284 class NodeIdSet {
285 public:
286 NodeIdSet(const Graph &G) : G(G) {}
287
288 NodeItr begin() const { return NodeItr(0, G); }
289 NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
290
291 bool empty() const { return G.Nodes.empty(); }
292
293 typename NodeVector::size_type size() const {
294 return G.Nodes.size() - G.FreeNodeIds.size();
295 }
296
297 private:
298 const Graph& G;
299 };
300
301 class EdgeIdSet {
302 public:
303 EdgeIdSet(const Graph &G) : G(G) {}
304
305 EdgeItr begin() const { return EdgeItr(0, G); }
306 EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
307
308 bool empty() const { return G.Edges.empty(); }
309
310 typename NodeVector::size_type size() const {
311 return G.Edges.size() - G.FreeEdgeIds.size();
312 }
313
314 private:
315 const Graph& G;
316 };
317
318 class AdjEdgeIdSet {
319 public:
320 AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) {}
321
322 typename NodeEntry::AdjEdgeItr begin() const {
323 return NE.getAdjEdgeIds().begin();
324 }
325
326 typename NodeEntry::AdjEdgeItr end() const {
327 return NE.getAdjEdgeIds().end();
328 }
329
330 bool empty() const { return NE.getAdjEdgeIds().empty(); }
331
332 typename NodeEntry::AdjEdgeList::size_type size() const {
333 return NE.getAdjEdgeIds().size();
334 }
335
336 private:
337 const NodeEntry &NE;
338 };
339
340 /// Construct an empty PBQP graph.
341 Graph() = default;
342
343 /// Construct an empty PBQP graph with the given graph metadata.
344 Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
345
346 /// Get a reference to the graph metadata.
347 GraphMetadata& getMetadata() { return Metadata; }
348
349 /// Get a const-reference to the graph metadata.
350 const GraphMetadata& getMetadata() const { return Metadata; }
351
352 /// Lock this graph to the given solver instance in preparation
353 /// for running the solver. This method will call solver.handleAddNode for
354 /// each node in the graph, and handleAddEdge for each edge, to give the
354 /// solver an opportunity to set up any required metadata.
356 void setSolver(SolverT &S) {
357 assert(!Solver && "Solver already set. Call unsetSolver().");
358 Solver = &S;
359 for (auto NId : nodeIds())
360 Solver->handleAddNode(NId);
361 for (auto EId : edgeIds())
362 Solver->handleAddEdge(EId);
363 }
364
365 /// Release from solver instance.
366 void unsetSolver() {
367 assert(Solver && "Solver not set.");
368 Solver = nullptr;
369 }
370
371 /// Add a node with the given costs.
372 /// @param Costs Cost vector for the new node.
373 /// @return Node iterator for the added node.
374 template <typename OtherVectorT>
375 NodeId addNode(OtherVectorT Costs) {
376 // Get cost vector from the problem domain
377 VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
378 NodeId NId = addConstructedNode(NodeEntry(AllocatedCosts));
379 if (Solver)
380 Solver->handleAddNode(NId);
381 return NId;
382 }
383
384 /// Add a node bypassing the cost allocator.
385 /// @param Costs Cost vector ptr for the new node (must be convertible to
386 /// VectorPtr).
387 /// @return Node iterator for the added node.
388 ///
389 /// This method allows for fast addition of a node whose costs don't need
390 /// to be passed through the cost allocator. The most common use case for
391 /// this is when duplicating costs from an existing node (when using a
392 /// pooling allocator). These have already been uniqued, so we can avoid
393 /// re-constructing and re-uniquing them by attaching them directly to the
394 /// new node.
395 template <typename OtherVectorPtrT>
396 NodeId addNodeBypassingCostAllocator(OtherVectorPtrT Costs) {
397 NodeId NId = addConstructedNode(NodeEntry(Costs));
398 if (Solver)
399 Solver->handleAddNode(NId);
400 return NId;
401 }
402
403 /// Add an edge between the given nodes with the given costs.
404 /// @param N1Id First node.
405 /// @param N2Id Second node.
406 /// @param Costs Cost matrix for new edge.
407 /// @return Edge iterator for the added edge.
408 template <typename OtherVectorT>
409 EdgeId addEdge(NodeId N1Id, NodeId N2Id, OtherVectorT Costs) {
410 assert(getNodeCosts(N1Id).getLength() == Costs.getRows() &&
411 getNodeCosts(N2Id).getLength() == Costs.getCols() &&
412 "Matrix dimensions mismatch.");
413 // Get cost matrix from the problem domain.
414 MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
7. Calling 'PoolCostAllocator::getMatrix'
415 EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, AllocatedCosts));
416 if (Solver)
417 Solver->handleAddEdge(EId);
418 return EId;
419 }
420
421 /// Add an edge bypassing the cost allocator.
422 /// @param N1Id First node.
423 /// @param N2Id Second node.
424 /// @param Costs Cost matrix for new edge.
425 /// @return Edge iterator for the added edge.
426 ///
427 /// This method allows for fast addition of an edge whose costs don't need
428 /// to be passed through the cost allocator. The most common use case for
429 /// this is when duplicating costs from an existing edge (when using a
430 /// pooling allocator). These have already been uniqued, so we can avoid
431 /// re-constructing and re-uniquing them by attaching them directly to the
432 /// new edge.
433 template <typename OtherMatrixPtrT>
434 NodeId addEdgeBypassingCostAllocator(NodeId N1Id, NodeId N2Id,
435 OtherMatrixPtrT Costs) {
436 assert(getNodeCosts(N1Id).getLength() == Costs->getRows() &&
437 getNodeCosts(N2Id).getLength() == Costs->getCols() &&
438 "Matrix dimensions mismatch.");
439 // Get cost matrix from the problem domain.
440 EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, Costs));
441 if (Solver)
442 Solver->handleAddEdge(EId);
443 return EId;
444 }
445
446 /// Returns true if the graph is empty.
447 bool empty() const { return NodeIdSet(*this).empty(); }
448
449 NodeIdSet nodeIds() const { return NodeIdSet(*this); }
450 EdgeIdSet edgeIds() const { return EdgeIdSet(*this); }
451
452 AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }
453
454 /// Get the number of nodes in the graph.
455 /// @return Number of nodes in the graph.
456 unsigned getNumNodes() const { return NodeIdSet(*this).size(); }
457
458 /// Get the number of edges in the graph.
459 /// @return Number of edges in the graph.
460 unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }
461
462 /// Set a node's cost vector.
463 /// @param NId Node to update.
464 /// @param Costs New costs to set.
465 template <typename OtherVectorT>
466 void setNodeCosts(NodeId NId, OtherVectorT Costs) {
467 VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
468 if (Solver)
469 Solver->handleSetNodeCosts(NId, *AllocatedCosts);
470 getNode(NId).Costs = AllocatedCosts;
471 }
472
473 /// Get a VectorPtr to a node's cost vector. Rarely useful - use
474 /// getNodeCosts where possible.
475 /// @param NId Node id.
476 /// @return VectorPtr to node cost vector.
477 ///
478 /// This method is primarily useful for duplicating costs quickly by
479 /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
480 /// getNodeCosts when dealing with node cost values.
481 const VectorPtr& getNodeCostsPtr(NodeId NId) const {
482 return getNode(NId).Costs;
483 }
484
485 /// Get a node's cost vector.
486 /// @param NId Node id.
487 /// @return Node cost vector.
488 const Vector& getNodeCosts(NodeId NId) const {
489 return *getNodeCostsPtr(NId);
490 }
491
492 NodeMetadata& getNodeMetadata(NodeId NId) {
493 return getNode(NId).Metadata;
494 }
495
496 const NodeMetadata& getNodeMetadata(NodeId NId) const {
497 return getNode(NId).Metadata;
498 }
499
500 typename NodeEntry::AdjEdgeList::size_type getNodeDegree(NodeId NId) const {
501 return getNode(NId).getAdjEdgeIds().size();
502 }
503
504 /// Update an edge's cost matrix.
505 /// @param EId Edge id.
506 /// @param Costs New cost matrix.
507 template <typename OtherMatrixT>
508 void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
509 MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
510 if (Solver)
511 Solver->handleUpdateCosts(EId, *AllocatedCosts);
512 getEdge(EId).Costs = AllocatedCosts;
513 }
514
515 /// Get a MatrixPtr to an edge's cost matrix. Rarely useful - use
516 /// getEdgeCosts where possible.
517 /// @param EId Edge id.
518 /// @return MatrixPtr to edge cost matrix.
519 ///
520 /// This method is primarily useful for duplicating costs quickly by
521 /// bypassing the cost allocator. See addEdgeBypassingCostAllocator. Prefer
522 /// getEdgeCosts when dealing with edge cost values.
523 const MatrixPtr& getEdgeCostsPtr(EdgeId EId) const {
524 return getEdge(EId).Costs;
525 }
526
527 /// Get an edge's cost matrix.
528 /// @param EId Edge id.
529 /// @return Edge cost matrix.
530 const Matrix& getEdgeCosts(EdgeId EId) const {
531 return *getEdge(EId).Costs;
532 }
533
534 EdgeMetadata& getEdgeMetadata(EdgeId EId) {
535 return getEdge(EId).Metadata;
536 }
537
538 const EdgeMetadata& getEdgeMetadata(EdgeId EId) const {
539 return getEdge(EId).Metadata;
540 }
541
542 /// Get the first node connected to this edge.
543 /// @param EId Edge id.
544 /// @return The first node connected to the given edge.
545 NodeId getEdgeNode1Id(EdgeId EId) const {
546 return getEdge(EId).getN1Id();
547 }
548
549 /// Get the second node connected to this edge.
550 /// @param EId Edge id.
551 /// @return The second node connected to the given edge.
552 NodeId getEdgeNode2Id(EdgeId EId) const {
553 return getEdge(EId).getN2Id();
554 }
555
556 /// Get the "other" node connected to this edge.
557 /// @param EId Edge id.
558 /// @param NId Node id for the "given" node.
559 /// @return The iterator for the "other" node connected to this edge.
560 NodeId getEdgeOtherNodeId(EdgeId EId, NodeId NId) {
561 EdgeEntry &E = getEdge(EId);
562 if (E.getN1Id() == NId) {
563 return E.getN2Id();
564 } // else
565 return E.getN1Id();
566 }
567
568 /// Get the edge connecting two nodes.
569 /// @param N1Id First node id.
570 /// @param N2Id Second node id.
571 /// @return An id for edge (N1Id, N2Id) if such an edge exists,
572 /// otherwise returns an invalid edge id.
573 EdgeId findEdge(NodeId N1Id, NodeId N2Id) {
574 for (auto AEId : adjEdgeIds(N1Id)) {
575 if ((getEdgeNode1Id(AEId) == N2Id) ||
576 (getEdgeNode2Id(AEId) == N2Id)) {
577 return AEId;
578 }
579 }
580 return invalidEdgeId();
581 }
582
583 /// Remove a node from the graph.
584 /// @param NId Node id.
585 void removeNode(NodeId NId) {
586 if (Solver)
587 Solver->handleRemoveNode(NId);
588 NodeEntry &N = getNode(NId);
589 // TODO: Can this be for-each'd?
590 for (AdjEdgeItr AEItr = N.adjEdgesBegin(),
591 AEEnd = N.adjEdgesEnd();
592 AEItr != AEEnd;) {
593 EdgeId EId = *AEItr;
594 ++AEItr;
595 removeEdge(EId);
596 }
597 FreeNodeIds.push_back(NId);
598 }
599
600 /// Disconnect an edge from the given node.
601 ///
602 /// Removes the given edge from the adjacency list of the given node.
603 /// This operation leaves the edge in an 'asymmetric' state: It will no
604 /// longer appear in an iteration over the given node's (NId's) edges, but
605 /// will appear in an iteration over the 'other', unnamed node's edges.
606 ///
607 /// This does not correspond to any normal graph operation, but exists to
608 /// support efficient PBQP graph-reduction based solvers. It is used to
609 /// 'effectively' remove the unnamed node from the graph while the solver
610 /// is performing the reduction. The solver will later call reconnectNode
611 /// to restore the edge in the named node's adjacency list.
612 ///
613 /// Since the degree of a node is the number of connected edges,
614 /// disconnecting an edge from a node 'u' will cause the degree of 'u' to
615 /// drop by 1.
616 ///
617 /// A disconnected edge WILL still appear in an iteration over the graph
618 /// edges.
619 ///
620 /// A disconnected edge should not be removed from the graph, it should be
621 /// reconnected first.
622 ///
623 /// A disconnected edge can be reconnected by calling the reconnectEdge
624 /// method.
625 void disconnectEdge(EdgeId EId, NodeId NId) {
626 if (Solver)
627 Solver->handleDisconnectEdge(EId, NId);
628
629 EdgeEntry &E = getEdge(EId);
630 E.disconnectFrom(*this, NId);
631 }
632
633 /// Convenience method to disconnect all neighbours from the given
634 /// node.
635 void disconnectAllNeighborsFromNode(NodeId NId) {
636 for (auto AEId : adjEdgeIds(NId))
637 disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
638 }
639
640 /// Re-attach an edge to its nodes.
641 ///
642 /// Adds an edge that had been previously disconnected back into the
643 /// adjacency set of the nodes that the edge connects.
644 void reconnectEdge(EdgeId EId, NodeId NId) {
645 EdgeEntry &E = getEdge(EId);
646 E.connectTo(*this, EId, NId);
647 if (Solver)
648 Solver->handleReconnectEdge(EId, NId);
649 }
650
651 /// Remove an edge from the graph.
652 /// @param EId Edge id.
653 void removeEdge(EdgeId EId) {
654 if (Solver)
655 Solver->handleRemoveEdge(EId);
656 EdgeEntry &E = getEdge(EId);
657 E.disconnect();
658 FreeEdgeIds.push_back(EId);
659 Edges[EId].invalidate();
660 }
661
662 /// Remove all nodes and edges from the graph.
663 void clear() {
664 Nodes.clear();
665 FreeNodeIds.clear();
666 Edges.clear();
667 FreeEdgeIds.clear();
668 }
669 };
670
671} // end namespace PBQP
672} // end namespace llvm
673
674#endif // LLVM_CODEGEN_PBQP_GRAPH_H
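
The Graph<SolverT> interface listed above can be exercised without a register-allocation solver attached. The following is a minimal sketch, not part of LLVM: it assumes only the typedefs and handler signatures that Graph visibly requires of its SolverT parameter, and the TrivialSolver traits class is a hypothetical stand-in for RegAllocSolverImpl. It builds a two-node graph, adds one edge, and walks through the disconnect/reconnect protocol documented at lines 600-624 above.

#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Math.h"
#include <cassert>

namespace {

// Hypothetical solver-traits class: just enough for Graph<> to instantiate.
struct TrivialSolver {
  using RawVector = llvm::PBQP::Vector;
  using RawMatrix = llvm::PBQP::Matrix;
  using Vector = llvm::PBQP::Vector;
  using Matrix = llvm::PBQP::Matrix;
  using CostAllocator = llvm::PBQP::PoolCostAllocator<Vector, Matrix>;

  struct NodeMetadata {};
  struct EdgeMetadata {};
  struct GraphMetadata {};

  // Referenced by addNode/addEdge/disconnectEdge/reconnectEdge; never called
  // here because setSolver() is not used.
  void handleAddNode(llvm::PBQP::GraphBase::NodeId) {}
  void handleAddEdge(llvm::PBQP::GraphBase::EdgeId) {}
  void handleDisconnectEdge(llvm::PBQP::GraphBase::EdgeId,
                            llvm::PBQP::GraphBase::NodeId) {}
  void handleReconnectEdge(llvm::PBQP::GraphBase::EdgeId,
                           llvm::PBQP::GraphBase::NodeId) {}
};

} // namespace

int main() {
  llvm::PBQP::Graph<TrivialSolver> G;

  // Two nodes with 2-entry cost vectors and one edge with a 2x2 cost matrix;
  // addNode/addEdge route the costs through the pooling allocator.
  auto N1 = G.addNode(llvm::PBQP::Vector(2, 0.0));
  auto N2 = G.addNode(llvm::PBQP::Vector(2, 0.0));
  auto E = G.addEdge(N1, N2, llvm::PBQP::Matrix(2, 2, 1.0));
  assert(G.getNumNodes() == 2 && G.getNumEdges() == 1);

  // Disconnect/reconnect protocol: the edge leaves N2's adjacency list (its
  // degree drops) but still exists in the graph until removeEdge is called.
  G.disconnectEdge(E, N2);
  assert(G.getNodeDegree(N2) == 0);
  G.reconnectEdge(E, N2);
  assert(G.getNodeDegree(N2) == 1);
  return 0;
}
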

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/PBQP/CostAllocator.h

1//===- CostAllocator.h - PBQP Cost Allocator --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Defines classes conforming to the PBQP cost value manager concept.
10//
11// Cost value managers are memory managers for PBQP cost values (vectors and
12// matrices). Since PBQP graphs can grow very large (e.g. hundreds of thousands
13// of edges on the largest function in SPEC2006), cost values are pooled and shared.
14//
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
18#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
19
20#include "llvm/ADT/DenseSet.h"
21#include <algorithm>
22#include <cstdint>
23#include <memory>
24
25namespace llvm {
26namespace PBQP {
27
28template <typename ValueT> class ValuePool {
29public:
30 using PoolRef = std::shared_ptr<const ValueT>;
31
32private:
33 class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
34 public:
35 template <typename ValueKeyT>
36 PoolEntry(ValuePool &Pool, ValueKeyT Value)
37 : Pool(Pool), Value(std::move(Value)) {}
14
Calling constructor for 'MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>'
38
39 ~PoolEntry() { Pool.removeEntry(this); }
40
41 const ValueT &getValue() const { return Value; }
42
43 private:
44 ValuePool &Pool;
45 ValueT Value;
46 };
47
48 class PoolEntryDSInfo {
49 public:
50 static inline PoolEntry *getEmptyKey() { return nullptr; }
51
52 static inline PoolEntry *getTombstoneKey() {
53 return reinterpret_cast<PoolEntry *>(static_cast<uintptr_t>(1));
54 }
55
56 template <typename ValueKeyT>
57 static unsigned getHashValue(const ValueKeyT &C) {
58 return hash_value(C);
59 }
60
61 static unsigned getHashValue(PoolEntry *P) {
62 return getHashValue(P->getValue());
63 }
64
65 static unsigned getHashValue(const PoolEntry *P) {
66 return getHashValue(P->getValue());
67 }
68
69 template <typename ValueKeyT1, typename ValueKeyT2>
70 static bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
71 return C1 == C2;
72 }
73
74 template <typename ValueKeyT>
75 static bool isEqual(const ValueKeyT &C, PoolEntry *P) {
76 if (P == getEmptyKey() || P == getTombstoneKey())
77 return false;
78 return isEqual(C, P->getValue());
79 }
80
81 static bool isEqual(PoolEntry *P1, PoolEntry *P2) {
82 if (P1 == getEmptyKey() || P1 == getTombstoneKey())
83 return P1 == P2;
84 return isEqual(P1->getValue(), P2);
85 }
86 };
87
88 using EntrySetT = DenseSet<PoolEntry *, PoolEntryDSInfo>;
89
90 EntrySetT EntrySet;
91
92 void removeEntry(PoolEntry *P) { EntrySet.erase(P); }
93
94public:
95 template <typename ValueKeyT> PoolRef getValue(ValueKeyT ValueKey) {
96 typename EntrySetT::iterator I = EntrySet.find_as(ValueKey);
97
98 if (I != EntrySet.end())
9
Taking false branch
99 return PoolRef((*I)->shared_from_this(), &(*I)->getValue());
100
101 auto P = std::make_shared<PoolEntry>(*this, std::move(ValueKey));
10
Calling 'make_shared<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry, llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>> &, llvm::PBQP::Matrix, void>'
102 EntrySet.insert(P.get());
103 return PoolRef(std::move(P), &P->getValue());
104 }
105};
106
107template <typename VectorT, typename MatrixT> class PoolCostAllocator {
108private:
109 using VectorCostPool = ValuePool<VectorT>;
110 using MatrixCostPool = ValuePool<MatrixT>;
111
112public:
113 using Vector = VectorT;
114 using Matrix = MatrixT;
115 using VectorPtr = typename VectorCostPool::PoolRef;
116 using MatrixPtr = typename MatrixCostPool::PoolRef;
117
118 template <typename VectorKeyT> VectorPtr getVector(VectorKeyT v) {
119 return VectorPool.getValue(std::move(v));
120 }
121
122 template <typename MatrixKeyT> MatrixPtr getMatrix(MatrixKeyT m) {
123 return MatrixPool.getValue(std::move(m));
8
Calling 'ValuePool::getValue'
124 }
125
126private:
127 VectorCostPool VectorPool;
128 MatrixCostPool MatrixPool;
129};
130
131} // end namespace PBQP
132} // end namespace llvm
133
134#endif // LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
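
A brief sketch (not part of LLVM) of the uniquing behaviour this allocator provides: two equal cost values handed to the same pool come back as PoolRefs aliasing a single PoolEntry, which is what makes the "bypassing" add methods in Graph.h worthwhile. It assumes llvm::PBQP::Vector and Matrix from Math.h, which supply the hash_value and operator== that PoolEntryDSInfo relies on.

#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/Math.h"
#include <cassert>

int main() {
  using namespace llvm::PBQP;

  PoolCostAllocator<Vector, Matrix> Alloc;

  // Two separately constructed but equal matrices: the second getMatrix()
  // call finds the existing PoolEntry via the DenseSet lookup and aliases
  // its shared_from_this() ownership, so both PoolRefs point at one value.
  auto M1 = Alloc.getMatrix(Matrix(2, 2, 5.0));
  auto M2 = Alloc.getMatrix(Matrix(2, 2, 5.0));
  assert(M1.get() == M2.get());

  // A different value gets its own pooled entry.
  auto M3 = Alloc.getMatrix(Matrix(2, 2, 7.0));
  assert(M3.get() != M1.get());
  return 0;
}
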

/usr/include/c++/v1/__memory/shared_ptr.h

1// -*- C++ -*-
2//===----------------------------------------------------------------------===//
3//
4// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5// See https://llvm.org/LICENSE.txt for license information.
6// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef _LIBCPP___MEMORY_SHARED_PTR_H
11#define _LIBCPP___MEMORY_SHARED_PTR_H
12
13#include <__availability>
14#include <__config>
15#include <__functional_base>
16#include <__functional/binary_function.h>
17#include <__functional/operations.h>
18#include <__functional/reference_wrapper.h>
19#include <__memory/addressof.h>
20#include <__memory/allocation_guard.h>
21#include <__memory/allocator_traits.h>
22#include <__memory/allocator.h>
23#include <__memory/compressed_pair.h>
24#include <__memory/pointer_traits.h>
25#include <__memory/unique_ptr.h>
26#include <__utility/forward.h>
27#include <cstddef>
28#include <cstdlib> // abort
29#include <iosfwd>
30#include <stdexcept>
31#include <typeinfo>
32#include <type_traits>
33#include <utility>
34#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
35# include <atomic>
36#endif
37
38#if _LIBCPP_STD_VER14 <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR)
39# include <__memory/auto_ptr.h>
40#endif
41
42#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
43#pragma GCC system_header
44#endif
45
46_LIBCPP_PUSH_MACROSpush_macro("min") push_macro("max")
47#include <__undef_macros>
48
49_LIBCPP_BEGIN_NAMESPACE_STDnamespace std { inline namespace __1 {
50
51template <class _Alloc>
52class __allocator_destructor
53{
54 typedef _LIBCPP_NODEBUG_TYPE__attribute__((nodebug)) allocator_traits<_Alloc> __alloc_traits;
55public:
56 typedef _LIBCPP_NODEBUG_TYPE__attribute__((nodebug)) typename __alloc_traits::pointer pointer;
57 typedef _LIBCPP_NODEBUG_TYPE__attribute__((nodebug)) typename __alloc_traits::size_type size_type;
58private:
59 _Alloc& __alloc_;
60 size_type __s_;
61public:
62 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
__allocator_destructor(_Alloc& __a, size_type __s)
63 _NOEXCEPTnoexcept
64 : __alloc_(__a), __s_(__s) {}
65 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
66 void operator()(pointer __p) _NOEXCEPTnoexcept
67 {__alloc_traits::deallocate(__alloc_, __p, __s_);}
68};
69
70// NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively)
71// should be sufficient for thread safety.
72// See https://llvm.org/PR22803
73#if defined(__clang__1) && __has_builtin(__atomic_add_fetch)1 \
74 && defined(__ATOMIC_RELAXED0) \
75 && defined(__ATOMIC_ACQ_REL4)
76# define _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT
77#elif defined(_LIBCPP_COMPILER_GCC)
78# define _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT
79#endif
80
81template <class _ValueType>
82inline _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
83_ValueType __libcpp_relaxed_load(_ValueType const* __value) {
84#if !defined(_LIBCPP_HAS_NO_THREADS) && \
85 defined(__ATOMIC_RELAXED0) && \
86 (__has_builtin(__atomic_load_n)1 || defined(_LIBCPP_COMPILER_GCC))
87 return __atomic_load_n(__value, __ATOMIC_RELAXED0);
88#else
89 return *__value;
90#endif
91}
92
93template <class _ValueType>
94inline _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
95_ValueType __libcpp_acquire_load(_ValueType const* __value) {
96#if !defined(_LIBCPP_HAS_NO_THREADS) && \
97 defined(__ATOMIC_ACQUIRE2) && \
98 (__has_builtin(__atomic_load_n)1 || defined(_LIBCPP_COMPILER_GCC))
99 return __atomic_load_n(__value, __ATOMIC_ACQUIRE2);
100#else
101 return *__value;
102#endif
103}
104
105template <class _Tp>
106inline _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
_Tp
107__libcpp_atomic_refcount_increment(_Tp& __t) _NOEXCEPTnoexcept
108{
109#if defined(_LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT) && !defined(_LIBCPP_HAS_NO_THREADS)
110 return __atomic_add_fetch(&__t, 1, __ATOMIC_RELAXED0);
111#else
112 return __t += 1;
113#endif
114}
115
116template <class _Tp>
117inline _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
_Tp
118__libcpp_atomic_refcount_decrement(_Tp& __t) _NOEXCEPTnoexcept
119{
120#if defined(_LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT) && !defined(_LIBCPP_HAS_NO_THREADS)
121 return __atomic_add_fetch(&__t, -1, __ATOMIC_ACQ_REL4);
122#else
123 return __t -= 1;
124#endif
125}
126
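
As a stand-alone illustration of the ordering rule stated in the comment at lines 70-72 above (not libc++ code), the same discipline expressed with std::atomic: increments can be relaxed, while the decrement that may hand the object to its destroyer needs acquire/release.

#include <atomic>
#include <cassert>

struct SketchRefCount {
  std::atomic<long> Count{1}; // starts with one owner

  void retain() { Count.fetch_add(1, std::memory_order_relaxed); }

  // True for the final release; acq_rel makes every owner's prior writes
  // visible to whichever thread ends up destroying the payload.
  bool release() { return Count.fetch_sub(1, std::memory_order_acq_rel) == 1; }
};

int main() {
  SketchRefCount R;
  R.retain();               // two owners now
  bool LastA = R.release(); // back to one owner
  bool LastB = R.release(); // final release; destruction would be safe here
  assert(!LastA && LastB);
  return (!LastA && LastB) ? 0 : 1;
}
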
127class _LIBCPP_EXCEPTION_ABI__attribute__ ((__visibility__("default"))) bad_weak_ptr
128 : public std::exception
129{
130public:
131 bad_weak_ptr() _NOEXCEPTnoexcept = default;
132 bad_weak_ptr(const bad_weak_ptr&) _NOEXCEPTnoexcept = default;
133 virtual ~bad_weak_ptr() _NOEXCEPTnoexcept;
134 virtual const char* what() const _NOEXCEPTnoexcept;
135};
136
137_LIBCPP_NORETURN[[noreturn]] inline _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
138void __throw_bad_weak_ptr()
139{
140#ifndef _LIBCPP_NO_EXCEPTIONS
141 throw bad_weak_ptr();
142#else
143 _VSTDstd::__1::abort();
144#endif
145}
146
147template<class _Tp> class _LIBCPP_TEMPLATE_VIS__attribute__ ((__type_visibility__("default"))) weak_ptr;
148
149class _LIBCPP_TYPE_VIS__attribute__ ((__visibility__("default"))) __shared_count
150{
151 __shared_count(const __shared_count&);
152 __shared_count& operator=(const __shared_count&);
153
154protected:
155 long __shared_owners_;
156 virtual ~__shared_count();
157private:
158 virtual void __on_zero_shared() _NOEXCEPTnoexcept = 0;
159
160public:
161 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
162 explicit __shared_count(long __refs = 0) _NOEXCEPTnoexcept
163 : __shared_owners_(__refs) {}
164
165#if defined(_LIBCPP_BUILDING_LIBRARY) && \
166 defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)
167 void __add_shared() _NOEXCEPTnoexcept;
168 bool __release_shared() _NOEXCEPTnoexcept;
169#else
170 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
171 void __add_shared() _NOEXCEPTnoexcept {
172 __libcpp_atomic_refcount_increment(__shared_owners_);
173 }
174 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
175 bool __release_shared() _NOEXCEPTnoexcept {
176 if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1) {
177 __on_zero_shared();
178 return true;
179 }
180 return false;
181 }
182#endif
183 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
184 long use_count() const _NOEXCEPTnoexcept {
185 return __libcpp_relaxed_load(&__shared_owners_) + 1;
186 }
187};
188
189class _LIBCPP_TYPE_VIS__attribute__ ((__visibility__("default"))) __shared_weak_count
190 : private __shared_count
191{
192 long __shared_weak_owners_;
193
194public:
195 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
196 explicit __shared_weak_count(long __refs = 0) _NOEXCEPTnoexcept
197 : __shared_count(__refs),
198 __shared_weak_owners_(__refs) {}
199protected:
200 virtual ~__shared_weak_count();
201
202public:
203#if defined(_LIBCPP_BUILDING_LIBRARY) && \
204 defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)
205 void __add_shared() _NOEXCEPTnoexcept;
206 void __add_weak() _NOEXCEPTnoexcept;
207 void __release_shared() _NOEXCEPTnoexcept;
208#else
209 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
210 void __add_shared() _NOEXCEPTnoexcept {
211 __shared_count::__add_shared();
212 }
213 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
214 void __add_weak() _NOEXCEPTnoexcept {
215 __libcpp_atomic_refcount_increment(__shared_weak_owners_);
216 }
217 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
218 void __release_shared() _NOEXCEPTnoexcept {
219 if (__shared_count::__release_shared())
220 __release_weak();
221 }
222#endif
223 void __release_weak() _NOEXCEPTnoexcept;
224 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
225 long use_count() const _NOEXCEPTnoexcept {return __shared_count::use_count();}
226 __shared_weak_count* lock() _NOEXCEPTnoexcept;
227
228 virtual const void* __get_deleter(const type_info&) const _NOEXCEPTnoexcept;
229private:
230 virtual void __on_zero_shared_weak() _NOEXCEPTnoexcept = 0;
231};
232
233template <class _Tp, class _Dp, class _Alloc>
234class __shared_ptr_pointer
235 : public __shared_weak_count
236{
237 __compressed_pair<__compressed_pair<_Tp, _Dp>, _Alloc> __data_;
238public:
239 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
240 __shared_ptr_pointer(_Tp __p, _Dp __d, _Alloc __a)
241 : __data_(__compressed_pair<_Tp, _Dp>(__p, _VSTDstd::__1::move(__d)), _VSTDstd::__1::move(__a)) {}
242
243#ifndef _LIBCPP_NO_RTTI
244 virtual const void* __get_deleter(const type_info&) const _NOEXCEPTnoexcept;
245#endif
246
247private:
248 virtual void __on_zero_shared() _NOEXCEPTnoexcept;
249 virtual void __on_zero_shared_weak() _NOEXCEPTnoexcept;
250};
251
252#ifndef _LIBCPP_NO_RTTI
253
254template <class _Tp, class _Dp, class _Alloc>
255const void*
256__shared_ptr_pointer<_Tp, _Dp, _Alloc>::__get_deleter(const type_info& __t) const _NOEXCEPTnoexcept
257{
258 return __t == typeid(_Dp) ? _VSTDstd::__1::addressof(__data_.first().second()) : nullptr;
259}
260
261#endif // _LIBCPP_NO_RTTI
262
263template <class _Tp, class _Dp, class _Alloc>
264void
265__shared_ptr_pointer<_Tp, _Dp, _Alloc>::__on_zero_shared() _NOEXCEPTnoexcept
266{
267 __data_.first().second()(__data_.first().first());
268 __data_.first().second().~_Dp();
269}
270
271template <class _Tp, class _Dp, class _Alloc>
272void
273__shared_ptr_pointer<_Tp, _Dp, _Alloc>::__on_zero_shared_weak() _NOEXCEPTnoexcept
274{
275 typedef typename __allocator_traits_rebind<_Alloc, __shared_ptr_pointer>::type _Al;
276 typedef allocator_traits<_Al> _ATraits;
277 typedef pointer_traits<typename _ATraits::pointer> _PTraits;
278
279 _Al __a(__data_.second());
280 __data_.second().~_Alloc();
281 __a.deallocate(_PTraits::pointer_to(*this), 1);
282}
283
284template <class _Tp, class _Alloc>
285struct __shared_ptr_emplace
286 : __shared_weak_count
287{
288 template<class ..._Args>
289 _LIBCPP_HIDE_FROM_ABI__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
290 explicit __shared_ptr_emplace(_Alloc __a, _Args&& ...__args)
291 : __storage_(_VSTDstd::__1::move(__a))
292 {
293#if _LIBCPP_STD_VER14 > 17
294 using _TpAlloc = typename __allocator_traits_rebind<_Alloc, _Tp>::type;
295 _TpAlloc __tmp(*__get_alloc());
296 allocator_traits<_TpAlloc>::construct(__tmp, __get_elem(), _VSTDstd::__1::forward<_Args>(__args)...);
297#else
298 ::new ((void*)__get_elem()) _Tp(_VSTDstd::__1::forward<_Args>(__args)...);
13
Calling constructor for 'PoolEntry'
299#endif
300 }
301
302 _LIBCPP_HIDE_FROM_ABI__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
303 _Alloc* __get_alloc() _NOEXCEPTnoexcept { return __storage_.__get_alloc(); }
304
305 _LIBCPP_HIDE_FROM_ABI__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
306 _Tp* __get_elem() _NOEXCEPTnoexcept { return __storage_.__get_elem(); }
307
308private:
309 virtual void __on_zero_shared() _NOEXCEPTnoexcept {
310#if _LIBCPP_STD_VER14 > 17
311 using _TpAlloc = typename __allocator_traits_rebind<_Alloc, _Tp>::type;
312 _TpAlloc __tmp(*__get_alloc());
313 allocator_traits<_TpAlloc>::destroy(__tmp, __get_elem());
314#else
315 __get_elem()->~_Tp();
316#endif
317 }
318
319 virtual void __on_zero_shared_weak() _NOEXCEPTnoexcept {
320 using _ControlBlockAlloc = typename __allocator_traits_rebind<_Alloc, __shared_ptr_emplace>::type;
321 using _ControlBlockPointer = typename allocator_traits<_ControlBlockAlloc>::pointer;
322 _ControlBlockAlloc __tmp(*__get_alloc());
323 __storage_.~_Storage();
324 allocator_traits<_ControlBlockAlloc>::deallocate(__tmp,
325 pointer_traits<_ControlBlockPointer>::pointer_to(*this), 1);
326 }
327
328 // This class implements the control block for non-array shared pointers created
329 // through `std::allocate_shared` and `std::make_shared`.
330 //
331 // In previous versions of the library, we used a compressed pair to store
332 // both the _Alloc and the _Tp. This implies using EBO, which is incompatible
333 // with Allocator construction for _Tp. To allow implementing P0674 in C++20,
334 // we now use a properly aligned char buffer while making sure that we maintain
335 // the same layout that we had when we used a compressed pair.
336 using _CompressedPair = __compressed_pair<_Alloc, _Tp>;
337 struct _ALIGNAS_TYPE(_CompressedPair)alignas(_CompressedPair) _Storage {
338 char __blob_[sizeof(_CompressedPair)];
339
340 _LIBCPP_HIDE_FROM_ABI__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
explicit _Storage(_Alloc&& __a) {
341 ::new ((void*)__get_alloc()) _Alloc(_VSTDstd::__1::move(__a));
342 }
343 _LIBCPP_HIDE_FROM_ABI__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
~_Storage() {
344 __get_alloc()->~_Alloc();
345 }
346 _Alloc* __get_alloc() _NOEXCEPTnoexcept {
347 _CompressedPair *__as_pair = reinterpret_cast<_CompressedPair*>(__blob_);
348 typename _CompressedPair::_Base1* __first = _CompressedPair::__get_first_base(__as_pair);
349 _Alloc *__alloc = reinterpret_cast<_Alloc*>(__first);
350 return __alloc;
351 }
352 _LIBCPP_NO_CFI__attribute__((__no_sanitize__("cfi"))) _Tp* __get_elem() _NOEXCEPTnoexcept {
353 _CompressedPair *__as_pair = reinterpret_cast<_CompressedPair*>(__blob_);
354 typename _CompressedPair::_Base2* __second = _CompressedPair::__get_second_base(__as_pair);
355 _Tp *__elem = reinterpret_cast<_Tp*>(__second);
356 return __elem;
357 }
358 };
359
360 static_assert(_LIBCPP_ALIGNOF(_Storage)alignof(_Storage) == _LIBCPP_ALIGNOF(_CompressedPair)alignof(_CompressedPair), "");
361 static_assert(sizeof(_Storage) == sizeof(_CompressedPair), "");
362 _Storage __storage_;
363};
364
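
A hypothetical, generic sketch of the storage pattern _Storage uses above (not libc++ code): keep correctly aligned raw bytes for T, then construct and destroy the object explicitly, so that allocating the block and constructing the element are decoupled the way the control block needs.

#include <new>
#include <string>
#include <utility>

template <typename T>
class ManualSlot {
  alignas(T) unsigned char Blob[sizeof(T)];
  T *Obj = nullptr; // set by construct(); keeps get() simple and well-defined

public:
  template <typename... Args>
  T *construct(Args &&...As) {
    Obj = ::new (static_cast<void *>(Blob)) T(std::forward<Args>(As)...);
    return Obj;
  }
  T *get() { return Obj; }
  void destroy() {
    Obj->~T();
    Obj = nullptr;
  }
};

int main() {
  ManualSlot<std::string> Slot;    // no std::string constructed yet
  Slot.construct("constructed later");
  bool NonEmpty = !Slot.get()->empty();
  Slot.destroy();                  // explicit teardown, mirroring ~_Storage
  return NonEmpty ? 0 : 1;
}
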
365struct __shared_ptr_dummy_rebind_allocator_type;
366template <>
367class _LIBCPP_TEMPLATE_VIS__attribute__ ((__type_visibility__("default"))) allocator<__shared_ptr_dummy_rebind_allocator_type>
368{
369public:
370 template <class _Other>
371 struct rebind
372 {
373 typedef allocator<_Other> other;
374 };
375};
376
377template<class _Tp> class _LIBCPP_TEMPLATE_VIS__attribute__ ((__type_visibility__("default"))) enable_shared_from_this;
378
379template<class _Tp, class _Up>
380struct __compatible_with
381#if _LIBCPP_STD_VER14 > 14
382 : is_convertible<remove_extent_t<_Tp>*, remove_extent_t<_Up>*> {};
383#else
384 : is_convertible<_Tp*, _Up*> {};
385#endif // _LIBCPP_STD_VER > 14
386
387template <class _Ptr, class = void>
388struct __is_deletable : false_type { };
389template <class _Ptr>
390struct __is_deletable<_Ptr, decltype(delete declval<_Ptr>())> : true_type { };
391
392template <class _Ptr, class = void>
393struct __is_array_deletable : false_type { };
394template <class _Ptr>
395struct __is_array_deletable<_Ptr, decltype(delete[] declval<_Ptr>())> : true_type { };
396
397template <class _Dp, class _Pt,
398 class = decltype(declval<_Dp>()(declval<_Pt>()))>
399static true_type __well_formed_deleter_test(int);
400
401template <class, class>
402static false_type __well_formed_deleter_test(...);
403
404template <class _Dp, class _Pt>
405struct __well_formed_deleter : decltype(__well_formed_deleter_test<_Dp, _Pt>(0)) {};
406
407template<class _Dp, class _Tp, class _Yp>
408struct __shared_ptr_deleter_ctor_reqs
409{
410 static const bool value = __compatible_with<_Tp, _Yp>::value &&
411 is_move_constructible<_Dp>::value &&
412 __well_formed_deleter<_Dp, _Tp*>::value;
413};
414
415#if defined(_LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI)
416# define _LIBCPP_SHARED_PTR_TRIVIAL_ABI __attribute__((trivial_abi))
417#else
418# define _LIBCPP_SHARED_PTR_TRIVIAL_ABI
419#endif
420
421template<class _Tp>
422class _LIBCPP_SHARED_PTR_TRIVIAL_ABI _LIBCPP_TEMPLATE_VIS__attribute__ ((__type_visibility__("default"))) shared_ptr
423{
424public:
425#if _LIBCPP_STD_VER14 > 14
426 typedef weak_ptr<_Tp> weak_type;
427 typedef remove_extent_t<_Tp> element_type;
428#else
429 typedef _Tp element_type;
430#endif
431
432private:
433 element_type* __ptr_;
434 __shared_weak_count* __cntrl_;
435
436 struct __nat {int __for_bool_;};
437public:
438 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
439 _LIBCPP_CONSTEXPRconstexpr shared_ptr() _NOEXCEPTnoexcept;
440 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
441 _LIBCPP_CONSTEXPRconstexpr shared_ptr(nullptr_t) _NOEXCEPTnoexcept;
442
443 template<class _Yp, class = _EnableIf<
444 _And<
445 __compatible_with<_Yp, _Tp>
446 // In C++03 we get errors when trying to do SFINAE with the
447 // delete operator, so we always pretend that it's deletable.
448 // The same happens on GCC.
449#if !defined(_LIBCPP_CXX03_LANG) && !defined(_LIBCPP_COMPILER_GCC)
450 , _If<is_array<_Tp>::value, __is_array_deletable<_Yp*>, __is_deletable<_Yp*> >
451#endif
452 >::value
453 > >
454 explicit shared_ptr(_Yp* __p) : __ptr_(__p) {
455 unique_ptr<_Yp> __hold(__p);
456 typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
457 typedef __shared_ptr_pointer<_Yp*, __shared_ptr_default_delete<_Tp, _Yp>, _AllocT > _CntrlBlk;
458 __cntrl_ = new _CntrlBlk(__p, __shared_ptr_default_delete<_Tp, _Yp>(), _AllocT());
459 __hold.release();
460 __enable_weak_this(__p, __p);
461 }
462
463 template<class _Yp, class _Dp>
464 shared_ptr(_Yp* __p, _Dp __d,
465 typename enable_if<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, element_type>::value, __nat>::type = __nat());
466 template<class _Yp, class _Dp, class _Alloc>
467 shared_ptr(_Yp* __p, _Dp __d, _Alloc __a,
468 typename enable_if<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, element_type>::value, __nat>::type = __nat());
469 template <class _Dp> shared_ptr(nullptr_t __p, _Dp __d);
470 template <class _Dp, class _Alloc> shared_ptr(nullptr_t __p, _Dp __d, _Alloc __a);
471 template<class _Yp> _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
shared_ptr(const shared_ptr<_Yp>& __r, element_type* __p) _NOEXCEPTnoexcept;
472 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
473 shared_ptr(const shared_ptr& __r) _NOEXCEPTnoexcept;
474 template<class _Yp>
475 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
476 shared_ptr(const shared_ptr<_Yp>& __r,
477 typename enable_if<__compatible_with<_Yp, element_type>::value, __nat>::type = __nat())
478 _NOEXCEPTnoexcept;
479 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
480 shared_ptr(shared_ptr&& __r) _NOEXCEPTnoexcept;
481 template<class _Yp> _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
shared_ptr(shared_ptr<_Yp>&& __r,
482 typename enable_if<__compatible_with<_Yp, element_type>::value, __nat>::type = __nat())
483 _NOEXCEPTnoexcept;
484 template<class _Yp> explicit shared_ptr(const weak_ptr<_Yp>& __r,
485 typename enable_if<is_convertible<_Yp*, element_type*>::value, __nat>::type= __nat());
486#if _LIBCPP_STD_VER14 <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR)
487 template<class _Yp>
488 shared_ptr(auto_ptr<_Yp>&& __r,
489 typename enable_if<is_convertible<_Yp*, element_type*>::value, __nat>::type = __nat());
490#endif
491 template <class _Yp, class _Dp>
492 shared_ptr(unique_ptr<_Yp, _Dp>&&,
493 typename enable_if
494 <
495 !is_lvalue_reference<_Dp>::value &&
496 is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
497 __nat
498 >::type = __nat());
499 template <class _Yp, class _Dp>
500 shared_ptr(unique_ptr<_Yp, _Dp>&&,
501 typename enable_if
502 <
503 is_lvalue_reference<_Dp>::value &&
504 is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
505 __nat
506 >::type = __nat());
507
508 ~shared_ptr();
509
510 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
511 shared_ptr& operator=(const shared_ptr& __r) _NOEXCEPTnoexcept;
512 template<class _Yp>
513 typename enable_if
514 <
515 __compatible_with<_Yp, element_type>::value,
516 shared_ptr&
517 >::type
518 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
519 operator=(const shared_ptr<_Yp>& __r) _NOEXCEPTnoexcept;
520 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
521 shared_ptr& operator=(shared_ptr&& __r) _NOEXCEPTnoexcept;
522 template<class _Yp>
523 typename enable_if
524 <
525 __compatible_with<_Yp, element_type>::value,
526 shared_ptr&
527 >::type
528 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
529 operator=(shared_ptr<_Yp>&& __r);
530#if _LIBCPP_STD_VER14 <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR)
531 template<class _Yp>
532 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
533 typename enable_if
534 <
535 !is_array<_Yp>::value &&
536 is_convertible<_Yp*, element_type*>::value,
537 shared_ptr
538 >::type&
539 operator=(auto_ptr<_Yp>&& __r);
540#endif
541 template <class _Yp, class _Dp>
542 typename enable_if
543 <
544 is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
545 shared_ptr&
546 >::type
547 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
548 operator=(unique_ptr<_Yp, _Dp>&& __r);
549
550 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
551 void swap(shared_ptr& __r) _NOEXCEPTnoexcept;
552 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
553 void reset() _NOEXCEPTnoexcept;
554 template<class _Yp>
555 typename enable_if
556 <
557 __compatible_with<_Yp, element_type>::value,
558 void
559 >::type
560 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
561 reset(_Yp* __p);
562 template<class _Yp, class _Dp>
563 typename enable_if
564 <
565 __compatible_with<_Yp, element_type>::value,
566 void
567 >::type
568 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
569 reset(_Yp* __p, _Dp __d);
570 template<class _Yp, class _Dp, class _Alloc>
571 typename enable_if
572 <
573 __compatible_with<_Yp, element_type>::value,
574 void
575 >::type
576 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
577 reset(_Yp* __p, _Dp __d, _Alloc __a);
578
579 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
580 element_type* get() const _NOEXCEPTnoexcept {return __ptr_;}
581 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
582 typename add_lvalue_reference<element_type>::type operator*() const _NOEXCEPTnoexcept
583 {return *__ptr_;}
584 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
585 element_type* operator->() const _NOEXCEPTnoexcept
586 {
587 static_assert(!is_array<_Tp>::value,
588 "std::shared_ptr<T>::operator-> is only valid when T is not an array type.");
589 return __ptr_;
590 }
591 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
592 long use_count() const _NOEXCEPTnoexcept {return __cntrl_ ? __cntrl_->use_count() : 0;}
593 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
594 bool unique() const _NOEXCEPTnoexcept {return use_count() == 1;}
595 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
596 explicit operator bool() const _NOEXCEPTnoexcept {return get() != nullptr;}
597 template <class _Up>
598 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
599 bool owner_before(shared_ptr<_Up> const& __p) const _NOEXCEPTnoexcept
600 {return __cntrl_ < __p.__cntrl_;}
601 template <class _Up>
602 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
603 bool owner_before(weak_ptr<_Up> const& __p) const _NOEXCEPTnoexcept
604 {return __cntrl_ < __p.__cntrl_;}
605 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
606 bool
607 __owner_equivalent(const shared_ptr& __p) const
608 {return __cntrl_ == __p.__cntrl_;}
609
610#if _LIBCPP_STD_VER14 > 14
611 typename add_lvalue_reference<element_type>::type
612 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
613 operator[](ptrdiff_t __i) const
614 {
615 static_assert(is_array<_Tp>::value,
616 "std::shared_ptr<T>::operator[] is only valid when T is an array type.");
617 return __ptr_[__i];
618 }
619#endif
620
621#ifndef _LIBCPP_NO_RTTI
622 template <class _Dp>
623 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
624 _Dp* __get_deleter() const _NOEXCEPTnoexcept
625 {return static_cast<_Dp*>(__cntrl_
626 ? const_cast<void *>(__cntrl_->__get_deleter(typeid(_Dp)))
627 : nullptr);}
628#endif // _LIBCPP_NO_RTTI
629
630 template<class _Yp, class _CntrlBlk>
631 static shared_ptr<_Tp>
632 __create_with_control_block(_Yp* __p, _CntrlBlk* __cntrl) _NOEXCEPTnoexcept
633 {
634 shared_ptr<_Tp> __r;
635 __r.__ptr_ = __p;
636 __r.__cntrl_ = __cntrl;
637 __r.__enable_weak_this(__r.__ptr_, __r.__ptr_);
638 return __r;
639 }
640
641private:
642 template <class _Yp, bool = is_function<_Yp>::value>
643 struct __shared_ptr_default_allocator
644 {
645 typedef allocator<_Yp> type;
646 };
647
648 template <class _Yp>
649 struct __shared_ptr_default_allocator<_Yp, true>
650 {
651 typedef allocator<__shared_ptr_dummy_rebind_allocator_type> type;
652 };
653
654 template <class _Yp, class _OrigPtr>
655 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
656 typename enable_if<is_convertible<_OrigPtr*,
657 const enable_shared_from_this<_Yp>*
658 >::value,
659 void>::type
660 __enable_weak_this(const enable_shared_from_this<_Yp>* __e,
661 _OrigPtr* __ptr) _NOEXCEPTnoexcept
662 {
663 typedef typename remove_cv<_Yp>::type _RawYp;
664 if (__e && __e->__weak_this_.expired())
665 {
666 __e->__weak_this_ = shared_ptr<_RawYp>(*this,
667 const_cast<_RawYp*>(static_cast<const _Yp*>(__ptr)));
668 }
669 }
670
671 _LIBCPP_INLINE_VISIBILITY__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
void __enable_weak_this(...) _NOEXCEPTnoexcept {}
672
673 template <class, class _Yp>
674 struct __shared_ptr_default_delete
675 : default_delete<_Yp> {};
676
677 template <class _Yp, class _Un, size_t _Sz>
678 struct __shared_ptr_default_delete<_Yp[_Sz], _Un>
679 : default_delete<_Yp[]> {};
680
681 template <class _Yp, class _Un>
682 struct __shared_ptr_default_delete<_Yp[], _Un>
683 : default_delete<_Yp[]> {};
684
685 template <class _Up> friend class _LIBCPP_TEMPLATE_VIS__attribute__ ((__type_visibility__("default"))) shared_ptr;
686 template <class _Up> friend class _LIBCPP_TEMPLATE_VIS__attribute__ ((__type_visibility__("default"))) weak_ptr;
687};
688
689#ifndef _LIBCPP_HAS_NO_DEDUCTION_GUIDES
690template<class _Tp>
691shared_ptr(weak_ptr<_Tp>) -> shared_ptr<_Tp>;
692template<class _Tp, class _Dp>
693shared_ptr(unique_ptr<_Tp, _Dp>) -> shared_ptr<_Tp>;
694#endif
695
696template<class _Tp>
697inline
698_LIBCPP_CONSTEXPRconstexpr
699shared_ptr<_Tp>::shared_ptr() _NOEXCEPTnoexcept
700 : __ptr_(nullptr),
701 __cntrl_(nullptr)
702{
703}
704
705template<class _Tp>
706inline
707_LIBCPP_CONSTEXPRconstexpr
708shared_ptr<_Tp>::shared_ptr(nullptr_t) _NOEXCEPTnoexcept
709 : __ptr_(nullptr),
710 __cntrl_(nullptr)
711{
712}
713
714template<class _Tp>
715template<class _Yp, class _Dp>
716shared_ptr<_Tp>::shared_ptr(_Yp* __p, _Dp __d,
717 typename enable_if<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, element_type>::value, __nat>::type)
718 : __ptr_(__p)
719{
720#ifndef _LIBCPP_NO_EXCEPTIONS
721 try
722 {
723#endif // _LIBCPP_NO_EXCEPTIONS
724 typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
725 typedef __shared_ptr_pointer<_Yp*, _Dp, _AllocT > _CntrlBlk;
726#ifndef _LIBCPP_CXX03_LANG
727 __cntrl_ = new _CntrlBlk(__p, _VSTDstd::__1::move(__d), _AllocT());
728#else
729 __cntrl_ = new _CntrlBlk(__p, __d, _AllocT());
730#endif // not _LIBCPP_CXX03_LANG
731 __enable_weak_this(__p, __p);
732#ifndef _LIBCPP_NO_EXCEPTIONS
733 }
734 catch (...)
735 {
736 __d(__p);
737 throw;
738 }
739#endif // _LIBCPP_NO_EXCEPTIONS
740}
741
742template<class _Tp>
743template<class _Dp>
744shared_ptr<_Tp>::shared_ptr(nullptr_t __p, _Dp __d)
745 : __ptr_(nullptr)
746{
747#ifndef _LIBCPP_NO_EXCEPTIONS
748 try
749 {
750#endif // _LIBCPP_NO_EXCEPTIONS
751 typedef typename __shared_ptr_default_allocator<_Tp>::type _AllocT;
752 typedef __shared_ptr_pointer<nullptr_t, _Dp, _AllocT > _CntrlBlk;
753#ifndef _LIBCPP_CXX03_LANG
754 __cntrl_ = new _CntrlBlk(__p, _VSTDstd::__1::move(__d), _AllocT());
755#else
756 __cntrl_ = new _CntrlBlk(__p, __d, _AllocT());
757#endif // not _LIBCPP_CXX03_LANG
758#ifndef _LIBCPP_NO_EXCEPTIONS
759 }
760 catch (...)
761 {
762 __d(__p);
763 throw;
764 }
765#endif // _LIBCPP_NO_EXCEPTIONS
766}
767
768template<class _Tp>
769template<class _Yp, class _Dp, class _Alloc>
770shared_ptr<_Tp>::shared_ptr(_Yp* __p, _Dp __d, _Alloc __a,
771 typename enable_if<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, element_type>::value, __nat>::type)
772 : __ptr_(__p)
773{
774#ifndef _LIBCPP_NO_EXCEPTIONS
775 try
776 {
777#endif // _LIBCPP_NO_EXCEPTIONS
778 typedef __shared_ptr_pointer<_Yp*, _Dp, _Alloc> _CntrlBlk;
779 typedef typename __allocator_traits_rebind<_Alloc, _CntrlBlk>::type _A2;
780 typedef __allocator_destructor<_A2> _D2;
781 _A2 __a2(__a);
782 unique_ptr<_CntrlBlk, _D2> __hold2(__a2.allocate(1), _D2(__a2, 1));
783 ::new ((void*)_VSTDstd::__1::addressof(*__hold2.get()))
784#ifndef _LIBCPP_CXX03_LANG
785 _CntrlBlk(__p, _VSTDstd::__1::move(__d), __a);
786#else
787 _CntrlBlk(__p, __d, __a);
788#endif // not _LIBCPP_CXX03_LANG
789 __cntrl_ = _VSTDstd::__1::addressof(*__hold2.release());
790 __enable_weak_this(__p, __p);
791#ifndef _LIBCPP_NO_EXCEPTIONS
792 }
793 catch (...)
794 {
795 __d(__p);
796 throw;
797 }
798#endif // _LIBCPP_NO_EXCEPTIONS
799}
800
801template<class _Tp>
802template<class _Dp, class _Alloc>
803shared_ptr<_Tp>::shared_ptr(nullptr_t __p, _Dp __d, _Alloc __a)
804 : __ptr_(nullptr)
805{
806#ifndef _LIBCPP_NO_EXCEPTIONS
807 try
808 {
809#endif // _LIBCPP_NO_EXCEPTIONS
810 typedef __shared_ptr_pointer<nullptr_t, _Dp, _Alloc> _CntrlBlk;
811 typedef typename __allocator_traits_rebind<_Alloc, _CntrlBlk>::type _A2;
812 typedef __allocator_destructor<_A2> _D2;
813 _A2 __a2(__a);
814 unique_ptr<_CntrlBlk, _D2> __hold2(__a2.allocate(1), _D2(__a2, 1));
815 ::new ((void*)_VSTDstd::__1::addressof(*__hold2.get()))
816#ifndef _LIBCPP_CXX03_LANG
817 _CntrlBlk(__p, _VSTDstd::__1::move(__d), __a);
818#else
819 _CntrlBlk(__p, __d, __a);
820#endif // not _LIBCPP_CXX03_LANG
821 __cntrl_ = _VSTDstd::__1::addressof(*__hold2.release());
822#ifndef _LIBCPP_NO_EXCEPTIONS
823 }
824 catch (...)
825 {
826 __d(__p);
827 throw;
828 }
829#endif // _LIBCPP_NO_EXCEPTIONS
830}
831
832template<class _Tp>
833template<class _Yp>
834inline
835shared_ptr<_Tp>::shared_ptr(const shared_ptr<_Yp>& __r, element_type *__p) _NOEXCEPTnoexcept
836 : __ptr_(__p),
837 __cntrl_(__r.__cntrl_)
838{
839 if (__cntrl_)
840 __cntrl_->__add_shared();
841}
842
843template<class _Tp>
844inline
845shared_ptr<_Tp>::shared_ptr(const shared_ptr& __r) _NOEXCEPTnoexcept
846 : __ptr_(__r.__ptr_),
847 __cntrl_(__r.__cntrl_)
848{
849 if (__cntrl_)
850 __cntrl_->__add_shared();
851}
852
853template<class _Tp>
854template<class _Yp>
855inline
856shared_ptr<_Tp>::shared_ptr(const shared_ptr<_Yp>& __r,
857 typename enable_if<__compatible_with<_Yp, element_type>::value, __nat>::type)
858 _NOEXCEPTnoexcept
859 : __ptr_(__r.__ptr_),
860 __cntrl_(__r.__cntrl_)
861{
862 if (__cntrl_)
863 __cntrl_->__add_shared();
864}
865
866template<class _Tp>
867inline
868shared_ptr<_Tp>::shared_ptr(shared_ptr&& __r) _NOEXCEPTnoexcept
869 : __ptr_(__r.__ptr_),
870 __cntrl_(__r.__cntrl_)
871{
872 __r.__ptr_ = nullptr;
873 __r.__cntrl_ = nullptr;
874}
875
876template<class _Tp>
877template<class _Yp>
878inline
879shared_ptr<_Tp>::shared_ptr(shared_ptr<_Yp>&& __r,
880 typename enable_if<__compatible_with<_Yp, element_type>::value, __nat>::type)
881 _NOEXCEPTnoexcept
882 : __ptr_(__r.__ptr_),
883 __cntrl_(__r.__cntrl_)
884{
885 __r.__ptr_ = nullptr;
886 __r.__cntrl_ = nullptr;
887}
888
889#if _LIBCPP_STD_VER14 <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR)
890template<class _Tp>
891template<class _Yp>
892shared_ptr<_Tp>::shared_ptr(auto_ptr<_Yp>&& __r,
893 typename enable_if<is_convertible<_Yp*, element_type*>::value, __nat>::type)
894 : __ptr_(__r.get())
895{
896 typedef __shared_ptr_pointer<_Yp*, default_delete<_Yp>, allocator<_Yp> > _CntrlBlk;
897 __cntrl_ = new _CntrlBlk(__r.get(), default_delete<_Yp>(), allocator<_Yp>());
898 __enable_weak_this(__r.get(), __r.get());
899 __r.release();
900}
901#endif
902
903template<class _Tp>
904template <class _Yp, class _Dp>
905shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp>&& __r,
906 typename enable_if
907 <
908 !is_lvalue_reference<_Dp>::value &&
909 is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
910 __nat
911 >::type)
912 : __ptr_(__r.get())
913{
914#if _LIBCPP_STD_VER14 > 11
915 if (__ptr_ == nullptr)
916 __cntrl_ = nullptr;
917 else
918#endif
919 {
920 typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
921 typedef __shared_ptr_pointer<typename unique_ptr<_Yp, _Dp>::pointer, _Dp, _AllocT > _CntrlBlk;
922 __cntrl_ = new _CntrlBlk(__r.get(), __r.get_deleter(), _AllocT());
923 __enable_weak_this(__r.get(), __r.get());
924 }
925 __r.release();
926}
927
928template<class _Tp>
929template <class _Yp, class _Dp>
930shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp>&& __r,
931 typename enable_if
932 <
933 is_lvalue_reference<_Dp>::value &&
934 is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
935 __nat
936 >::type)
937 : __ptr_(__r.get())
938{
939#if _LIBCPP_STD_VER14 > 11
940 if (__ptr_ == nullptr)
941 __cntrl_ = nullptr;
942 else
943#endif
944 {
945 typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
946 typedef __shared_ptr_pointer<typename unique_ptr<_Yp, _Dp>::pointer,
947 reference_wrapper<typename remove_reference<_Dp>::type>,
948 _AllocT > _CntrlBlk;
949 __cntrl_ = new _CntrlBlk(__r.get(), _VSTDstd::__1::ref(__r.get_deleter()), _AllocT());
950 __enable_weak_this(__r.get(), __r.get());
951 }
952 __r.release();
953}
954
955template<class _Tp>
956shared_ptr<_Tp>::~shared_ptr()
957{
958 if (__cntrl_)
959 __cntrl_->__release_shared();
960}
961
962template<class _Tp>
963inline
964shared_ptr<_Tp>&
965shared_ptr<_Tp>::operator=(const shared_ptr& __r) _NOEXCEPTnoexcept
966{
967 shared_ptr(__r).swap(*this);
968 return *this;
969}
970
971template<class _Tp>
972template<class _Yp>
973inline
974typename enable_if
975<
976 __compatible_with<_Yp, typename shared_ptr<_Tp>::element_type>::value,
977 shared_ptr<_Tp>&
978>::type
979shared_ptr<_Tp>::operator=(const shared_ptr<_Yp>& __r) _NOEXCEPTnoexcept
980{
981 shared_ptr(__r).swap(*this);
982 return *this;
983}
984
985template<class _Tp>
986inline
987shared_ptr<_Tp>&
988shared_ptr<_Tp>::operator=(shared_ptr&& __r) _NOEXCEPTnoexcept
989{
990 shared_ptr(_VSTDstd::__1::move(__r)).swap(*this);
991 return *this;
992}
993
994template<class _Tp>
995template<class _Yp>
996inline
997typename enable_if
998<
999 __compatible_with<_Yp, typename shared_ptr<_Tp>::element_type>::value,
1000 shared_ptr<_Tp>&
1001>::type
1002shared_ptr<_Tp>::operator=(shared_ptr<_Yp>&& __r)
1003{
1004 shared_ptr(_VSTDstd::__1::move(__r)).swap(*this);
1005 return *this;
1006}
1007
1008#if _LIBCPP_STD_VER14 <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR)
1009template<class _Tp>
1010template<class _Yp>
1011inline
1012typename enable_if
1013<
1014 !is_array<_Yp>::value &&
1015 is_convertible<_Yp*, typename shared_ptr<_Tp>::element_type*>::value,
1016 shared_ptr<_Tp>
1017>::type&
1018shared_ptr<_Tp>::operator=(auto_ptr<_Yp>&& __r)
1019{
1020 shared_ptr(_VSTDstd::__1::move(__r)).swap(*this);
1021 return *this;
1022}
1023#endif
1024
1025template<class _Tp>
1026template <class _Yp, class _Dp>
1027inline
1028typename enable_if
1029<
1030 is_convertible<typename unique_ptr<_Yp, _Dp>::pointer,
1031 typename shared_ptr<_Tp>::element_type*>::value,
1032 shared_ptr<_Tp>&
1033>::type
1034shared_ptr<_Tp>::operator=(unique_ptr<_Yp, _Dp>&& __r)
1035{
1036 shared_ptr(_VSTDstd::__1::move(__r)).swap(*this);
1037 return *this;
1038}
1039
1040template<class _Tp>
1041inline
1042void
1043shared_ptr<_Tp>::swap(shared_ptr& __r) _NOEXCEPTnoexcept
1044{
1045 _VSTDstd::__1::swap(__ptr_, __r.__ptr_);
1046 _VSTDstd::__1::swap(__cntrl_, __r.__cntrl_);
1047}
1048
1049template<class _Tp>
1050inline
1051void
1052shared_ptr<_Tp>::reset() _NOEXCEPTnoexcept
1053{
1054 shared_ptr().swap(*this);
1055}
1056
1057template<class _Tp>
1058template<class _Yp>
1059inline
1060typename enable_if
1061<
1062 __compatible_with<_Yp, typename shared_ptr<_Tp>::element_type>::value,
1063 void
1064>::type
1065shared_ptr<_Tp>::reset(_Yp* __p)
1066{
1067 shared_ptr(__p).swap(*this);
1068}
1069
1070template<class _Tp>
1071template<class _Yp, class _Dp>
1072inline
1073typename enable_if
1074<
1075 __compatible_with<_Yp, typename shared_ptr<_Tp>::element_type>::value,
1076 void
1077>::type
1078shared_ptr<_Tp>::reset(_Yp* __p, _Dp __d)
1079{
1080 shared_ptr(__p, __d).swap(*this);
1081}
1082
1083template<class _Tp>
1084template<class _Yp, class _Dp, class _Alloc>
1085inline
1086typename enable_if
1087<
1088 __compatible_with<_Yp, typename shared_ptr<_Tp>::element_type>::value,
1089 void
1090>::type
1091shared_ptr<_Tp>::reset(_Yp* __p, _Dp __d, _Alloc __a)
1092{
1093 shared_ptr(__p, __d, __a).swap(*this);
1094}
1095
1096//
1097// std::allocate_shared and std::make_shared
1098//
1099template<class _Tp, class _Alloc, class ..._Args, class = _EnableIf<!is_array<_Tp>::value> >
1100_LIBCPP_HIDE_FROM_ABI__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
1101shared_ptr<_Tp> allocate_shared(const _Alloc& __a, _Args&& ...__args)
1102{
1103 using _ControlBlock = __shared_ptr_emplace<_Tp, _Alloc>;
1104 using _ControlBlockAllocator = typename __allocator_traits_rebind<_Alloc, _ControlBlock>::type;
1105 __allocation_guard<_ControlBlockAllocator> __guard(__a, 1);
1106 ::new ((void*)_VSTDstd::__1::addressof(*__guard.__get())) _ControlBlock(__a, _VSTDstd::__1::forward<_Args>(__args)...);
12
Calling constructor for '__shared_ptr_emplace<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry, std::allocator<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry>>'
1107 auto __control_block = __guard.__release_ptr();
1108 return shared_ptr<_Tp>::__create_with_control_block((*__control_block).__get_elem(), _VSTDstd::__1::addressof(*__control_block));
1109}
1110
1111template<class _Tp, class ..._Args, class = _EnableIf<!is_array<_Tp>::value> >
1112_LIBCPP_HIDE_FROM_ABI__attribute__ ((__visibility__("hidden"))) __attribute__ ((__exclude_from_explicit_instantiation__
))
1113shared_ptr<_Tp> make_shared(_Args&& ...__args)
1114{
1115 return _VSTDstd::__1::allocate_shared<_Tp>(allocator<_Tp>(), _VSTDstd::__1::forward<_Args>(__args)...);
11
Calling 'allocate_shared<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry, std::allocator<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry>, llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>> &, llvm::PBQP::Matrix, void>'
1116}
1117
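
Steps 10-13 of the analyzer path above run through this make_shared/allocate_shared machinery. A small usage sketch (ordinary std::shared_ptr client code, nothing LLVM-specific): one make_shared call allocates a single control block and constructs the element in place inside it via the placement-new at line 298 above, so the payload and its reference counts share one allocation.

#include <cassert>
#include <memory>

struct Payload {
  int A, B;
  Payload(int A, int B) : A(A), B(B) {}
};

int main() {
  // One allocation holds the control block and the Payload constructed
  // in place inside it.
  std::shared_ptr<Payload> P = std::make_shared<Payload>(1, 2);
  assert(P.use_count() == 1 && P->A == 1 && P->B == 2);

  std::shared_ptr<Payload> Q = P; // copy: relaxed refcount increment
  assert(P.use_count() == 2);
  Q.reset();                      // acq/rel decrement; not the last owner
  assert(P.use_count() == 1);
  return 0;
}
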
1118template<class _Tp, class _Up>
1119inline _LIBCPP_INLINE_VISIBILITY
1120bool
1121operator==(const shared_ptr<_Tp>& __x, const shared_ptr<_Up>& __y) _NOEXCEPT
1122{
1123 return __x.get() == __y.get();
1124}
1125
1126template<class _Tp, class _Up>
1127inline _LIBCPP_INLINE_VISIBILITY
1128bool
1129operator!=(const shared_ptr<_Tp>& __x, const shared_ptr<_Up>& __y) _NOEXCEPT
1130{
1131 return !(__x == __y);
1132}
1133
1134template<class _Tp, class _Up>
1135inline _LIBCPP_INLINE_VISIBILITY
1136bool
1137operator<(const shared_ptr<_Tp>& __x, const shared_ptr<_Up>& __y) _NOEXCEPT
1138{
1139#if _LIBCPP_STD_VER <= 11
1140 typedef typename common_type<_Tp*, _Up*>::type _Vp;
1141 return less<_Vp>()(__x.get(), __y.get());
1142#else
1143 return less<>()(__x.get(), __y.get());
1144#endif
1145
1146}
1147
1148template<class _Tp, class _Up>
1149inline _LIBCPP_INLINE_VISIBILITY
1150bool
1151operator>(const shared_ptr<_Tp>& __x, const shared_ptr<_Up>& __y) _NOEXCEPT
1152{
1153 return __y < __x;
1154}
1155
1156template<class _Tp, class _Up>
1157inline _LIBCPP_INLINE_VISIBILITY
1158bool
1159operator<=(const shared_ptr<_Tp>& __x, const shared_ptr<_Up>& __y) _NOEXCEPT
1160{
1161 return !(__y < __x);
1162}
1163
1164template<class _Tp, class _Up>
1165inline _LIBCPP_INLINE_VISIBILITY
1166bool
1167operator>=(const shared_ptr<_Tp>& __x, const shared_ptr<_Up>& __y) _NOEXCEPT
1168{
1169 return !(__x < __y);
1170}
1171
1172template<class _Tp>
1173inline _LIBCPP_INLINE_VISIBILITY
1174bool
1175operator==(const shared_ptr<_Tp>& __x, nullptr_t) _NOEXCEPT
1176{
1177 return !__x;
1178}
1179
1180template<class _Tp>
1181inline _LIBCPP_INLINE_VISIBILITY
1182bool
1183operator==(nullptr_t, const shared_ptr<_Tp>& __x) _NOEXCEPT
1184{
1185 return !__x;
1186}
1187
1188template<class _Tp>
1189inline _LIBCPP_INLINE_VISIBILITY
1190bool
1191operator!=(const shared_ptr<_Tp>& __x, nullptr_t) _NOEXCEPT
1192{
1193 return static_cast<bool>(__x);
1194}
1195
1196template<class _Tp>
1197inline _LIBCPP_INLINE_VISIBILITY
1198bool
1199operator!=(nullptr_t, const shared_ptr<_Tp>& __x) _NOEXCEPT
1200{
1201 return static_cast<bool>(__x);
1202}
1203
1204template<class _Tp>
1205inline _LIBCPP_INLINE_VISIBILITY
1206bool
1207operator<(const shared_ptr<_Tp>& __x, nullptr_t) _NOEXCEPT
1208{
1209 return less<_Tp*>()(__x.get(), nullptr);
1210}
1211
1212template<class _Tp>
1213inline _LIBCPP_INLINE_VISIBILITY
1214bool
1215operator<(nullptr_t, const shared_ptr<_Tp>& __x) _NOEXCEPT
1216{
1217 return less<_Tp*>()(nullptr, __x.get());
1218}
1219
1220template<class _Tp>
1221inline _LIBCPP_INLINE_VISIBILITY
1222bool
1223operator>(const shared_ptr<_Tp>& __x, nullptr_t) _NOEXCEPT
1224{
1225 return nullptr < __x;
1226}
1227
1228template<class _Tp>
1229inline _LIBCPP_INLINE_VISIBILITY
1230bool
1231operator>(nullptr_t, const shared_ptr<_Tp>& __x) _NOEXCEPT
1232{
1233 return __x < nullptr;
1234}
1235
1236template<class _Tp>
1237inline _LIBCPP_INLINE_VISIBILITY
1238bool
1239operator<=(const shared_ptr<_Tp>& __x, nullptr_t) _NOEXCEPT
1240{
1241 return !(nullptr < __x);
1242}
1243
1244template<class _Tp>
1245inline _LIBCPP_INLINE_VISIBILITY
1246bool
1247operator<=(nullptr_t, const shared_ptr<_Tp>& __x) _NOEXCEPT
1248{
1249 return !(__x < nullptr);
1250}
1251
1252template<class _Tp>
1253inline _LIBCPP_INLINE_VISIBILITY
1254bool
1255operator>=(const shared_ptr<_Tp>& __x, nullptr_t) _NOEXCEPT
1256{
1257 return !(__x < nullptr);
1258}
1259
1260template<class _Tp>
1261inline _LIBCPP_INLINE_VISIBILITY
1262bool
1263operator>=(nullptr_t, const shared_ptr<_Tp>& __x) _NOEXCEPT
1264{
1265 return !(nullptr < __x);
1266}
1267
1268template<class _Tp>
1269inline _LIBCPP_INLINE_VISIBILITY
1270void
1271swap(shared_ptr<_Tp>& __x, shared_ptr<_Tp>& __y) _NOEXCEPT
1272{
1273 __x.swap(__y);
1274}
1275
1276template<class _Tp, class _Up>
1277inline _LIBCPP_INLINE_VISIBILITY
1278shared_ptr<_Tp>
1279static_pointer_cast(const shared_ptr<_Up>& __r) _NOEXCEPT
1280{
1281 return shared_ptr<_Tp>(__r,
1282 static_cast<
1283 typename shared_ptr<_Tp>::element_type*>(__r.get()));
1284}
1285
1286template<class _Tp, class _Up>
1287inline _LIBCPP_INLINE_VISIBILITY
1288shared_ptr<_Tp>
1289dynamic_pointer_cast(const shared_ptr<_Up>& __r) _NOEXCEPT
1290{
1291 typedef typename shared_ptr<_Tp>::element_type _ET;
1292 _ET* __p = dynamic_cast<_ET*>(__r.get());
1293 return __p ? shared_ptr<_Tp>(__r, __p) : shared_ptr<_Tp>();
1294}
1295
1296template<class _Tp, class _Up>
1297shared_ptr<_Tp>
1298const_pointer_cast(const shared_ptr<_Up>& __r) _NOEXCEPT
1299{
1300 typedef typename shared_ptr<_Tp>::element_type _RTp;
1301 return shared_ptr<_Tp>(__r, const_cast<_RTp*>(__r.get()));
1302}
1303
1304template<class _Tp, class _Up>
1305shared_ptr<_Tp>
1306reinterpret_pointer_cast(const shared_ptr<_Up>& __r) _NOEXCEPT
1307{
1308 return shared_ptr<_Tp>(__r,
1309 reinterpret_cast<
1310 typename shared_ptr<_Tp>::element_type*>(__r.get()));
1311}
1312
1313#ifndef _LIBCPP_NO_RTTI
1314
1315template<class _Dp, class _Tp>
1316inline _LIBCPP_INLINE_VISIBILITY
1317_Dp*
1318get_deleter(const shared_ptr<_Tp>& __p) _NOEXCEPT
1319{
1320 return __p.template __get_deleter<_Dp>();
1321}
1322
1323#endif // _LIBCPP_NO_RTTI
1324
1325template<class _Tp>
1326class _LIBCPP_SHARED_PTR_TRIVIAL_ABI _LIBCPP_TEMPLATE_VIS weak_ptr
1327{
1328public:
1329 typedef _Tp element_type;
1330private:
1331 element_type* __ptr_;
1332 __shared_weak_count* __cntrl_;
1333
1334public:
1335 _LIBCPP_INLINE_VISIBILITY
1336 _LIBCPP_CONSTEXPR weak_ptr() _NOEXCEPT;
1337 template<class _Yp> _LIBCPP_INLINE_VISIBILITY weak_ptr(shared_ptr<_Yp> const& __r,
1338 typename enable_if<is_convertible<_Yp*, _Tp*>::value, __nat*>::type = 0)
1339 _NOEXCEPT;
1340 _LIBCPP_INLINE_VISIBILITY
1341 weak_ptr(weak_ptr const& __r) _NOEXCEPT;
1342 template<class _Yp> _LIBCPP_INLINE_VISIBILITY weak_ptr(weak_ptr<_Yp> const& __r,
1343 typename enable_if<is_convertible<_Yp*, _Tp*>::value, __nat*>::type = 0)
1344 _NOEXCEPT;
1345
1346 _LIBCPP_INLINE_VISIBILITY
1347 weak_ptr(weak_ptr&& __r) _NOEXCEPT;
1348 template<class _Yp> _LIBCPP_INLINE_VISIBILITY weak_ptr(weak_ptr<_Yp>&& __r,
1349 typename enable_if<is_convertible<_Yp*, _Tp*>::value, __nat*>::type = 0)
1350 _NOEXCEPT;
1351 ~weak_ptr();
1352
1353 _LIBCPP_INLINE_VISIBILITY
1354 weak_ptr& operator=(weak_ptr const& __r) _NOEXCEPT;
1355 template<class _Yp>
1356 typename enable_if
1357 <
1358 is_convertible<_Yp*, element_type*>::value,
1359 weak_ptr&
1360 >::type
1361 _LIBCPP_INLINE_VISIBILITY
1362 operator=(weak_ptr<_Yp> const& __r) _NOEXCEPT;
1363
1364 _LIBCPP_INLINE_VISIBILITY
1365 weak_ptr& operator=(weak_ptr&& __r) _NOEXCEPT;
1366 template<class _Yp>
1367 typename enable_if
1368 <
1369 is_convertible<_Yp*, element_type*>::value,
1370 weak_ptr&
1371 >::type
1372 _LIBCPP_INLINE_VISIBILITY
1373 operator=(weak_ptr<_Yp>&& __r) _NOEXCEPT;
1374
1375 template<class _Yp>
1376 typename enable_if
1377 <
1378 is_convertible<_Yp*, element_type*>::value,
1379 weak_ptr&
1380 >::type
1381 _LIBCPP_INLINE_VISIBILITY
1382 operator=(shared_ptr<_Yp> const& __r) _NOEXCEPT;
1383
1384 _LIBCPP_INLINE_VISIBILITY
1385 void swap(weak_ptr& __r) _NOEXCEPT;
1386 _LIBCPP_INLINE_VISIBILITY
1387 void reset() _NOEXCEPT;
1388
1389 _LIBCPP_INLINE_VISIBILITY
1390 long use_count() const _NOEXCEPT
1391 {return __cntrl_ ? __cntrl_->use_count() : 0;}
1392 _LIBCPP_INLINE_VISIBILITY
1393 bool expired() const _NOEXCEPT
1394 {return __cntrl_ == nullptr || __cntrl_->use_count() == 0;}
1395 shared_ptr<_Tp> lock() const _NOEXCEPT;
1396 template<class _Up>
1397 _LIBCPP_INLINE_VISIBILITY
1398 bool owner_before(const shared_ptr<_Up>& __r) const _NOEXCEPT
1399 {return __cntrl_ < __r.__cntrl_;}
1400 template<class _Up>
1401 _LIBCPP_INLINE_VISIBILITY
1402 bool owner_before(const weak_ptr<_Up>& __r) const _NOEXCEPT
1403 {return __cntrl_ < __r.__cntrl_;}
1404
1405 template <class _Up> friend class _LIBCPP_TEMPLATE_VIS weak_ptr;
1406 template <class _Up> friend class _LIBCPP_TEMPLATE_VIS shared_ptr;
1407};
1408
1409#ifndef _LIBCPP_HAS_NO_DEDUCTION_GUIDES
1410template<class _Tp>
1411weak_ptr(shared_ptr<_Tp>) -> weak_ptr<_Tp>;
1412#endif
1413
1414template<class _Tp>
1415inline
1416_LIBCPP_CONSTEXPR
1417weak_ptr<_Tp>::weak_ptr() _NOEXCEPT
1418 : __ptr_(nullptr),
1419 __cntrl_(nullptr)
1420{
1421}
1422
1423template<class _Tp>
1424inline
1425weak_ptr<_Tp>::weak_ptr(weak_ptr const& __r) _NOEXCEPT
1426 : __ptr_(__r.__ptr_),
1427 __cntrl_(__r.__cntrl_)
1428{
1429 if (__cntrl_)
1430 __cntrl_->__add_weak();
1431}
1432
1433template<class _Tp>
1434template<class _Yp>
1435inline
1436weak_ptr<_Tp>::weak_ptr(shared_ptr<_Yp> const& __r,
1437 typename enable_if<is_convertible<_Yp*, _Tp*>::value, __nat*>::type)
1438 _NOEXCEPT
1439 : __ptr_(__r.__ptr_),
1440 __cntrl_(__r.__cntrl_)
1441{
1442 if (__cntrl_)
1443 __cntrl_->__add_weak();
1444}
1445
1446template<class _Tp>
1447template<class _Yp>
1448inline
1449weak_ptr<_Tp>::weak_ptr(weak_ptr<_Yp> const& __r,
1450 typename enable_if<is_convertible<_Yp*, _Tp*>::value, __nat*>::type)
1451 _NOEXCEPT
1452 : __ptr_(__r.__ptr_),
1453 __cntrl_(__r.__cntrl_)
1454{
1455 if (__cntrl_)
1456 __cntrl_->__add_weak();
1457}
1458
1459template<class _Tp>
1460inline
1461weak_ptr<_Tp>::weak_ptr(weak_ptr&& __r) _NOEXCEPT
1462 : __ptr_(__r.__ptr_),
1463 __cntrl_(__r.__cntrl_)
1464{
1465 __r.__ptr_ = nullptr;
1466 __r.__cntrl_ = nullptr;
1467}
1468
1469template<class _Tp>
1470template<class _Yp>
1471inline
1472weak_ptr<_Tp>::weak_ptr(weak_ptr<_Yp>&& __r,
1473 typename enable_if<is_convertible<_Yp*, _Tp*>::value, __nat*>::type)
1474 _NOEXCEPT
1475 : __ptr_(__r.__ptr_),
1476 __cntrl_(__r.__cntrl_)
1477{
1478 __r.__ptr_ = nullptr;
1479 __r.__cntrl_ = nullptr;
1480}
1481
1482template<class _Tp>
1483weak_ptr<_Tp>::~weak_ptr()
1484{
1485 if (__cntrl_)
1486 __cntrl_->__release_weak();
1487}
1488
1489template<class _Tp>
1490inline
1491weak_ptr<_Tp>&
1492weak_ptr<_Tp>::operator=(weak_ptr const& __r) _NOEXCEPT
1493{
1494 weak_ptr(__r).swap(*this);
1495 return *this;
1496}
1497
1498template<class _Tp>
1499template<class _Yp>
1500inline
1501typename enable_if
1502<
1503 is_convertible<_Yp*, _Tp*>::value,
1504 weak_ptr<_Tp>&
1505>::type
1506weak_ptr<_Tp>::operator=(weak_ptr<_Yp> const& __r) _NOEXCEPT
1507{
1508 weak_ptr(__r).swap(*this);
1509 return *this;
1510}
1511
1512template<class _Tp>
1513inline
1514weak_ptr<_Tp>&
1515weak_ptr<_Tp>::operator=(weak_ptr&& __r) _NOEXCEPT
1516{
1517 weak_ptr(_VSTD::move(__r)).swap(*this);
1518 return *this;
1519}
1520
1521template<class _Tp>
1522template<class _Yp>
1523inline
1524typename enable_if
1525<
1526 is_convertible<_Yp*, _Tp*>::value,
1527 weak_ptr<_Tp>&
1528>::type
1529weak_ptr<_Tp>::operator=(weak_ptr<_Yp>&& __r) _NOEXCEPT
1530{
1531 weak_ptr(_VSTD::move(__r)).swap(*this);
1532 return *this;
1533}
1534
1535template<class _Tp>
1536template<class _Yp>
1537inline
1538typename enable_if
1539<
1540 is_convertible<_Yp*, _Tp*>::value,
1541 weak_ptr<_Tp>&
1542>::type
1543weak_ptr<_Tp>::operator=(shared_ptr<_Yp> const& __r) _NOEXCEPT
1544{
1545 weak_ptr(__r).swap(*this);
1546 return *this;
1547}
1548
1549template<class _Tp>
1550inline
1551void
1552weak_ptr<_Tp>::swap(weak_ptr& __r) _NOEXCEPT
1553{
1554 _VSTD::swap(__ptr_, __r.__ptr_);
1555 _VSTD::swap(__cntrl_, __r.__cntrl_);
1556}
1557
1558template<class _Tp>
1559inline _LIBCPP_INLINE_VISIBILITY
1560void
1561swap(weak_ptr<_Tp>& __x, weak_ptr<_Tp>& __y) _NOEXCEPT
1562{
1563 __x.swap(__y);
1564}
1565
1566template<class _Tp>
1567inline
1568void
1569weak_ptr<_Tp>::reset() _NOEXCEPT
1570{
1571 weak_ptr().swap(*this);
1572}
1573
1574template<class _Tp>
1575template<class _Yp>
1576shared_ptr<_Tp>::shared_ptr(const weak_ptr<_Yp>& __r,
1577 typename enable_if<is_convertible<_Yp*, element_type*>::value, __nat>::type)
1578 : __ptr_(__r.__ptr_),
1579 __cntrl_(__r.__cntrl_ ? __r.__cntrl_->lock() : __r.__cntrl_)
1580{
1581 if (__cntrl_ == nullptr)
1582 __throw_bad_weak_ptr();
1583}
1584
1585template<class _Tp>
1586shared_ptr<_Tp>
1587weak_ptr<_Tp>::lock() const _NOEXCEPT
1588{
1589 shared_ptr<_Tp> __r;
1590 __r.__cntrl_ = __cntrl_ ? __cntrl_->lock() : __cntrl_;
1591 if (__r.__cntrl_)
1592 __r.__ptr_ = __ptr_;
1593 return __r;
1594}
1595
1596#if _LIBCPP_STD_VER > 14
1597template <class _Tp = void> struct owner_less;
1598#else
1599template <class _Tp> struct owner_less;
1600#endif
1601
1602
1603_LIBCPP_SUPPRESS_DEPRECATED_PUSH
1604template <class _Tp>
1605struct _LIBCPP_TEMPLATE_VIS owner_less<shared_ptr<_Tp> >
1606#if !defined(_LIBCPP_ABI_NO_BINDER_BASES)
1607 : binary_function<shared_ptr<_Tp>, shared_ptr<_Tp>, bool>
1608#endif
1609{
1610_LIBCPP_SUPPRESS_DEPRECATED_POP
1611#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
1612 _LIBCPP_DEPRECATED_IN_CXX17 typedef bool result_type;
1613 _LIBCPP_DEPRECATED_IN_CXX17 typedef shared_ptr<_Tp> first_argument_type;
1614 _LIBCPP_DEPRECATED_IN_CXX17 typedef shared_ptr<_Tp> second_argument_type;
1615#endif
1616 _LIBCPP_INLINE_VISIBILITY
1617 bool operator()(shared_ptr<_Tp> const& __x, shared_ptr<_Tp> const& __y) const _NOEXCEPT
1618 {return __x.owner_before(__y);}
1619 _LIBCPP_INLINE_VISIBILITY
1620 bool operator()(shared_ptr<_Tp> const& __x, weak_ptr<_Tp> const& __y) const _NOEXCEPT
1621 {return __x.owner_before(__y);}
1622 _LIBCPP_INLINE_VISIBILITY
1623 bool operator()( weak_ptr<_Tp> const& __x, shared_ptr<_Tp> const& __y) const _NOEXCEPT
1624 {return __x.owner_before(__y);}
1625};
1626
1627_LIBCPP_SUPPRESS_DEPRECATED_PUSH
1628template <class _Tp>
1629struct _LIBCPP_TEMPLATE_VIS owner_less<weak_ptr<_Tp> >
1630#if !defined(_LIBCPP_ABI_NO_BINDER_BASES)
1631 : binary_function<weak_ptr<_Tp>, weak_ptr<_Tp>, bool>
1632#endif
1633{
1634_LIBCPP_SUPPRESS_DEPRECATED_POP
1635#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
1636 _LIBCPP_DEPRECATED_IN_CXX17 typedef bool result_type;
1637 _LIBCPP_DEPRECATED_IN_CXX17 typedef weak_ptr<_Tp> first_argument_type;
1638 _LIBCPP_DEPRECATED_IN_CXX17 typedef weak_ptr<_Tp> second_argument_type;
1639#endif
1640 _LIBCPP_INLINE_VISIBILITY
1641 bool operator()( weak_ptr<_Tp> const& __x, weak_ptr<_Tp> const& __y) const _NOEXCEPT
1642 {return __x.owner_before(__y);}
1643 _LIBCPP_INLINE_VISIBILITY
1644 bool operator()(shared_ptr<_Tp> const& __x, weak_ptr<_Tp> const& __y) const _NOEXCEPT
1645 {return __x.owner_before(__y);}
1646 _LIBCPP_INLINE_VISIBILITY
1647 bool operator()( weak_ptr<_Tp> const& __x, shared_ptr<_Tp> const& __y) const _NOEXCEPT
1648 {return __x.owner_before(__y);}
1649};
1650
1651#if _LIBCPP_STD_VER > 14
1652template <>
1653struct _LIBCPP_TEMPLATE_VIS owner_less<void>
1654{
1655 template <class _Tp, class _Up>
1656 _LIBCPP_INLINE_VISIBILITY
1657 bool operator()( shared_ptr<_Tp> const& __x, shared_ptr<_Up> const& __y) const _NOEXCEPT
1658 {return __x.owner_before(__y);}
1659 template <class _Tp, class _Up>
1660 _LIBCPP_INLINE_VISIBILITY
1661 bool operator()( shared_ptr<_Tp> const& __x, weak_ptr<_Up> const& __y) const _NOEXCEPT
1662 {return __x.owner_before(__y);}
1663 template <class _Tp, class _Up>
1664 _LIBCPP_INLINE_VISIBILITY
1665 bool operator()( weak_ptr<_Tp> const& __x, shared_ptr<_Up> const& __y) const _NOEXCEPT
1666 {return __x.owner_before(__y);}
1667 template <class _Tp, class _Up>
1668 _LIBCPP_INLINE_VISIBILITY
1669 bool operator()( weak_ptr<_Tp> const& __x, weak_ptr<_Up> const& __y) const _NOEXCEPT
1670 {return __x.owner_before(__y);}
1671 typedef void is_transparent;
1672};
1673#endif
1674
1675template<class _Tp>
1676class _LIBCPP_TEMPLATE_VIS enable_shared_from_this
1677{
1678 mutable weak_ptr<_Tp> __weak_this_;
1679protected:
1680 _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
1681 enable_shared_from_this() _NOEXCEPT {}
1682 _LIBCPP_INLINE_VISIBILITY
1683 enable_shared_from_this(enable_shared_from_this const&) _NOEXCEPT {}
1684 _LIBCPP_INLINE_VISIBILITY
1685 enable_shared_from_this& operator=(enable_shared_from_this const&) _NOEXCEPT
1686 {return *this;}
1687 _LIBCPP_INLINE_VISIBILITY
1688 ~enable_shared_from_this() {}
1689public:
1690 _LIBCPP_INLINE_VISIBILITY
1691 shared_ptr<_Tp> shared_from_this()
1692 {return shared_ptr<_Tp>(__weak_this_);}
1693 _LIBCPP_INLINE_VISIBILITY
1694 shared_ptr<_Tp const> shared_from_this() const
1695 {return shared_ptr<const _Tp>(__weak_this_);}
1696
1697#if _LIBCPP_STD_VER > 14
1698 _LIBCPP_INLINE_VISIBILITY
1699 weak_ptr<_Tp> weak_from_this() _NOEXCEPT
1700 { return __weak_this_; }
1701
1702 _LIBCPP_INLINE_VISIBILITY
1703 weak_ptr<const _Tp> weak_from_this() const _NOEXCEPT
1704 { return __weak_this_; }
1705#endif // _LIBCPP_STD_VER > 14
1706
1707 template <class _Up> friend class shared_ptr;
1708};
1709
1710template <class _Tp> struct _LIBCPP_TEMPLATE_VIS hash;
1711
1712template <class _Tp>
1713struct _LIBCPP_TEMPLATE_VIS hash<shared_ptr<_Tp> >
1714{
1715#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
1716 _LIBCPP_DEPRECATED_IN_CXX17 typedef shared_ptr<_Tp> argument_type;
1717 _LIBCPP_DEPRECATED_IN_CXX17 typedef size_t result_type;
1718#endif
1719
1720 _LIBCPP_INLINE_VISIBILITY
1721 size_t operator()(const shared_ptr<_Tp>& __ptr) const _NOEXCEPT
1722 {
1723 return hash<typename shared_ptr<_Tp>::element_type*>()(__ptr.get());
1724 }
1725};
1726
1727template<class _CharT, class _Traits, class _Yp>
1728inline _LIBCPP_INLINE_VISIBILITY
1729basic_ostream<_CharT, _Traits>&
1730operator<<(basic_ostream<_CharT, _Traits>& __os, shared_ptr<_Yp> const& __p);
1731
1732
1733#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
1734
1735class _LIBCPP_TYPE_VIS __sp_mut
1736{
1737 void* __lx;
1738public:
1739 void lock() _NOEXCEPT;
1740 void unlock() _NOEXCEPT;
1741
1742private:
1743 _LIBCPP_CONSTEXPR __sp_mut(void*) _NOEXCEPT;
1744 __sp_mut(const __sp_mut&);
1745 __sp_mut& operator=(const __sp_mut&);
1746
1747 friend _LIBCPP_FUNC_VIS __sp_mut& __get_sp_mut(const void*);
1748};
1749
1750_LIBCPP_FUNC_VIS _LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1751__sp_mut& __get_sp_mut(const void*);
1752
1753template <class _Tp>
1754inline _LIBCPP_INLINE_VISIBILITY
1755bool
1756atomic_is_lock_free(const shared_ptr<_Tp>*)
1757{
1758 return false;
1759}
1760
1761template <class _Tp>
1762_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1763shared_ptr<_Tp>
1764atomic_load(const shared_ptr<_Tp>* __p)
1765{
1766 __sp_mut& __m = __get_sp_mut(__p);
1767 __m.lock();
1768 shared_ptr<_Tp> __q = *__p;
1769 __m.unlock();
1770 return __q;
1771}
1772
1773template <class _Tp>
1774inline _LIBCPP_INLINE_VISIBILITY
1775_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1776shared_ptr<_Tp>
1777atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
1778{
1779 return atomic_load(__p);
1780}
1781
1782template <class _Tp>
1783_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1784void
1785atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
1786{
1787 __sp_mut& __m = __get_sp_mut(__p);
1788 __m.lock();
1789 __p->swap(__r);
1790 __m.unlock();
1791}
1792
1793template <class _Tp>
1794inline _LIBCPP_INLINE_VISIBILITY
1795_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1796void
1797atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r, memory_order)
1798{
1799 atomic_store(__p, __r);
1800}
1801
1802template <class _Tp>
1803_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1804shared_ptr<_Tp>
1805atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
1806{
1807 __sp_mut& __m = __get_sp_mut(__p);
1808 __m.lock();
1809 __p->swap(__r);
1810 __m.unlock();
1811 return __r;
1812}
1813
1814template <class _Tp>
1815inline _LIBCPP_INLINE_VISIBILITY
1816_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1817shared_ptr<_Tp>
1818atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r, memory_order)
1819{
1820 return atomic_exchange(__p, __r);
1821}
1822
1823template <class _Tp>
1824_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1825bool
1826atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v, shared_ptr<_Tp> __w)
1827{
1828 shared_ptr<_Tp> __temp;
1829 __sp_mut& __m = __get_sp_mut(__p);
1830 __m.lock();
1831 if (__p->__owner_equivalent(*__v))
1832 {
1833 _VSTD::swap(__temp, *__p);
1834 *__p = __w;
1835 __m.unlock();
1836 return true;
1837 }
1838 _VSTD::swap(__temp, *__v);
1839 *__v = *__p;
1840 __m.unlock();
1841 return false;
1842}
1843
1844template <class _Tp>
1845inline _LIBCPP_INLINE_VISIBILITY
1846_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1847bool
1848atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v, shared_ptr<_Tp> __w)
1849{
1850 return atomic_compare_exchange_strong(__p, __v, __w);
1851}
1852
1853template <class _Tp>
1854inline _LIBCPP_INLINE_VISIBILITY
1855_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1856bool
1857atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
1858 shared_ptr<_Tp> __w, memory_order, memory_order)
1859{
1860 return atomic_compare_exchange_strong(__p, __v, __w);
1861}
1862
1863template <class _Tp>
1864inline _LIBCPP_INLINE_VISIBILITY
1865_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
1866bool
1867atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
1868 shared_ptr<_Tp> __w, memory_order, memory_order)
1869{
1870 return atomic_compare_exchange_weak(__p, __v, __w);
1871}
1872
1873#endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
1874
1875_LIBCPP_END_NAMESPACE_STD
1876
1877_LIBCPP_POP_MACROS
1878
1879#endif // _LIBCPP___MEMORY_SHARED_PTR_H
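The libc++ plumbing above is only the delivery mechanism for the defect reported later in RegAllocPBQP.h: step 11 shows make_shared forwarding to allocate_shared with a default std::allocator, which emplace-constructs the pooled cost entry inside a __shared_ptr_emplace control block. A minimal sketch of that same shape, using stand-in types (Matrix and PoolEntry below are placeholders for illustration, not the LLVM or libc++ types):

    #include <memory>

    struct Matrix { };                      // stand-in for llvm::PBQP::Matrix
    struct PoolEntry {                      // stand-in for ValuePool<...>::PoolEntry
      explicit PoolEntry(Matrix M) : Cost(M) {}  // metadata would be computed here
      Matrix Cost;
    };

    int main() {
      Matrix M;
      // make_shared simply forwards to allocate_shared with a default
      // std::allocator, which constructs PoolEntry inside the control block
      // (the '__shared_ptr_emplace' constructor named in the path notes).
      auto A = std::make_shared<PoolEntry>(M);
      auto B = std::allocate_shared<PoolEntry>(std::allocator<PoolEntry>(), M);
      return (A && B) ? 0 : 1;
    }
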

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/PBQP/Math.h

1//===- Math.h - PBQP Vector and Matrix classes ------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_CODEGEN_PBQP_MATH_H
10#define LLVM_CODEGEN_PBQP_MATH_H
11
12#include "llvm/ADT/Hashing.h"
13#include "llvm/ADT/STLExtras.h"
14#include <algorithm>
15#include <cassert>
16#include <functional>
17#include <memory>
18
19namespace llvm {
20namespace PBQP {
21
22using PBQPNum = float;
23
24/// PBQP Vector class.
25class Vector {
26 friend hash_code hash_value(const Vector &);
27
28public:
29 /// Construct a PBQP vector of the given size.
30 explicit Vector(unsigned Length)
31 : Length(Length), Data(std::make_unique<PBQPNum []>(Length)) {}
32
33 /// Construct a PBQP vector with initializer.
34 Vector(unsigned Length, PBQPNum InitVal)
35 : Length(Length), Data(std::make_unique<PBQPNum []>(Length)) {
36 std::fill(Data.get(), Data.get() + Length, InitVal);
37 }
38
39 /// Copy construct a PBQP vector.
40 Vector(const Vector &V)
41 : Length(V.Length), Data(std::make_unique<PBQPNum []>(Length)) {
42 std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
43 }
44
45 /// Move construct a PBQP vector.
46 Vector(Vector &&V)
47 : Length(V.Length), Data(std::move(V.Data)) {
48 V.Length = 0;
49 }
50
51 /// Comparison operator.
52 bool operator==(const Vector &V) const {
53 assert(Length != 0 && Data && "Invalid vector");
54 if (Length != V.Length)
55 return false;
56 return std::equal(Data.get(), Data.get() + Length, V.Data.get());
57 }
58
59 /// Return the length of the vector
60 unsigned getLength() const {
61 assert(Length != 0 && Data && "Invalid vector");
62 return Length;
63 }
64
65 /// Element access.
66 PBQPNum& operator[](unsigned Index) {
67 assert(Length != 0 && Data && "Invalid vector");
68 assert(Index < Length && "Vector element access out of bounds.");
69 return Data[Index];
70 }
71
72 /// Const element access.
73 const PBQPNum& operator[](unsigned Index) const {
74 assert(Length != 0 && Data && "Invalid vector");
75 assert(Index < Length && "Vector element access out of bounds.");
76 return Data[Index];
77 }
78
79 /// Add another vector to this one.
80 Vector& operator+=(const Vector &V) {
81 assert(Length != 0 && Data && "Invalid vector");
82 assert(Length == V.Length && "Vector length mismatch.");
83 std::transform(Data.get(), Data.get() + Length, V.Data.get(), Data.get(),
84 std::plus<PBQPNum>());
85 return *this;
86 }
87
88 /// Returns the index of the minimum value in this vector
89 unsigned minIndex() const {
90 assert(Length != 0 && Data && "Invalid vector");
91 return std::min_element(Data.get(), Data.get() + Length) - Data.get();
92 }
93
94private:
95 unsigned Length;
96 std::unique_ptr<PBQPNum []> Data;
97};
98
99/// Return a hash_value for the given vector.
100inline hash_code hash_value(const Vector &V) {
101 unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
102 unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
103 return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
104}
105
106/// Output a textual representation of the given vector on the given
107/// output stream.
108template <typename OStream>
109OStream& operator<<(OStream &OS, const Vector &V) {
110 assert((V.getLength() != 0) && "Zero-length vector badness.");
111
112 OS << "[ " << V[0];
113 for (unsigned i = 1; i < V.getLength(); ++i)
114 OS << ", " << V[i];
115 OS << " ]";
116
117 return OS;
118}
119
120/// PBQP Matrix class
121class Matrix {
122private:
123 friend hash_code hash_value(const Matrix &);
124
125public:
126 /// Construct a PBQP Matrix with the given dimensions.
127 Matrix(unsigned Rows, unsigned Cols) :
128 Rows(Rows), Cols(Cols), Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
129 }
130
131 /// Construct a PBQP Matrix with the given dimensions and initial
132 /// value.
133 Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
134 : Rows(Rows), Cols(Cols),
135 Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
136 std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
137 }
138
139 /// Copy construct a PBQP matrix.
140 Matrix(const Matrix &M)
141 : Rows(M.Rows), Cols(M.Cols),
142 Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
143 std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
144 }
145
146 /// Move construct a PBQP matrix.
147 Matrix(Matrix &&M)
148 : Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
149 M.Rows = M.Cols = 0;
150 }
151
152 /// Comparison operator.
153 bool operator==(const Matrix &M) const {
154 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
155 if (Rows != M.Rows || Cols != M.Cols)
156 return false;
157 return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
158 }
159
160 /// Return the number of rows in this matrix.
161 unsigned getRows() const {
162 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
163 return Rows;
164 }
165
166 /// Return the number of cols in this matrix.
167 unsigned getCols() const {
168 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
169 return Cols;
170 }
171
172 /// Matrix element access.
173 PBQPNum* operator[](unsigned R) {
174 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
175 assert(R < Rows && "Row out of bounds.");
176 return Data.get() + (R * Cols);
177 }
178
179 /// Matrix element access.
180 const PBQPNum* operator[](unsigned R) const {
181 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
182 assert(R < Rows && "Row out of bounds.");
183 return Data.get() + (R * Cols);
184 }
185
186 /// Returns the given row as a vector.
187 Vector getRowAsVector(unsigned R) const {
188 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
189 Vector V(Cols);
190 for (unsigned C = 0; C < Cols; ++C)
191 V[C] = (*this)[R][C];
192 return V;
193 }
194
195 /// Returns the given column as a vector.
196 Vector getColAsVector(unsigned C) const {
197 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
198 Vector V(Rows);
199 for (unsigned R = 0; R < Rows; ++R)
200 V[R] = (*this)[R][C];
201 return V;
202 }
203
204 /// Matrix transpose.
205 Matrix transpose() const {
206 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
207 Matrix M(Cols, Rows);
208 for (unsigned r = 0; r < Rows; ++r)
209 for (unsigned c = 0; c < Cols; ++c)
210 M[c][r] = (*this)[r][c];
211 return M;
212 }
213
214 /// Add the given matrix to this one.
215 Matrix& operator+=(const Matrix &M) {
216 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
217 assert(Rows == M.Rows && Cols == M.Cols &&
218 "Matrix dimensions mismatch.");
219 std::transform(Data.get(), Data.get() + (Rows * Cols), M.Data.get(),
220 Data.get(), std::plus<PBQPNum>());
221 return *this;
222 }
223
224 Matrix operator+(const Matrix &M) {
225 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
226 Matrix Tmp(*this);
227 Tmp += M;
228 return Tmp;
229 }
230
231private:
232 unsigned Rows, Cols;
233 std::unique_ptr<PBQPNum []> Data;
234};
235
236/// Return a hash_code for the given matrix.
237inline hash_code hash_value(const Matrix &M) {
238 unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
239 unsigned *MEnd =
240 reinterpret_cast<unsigned*>(M.Data.get() + (M.Rows * M.Cols));
241 return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
242}
243
244/// Output a textual representation of the given matrix on the given
245/// output stream.
246template <typename OStream>
247OStream& operator<<(OStream &OS, const Matrix &M) {
248 assert((M.getRows() != 0) && "Zero-row matrix badness.");
249 for (unsigned i = 0; i < M.getRows(); ++i)
250 OS << M.getRowAsVector(i) << "\n";
251 return OS;
252}
253
254template <typename Metadata>
255class MDVector : public Vector {
256public:
257 MDVector(const Vector &v) : Vector(v), md(*this) {}
258 MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }
259
260 const Metadata& getMetadata() const { return md; }
261
262private:
263 Metadata md;
264};
265
266template <typename Metadata>
267inline hash_code hash_value(const MDVector<Metadata> &V) {
268 return hash_value(static_cast<const Vector&>(V));
269}
270
271template <typename Metadata>
272class MDMatrix : public Matrix {
273public:
274 MDMatrix(const Matrix &m) : Matrix(m), md(*this) {}
275 MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }
15
Calling constructor for 'MatrixMetadata'
276
277 const Metadata& getMetadata() const { return md; }
278
279private:
280 Metadata md;
281};
282
283template <typename Metadata>
284inline hash_code hash_value(const MDMatrix<Metadata> &M) {
285 return hash_value(static_cast<const Matrix&>(M));
286}
287
288} // end namespace PBQP
289} // end namespace llvm
290
291#endif // LLVM_CODEGEN_PBQP_MATH_H
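For readers following the path into RegAllocPBQP.h below, a brief hedged sketch of how these cost containers are used: a Vector holds one cost per option (index 0 is conventionally the spill option, per getSpillOptionIdx() in RegAllocPBQP.h), and a Matrix holds pairwise edge costs indexed as Edge[row][col]. The snippet compiles against the header above; the sizes and values are illustrative only.

    #include "llvm/CodeGen/PBQP/Math.h"
    #include <limits>

    using namespace llvm::PBQP;

    void costExample() {
      Vector Costs(3, 0.0);                 // three options, all costs start at 0
      Costs[0] = 5.0;                       // option 0 (the spill option) made expensive
      unsigned Cheapest = Costs.minIndex(); // index of the cheapest option

      Matrix Edge(3, 3, 0.0);               // pairwise costs for two 3-option nodes
      Edge[1][2] = std::numeric_limits<PBQPNum>::infinity(); // forbid that pairing
      Matrix Flipped = Edge.transpose();    // same costs viewed from the other node
      (void)Cheapest; (void)Flipped;
    }
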

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/RegAllocPBQP.h

1//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the PBQPBuilder interface, for classes which build PBQP
10// instances to represent register allocation problems, and the RegAllocPBQP
11// interface.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
16#define LLVM_CODEGEN_REGALLOCPBQP_H
17
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/CodeGen/PBQP/CostAllocator.h"
21#include "llvm/CodeGen/PBQP/Graph.h"
22#include "llvm/CodeGen/PBQP/Math.h"
23#include "llvm/CodeGen/PBQP/ReductionRules.h"
24#include "llvm/CodeGen/PBQP/Solution.h"
25#include "llvm/CodeGen/Register.h"
26#include "llvm/MC/MCRegister.h"
27#include "llvm/Support/ErrorHandling.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <limits>
32#include <memory>
33#include <set>
34#include <vector>
35
36namespace llvm {
37
38class FunctionPass;
39class LiveIntervals;
40class MachineBlockFrequencyInfo;
41class MachineFunction;
42class raw_ostream;
43
44namespace PBQP {
45namespace RegAlloc {
46
47/// Spill option index.
48inline unsigned getSpillOptionIdx() { return 0; }
49
50/// Metadata to speed allocatability test.
51///
52/// Keeps track of the number of infinities in each row and column.
53class MatrixMetadata {
54public:
55 MatrixMetadata(const Matrix& M)
56 : UnsafeRows(new bool[M.getRows() - 1]()),
57 UnsafeCols(new bool[M.getCols() - 1]()) {
58 unsigned* ColCounts = new unsigned[M.getCols() - 1]();
16
Memory is allocated
59
60 for (unsigned i = 1; i < M.getRows(); ++i) {
17
Loop condition is true. Entering loop body
19
Loop condition is false. Execution continues on line 73
61 unsigned RowCount = 0;
62 for (unsigned j = 1; j < M.getCols(); ++j) {
18
Loop condition is false. Execution continues on line 70
63 if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
64 ++RowCount;
65 ++ColCounts[j - 1];
66 UnsafeRows[i - 1] = true;
67 UnsafeCols[j - 1] = true;
68 }
69 }
70 WorstRow = std::max(WorstRow, RowCount);
71 }
72 unsigned WorstColCountForCurRow =
73 *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
20
Use of zero-allocated memory
74 WorstCol = std::max(WorstCol, WorstColCountForCurRow);
75 delete[] ColCounts;
76 }
77
78 MatrixMetadata(const MatrixMetadata &) = delete;
79 MatrixMetadata &operator=(const MatrixMetadata &) = delete;
80
81 unsigned getWorstRow() const { return WorstRow; }
82 unsigned getWorstCol() const { return WorstCol; }
83 const bool* getUnsafeRows() const { return UnsafeRows.get(); }
84 const bool* getUnsafeCols() const { return UnsafeCols.get(); }
85
86private:
87 unsigned WorstRow = 0;
88 unsigned WorstCol = 0;
89 std::unique_ptr<bool[]> UnsafeRows;
90 std::unique_ptr<bool[]> UnsafeCols;
91};
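To make the warning concrete: the path above reaches this constructor with a matrix whose getCols() is 1 (step 18 notes that the inner loop body never runs), so new unsigned[M.getCols() - 1]() at line 58 is a zero-length allocation, and the std::max_element call at line 73 is handed an empty range. max_element on an empty range returns its end argument, which here is ColCounts itself, and the immediate dereference reads the zero-sized allocation. A minimal, self-contained sketch of that shape follows; it is not LLVM code, the function name worstColCount is invented for illustration, and whether a one-column cost matrix can actually reach this constructor during PBQP register allocation is not established by this report.

    #include <algorithm>

    // Same shape as the constructor above when M.getCols() == 1.
    unsigned worstColCount(unsigned Rows, unsigned Cols) {
      unsigned *ColCounts = new unsigned[Cols - 1]();   // Cols == 1 -> zero-length array
      for (unsigned i = 1; i < Rows; ++i)
        for (unsigned j = 1; j < Cols; ++j)             // body never runs when Cols == 1
          ++ColCounts[j - 1];
      // Empty range: max_element(first, first) returns 'first', so the dereference
      // below reads the zero-sized allocation -- the warning reported at line 73.
      unsigned Worst = *std::max_element(ColCounts, ColCounts + Cols - 1);
      delete[] ColCounts;
      return Worst;
    }

Guarding the computation (for example, only calling std::max_element when M.getCols() > 1 and otherwise leaving WorstCol at 0) would avoid the empty-range dereference.
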
92
93/// Holds a vector of the allowed physical regs for a vreg.
94class AllowedRegVector {
95 friend hash_code hash_value(const AllowedRegVector &);
96
97public:
98 AllowedRegVector() = default;
99 AllowedRegVector(AllowedRegVector &&) = default;
100
101 AllowedRegVector(const std::vector<MCRegister> &OptVec)
102 : NumOpts(OptVec.size()), Opts(new MCRegister[NumOpts]) {
103 std::copy(OptVec.begin(), OptVec.end(), Opts.get());
104 }
105
106 unsigned size() const { return NumOpts; }
107 MCRegister operator[](size_t I) const { return Opts[I]; }
108
109 bool operator==(const AllowedRegVector &Other) const {
110 if (NumOpts != Other.NumOpts)
111 return false;
112 return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
113 }
114
115 bool operator!=(const AllowedRegVector &Other) const {
116 return !(*this == Other);
117 }
118
119private:
120 unsigned NumOpts = 0;
121 std::unique_ptr<MCRegister[]> Opts;
122};
123
124inline hash_code hash_value(const AllowedRegVector &OptRegs) {
125 MCRegister *OStart = OptRegs.Opts.get();
126 MCRegister *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
127 return hash_combine(OptRegs.NumOpts,
128 hash_combine_range(OStart, OEnd));
129}
130
131/// Holds graph-level metadata relevant to PBQP RA problems.
132class GraphMetadata {
133private:
134 using AllowedRegVecPool = ValuePool<AllowedRegVector>;
135
136public:
137 using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
138
139 GraphMetadata(MachineFunction &MF,
140 LiveIntervals &LIS,
141 MachineBlockFrequencyInfo &MBFI)
142 : MF(MF), LIS(LIS), MBFI(MBFI) {}
143
144 MachineFunction &MF;
145 LiveIntervals &LIS;
146 MachineBlockFrequencyInfo &MBFI;
147
148 void setNodeIdForVReg(Register VReg, GraphBase::NodeId NId) {
149 VRegToNodeId[VReg.id()] = NId;
150 }
151
152 GraphBase::NodeId getNodeIdForVReg(Register VReg) const {
153 auto VRegItr = VRegToNodeId.find(VReg);
154 if (VRegItr == VRegToNodeId.end())
155 return GraphBase::invalidNodeId();
156 return VRegItr->second;
157 }
158
159 AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
160 return AllowedRegVecs.getValue(std::move(Allowed));
161 }
162
163private:
164 DenseMap<Register, GraphBase::NodeId> VRegToNodeId;
165 AllowedRegVecPool AllowedRegVecs;
166};
167
168/// Holds solver state and other metadata relevant to each PBQP RA node.
169class NodeMetadata {
170public:
171 using AllowedRegVector = RegAlloc::AllowedRegVector;
172
173 // The node's reduction state. The order in this enum is important,
174 // as it is assumed nodes can only progress up (i.e. towards being
175 // optimally reducible) when reducing the graph.
176 using ReductionState = enum {
177 Unprocessed,
178 NotProvablyAllocatable,
179 ConservativelyAllocatable,
180 OptimallyReducible
181 };
182
183 NodeMetadata() = default;
184
185 NodeMetadata(const NodeMetadata &Other)
186 : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
187 OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
188 AllowedRegs(Other.AllowedRegs)
189#ifndef NDEBUG
190 , everConservativelyAllocatable(Other.everConservativelyAllocatable)
191#endif
192 {
193 if (NumOpts > 0) {
194 std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
195 &OptUnsafeEdges[0]);
196 }
197 }
198
199 NodeMetadata(NodeMetadata &&) = default;
200 NodeMetadata& operator=(NodeMetadata &&) = default;
201
202 void setVReg(Register VReg) { this->VReg = VReg; }
203 Register getVReg() const { return VReg; }
204
205 void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
206 this->AllowedRegs = std::move(AllowedRegs);
207 }
208 const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
209
210 void setup(const Vector& Costs) {
211 NumOpts = Costs.getLength() - 1;
212 OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
213 }
214
215 ReductionState getReductionState() const { return RS; }
216 void setReductionState(ReductionState RS) {
217 assert(RS >= this->RS && "A node's reduction state can not be downgraded");
218 this->RS = RS;
219
220#ifndef NDEBUG
221 // Remember this state to assert later that a non-infinite register
222 // option was available.
223 if (RS == ConservativelyAllocatable)
224 everConservativelyAllocatable = true;
225#endif
226 }
227
228 void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
229 DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
230 const bool* UnsafeOpts =
231 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
232 for (unsigned i = 0; i < NumOpts; ++i)
233 OptUnsafeEdges[i] += UnsafeOpts[i];
234 }
235
236 void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
237 DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
238 const bool* UnsafeOpts =
239 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
240 for (unsigned i = 0; i < NumOpts; ++i)
241 OptUnsafeEdges[i] -= UnsafeOpts[i];
242 }
243
244 bool isConservativelyAllocatable() const {
245 return (DeniedOpts < NumOpts) ||
246 (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
247 &OptUnsafeEdges[NumOpts]);
248 }
249
250#ifndef NDEBUG
251 bool wasConservativelyAllocatable() const {
252 return everConservativelyAllocatable;
253 }
254#endif
255
256private:
257 ReductionState RS = Unprocessed;
258 unsigned NumOpts = 0;
259 unsigned DeniedOpts = 0;
260 std::unique_ptr<unsigned[]> OptUnsafeEdges;
261 Register VReg;
262 GraphMetadata::AllowedRegVecRef AllowedRegs;
263
264#ifndef NDEBUG
265 bool everConservativelyAllocatable = false;
266#endif
267};
268
269class RegAllocSolverImpl {
270private:
271 using RAMatrix = MDMatrix<MatrixMetadata>;
272
273public:
274 using RawVector = PBQP::Vector;
275 using RawMatrix = PBQP::Matrix;
276 using Vector = PBQP::Vector;
277 using Matrix = RAMatrix;
278 using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;
279
280 using NodeId = GraphBase::NodeId;
281 using EdgeId = GraphBase::EdgeId;
282
283 using NodeMetadata = RegAlloc::NodeMetadata;
284 struct EdgeMetadata {};
285 using GraphMetadata = RegAlloc::GraphMetadata;
286
287 using Graph = PBQP::Graph<RegAllocSolverImpl>;
288
289 RegAllocSolverImpl(Graph &G) : G(G) {}
290
291 Solution solve() {
292 G.setSolver(*this);
293 Solution S;
294 setup();
295 S = backpropagate(G, reduce());
296 G.unsetSolver();
297 return S;
298 }
299
300 void handleAddNode(NodeId NId) {
301 assert(G.getNodeCosts(NId).getLength() > 1 &&
302 "PBQP Graph should not contain single or zero-option nodes");
303 G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
304 }
305
306 void handleRemoveNode(NodeId NId) {}
307 void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
308
309 void handleAddEdge(EdgeId EId) {
310 handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
311 handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
312 }
313
314 void handleDisconnectEdge(EdgeId EId, NodeId NId) {
315 NodeMetadata& NMd = G.getNodeMetadata(NId);
316 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
317 NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
318 promote(NId, NMd);
319 }
320
321 void handleReconnectEdge(EdgeId EId, NodeId NId) {
322 NodeMetadata& NMd = G.getNodeMetadata(NId);
323 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
324 NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
325 }
326
327 void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
328 NodeId N1Id = G.getEdgeNode1Id(EId);
329 NodeId N2Id = G.getEdgeNode2Id(EId);
330 NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
331 NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
332 bool Transpose = N1Id != G.getEdgeNode1Id(EId);
333
334 // Metadata are computed incrementally. First, update them
335 // by removing the old cost.
336 const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
337 N1Md.handleRemoveEdge(OldMMd, Transpose);
338 N2Md.handleRemoveEdge(OldMMd, !Transpose);
339
340 // And update now the metadata with the new cost.
341 const MatrixMetadata& MMd = NewCosts.getMetadata();
342 N1Md.handleAddEdge(MMd, Transpose);
343 N2Md.handleAddEdge(MMd, !Transpose);
344
345 // As the metadata may have changed with the update, the nodes may have
346 // become ConservativelyAllocatable or OptimallyReducible.
347 promote(N1Id, N1Md);
348 promote(N2Id, N2Md);
349 }
350
351private:
352 void promote(NodeId NId, NodeMetadata& NMd) {
353 if (G.getNodeDegree(NId) == 3) {
354 // This node is becoming optimally reducible.
355 moveToOptimallyReducibleNodes(NId);
356 } else if (NMd.getReductionState() ==
357 NodeMetadata::NotProvablyAllocatable &&
358 NMd.isConservativelyAllocatable()) {
359 // This node just became conservatively allocatable.
360 moveToConservativelyAllocatableNodes(NId);
361 }
362 }
363
364 void removeFromCurrentSet(NodeId NId) {
365 switch (G.getNodeMetadata(NId).getReductionState()) {
366 case NodeMetadata::Unprocessed: break;
367 case NodeMetadata::OptimallyReducible:
368 assert(OptimallyReducibleNodes.find(NId) !=
369 OptimallyReducibleNodes.end() &&
370 "Node not in optimally reducible set.");
371 OptimallyReducibleNodes.erase(NId);
372 break;
373 case NodeMetadata::ConservativelyAllocatable:
374 assert(ConservativelyAllocatableNodes.find(NId) !=
375 ConservativelyAllocatableNodes.end() &&
376 "Node not in conservatively allocatable set.");
377 ConservativelyAllocatableNodes.erase(NId);
378 break;
379 case NodeMetadata::NotProvablyAllocatable:
380 assert(NotProvablyAllocatableNodes.find(NId) !=
381 NotProvablyAllocatableNodes.end() &&
382 "Node not in not-provably-allocatable set.");
383 NotProvablyAllocatableNodes.erase(NId);
384 break;
385 }
386 }
387
388 void moveToOptimallyReducibleNodes(NodeId NId) {
389 removeFromCurrentSet(NId);
390 OptimallyReducibleNodes.insert(NId);
391 G.getNodeMetadata(NId).setReductionState(
392 NodeMetadata::OptimallyReducible);
393 }
394
395 void moveToConservativelyAllocatableNodes(NodeId NId) {
396 removeFromCurrentSet(NId);
397 ConservativelyAllocatableNodes.insert(NId);
398 G.getNodeMetadata(NId).setReductionState(
399 NodeMetadata::ConservativelyAllocatable);
400 }
401
402 void moveToNotProvablyAllocatableNodes(NodeId NId) {
403 removeFromCurrentSet(NId);
404 NotProvablyAllocatableNodes.insert(NId);
405 G.getNodeMetadata(NId).setReductionState(
406 NodeMetadata::NotProvablyAllocatable);
407 }
408
409 void setup() {
410 // Set up worklists.
411 for (auto NId : G.nodeIds()) {
412 if (G.getNodeDegree(NId) < 3)
413 moveToOptimallyReducibleNodes(NId);
414 else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
415 moveToConservativelyAllocatableNodes(NId);
416 else
417 moveToNotProvablyAllocatableNodes(NId);
418 }
419 }
420
421 // Compute a reduction order for the graph by iteratively applying PBQP
422 // reduction rules. Locally optimal rules are applied whenever possible (R0,
423 // R1, R2). If no locally-optimal rules apply then any conservatively
424 // allocatable node is reduced. Finally, if no conservatively allocatable
425 // node exists then the node with the lowest spill-cost:degree ratio is
426 // selected.
427 std::vector<GraphBase::NodeId> reduce() {
428 assert(!G.empty() && "Cannot reduce empty graph.");
429
430 using NodeId = GraphBase::NodeId;
431 std::vector<NodeId> NodeStack;
432
433 // Consume worklists.
434 while (true) {
435 if (!OptimallyReducibleNodes.empty()) {
436 NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
437 NodeId NId = *NItr;
438 OptimallyReducibleNodes.erase(NItr);
439 NodeStack.push_back(NId);
440 switch (G.getNodeDegree(NId)) {
441 case 0:
442 break;
443 case 1:
444 applyR1(G, NId);
445 break;
446 case 2:
447 applyR2(G, NId);
448 break;
449 default: llvm_unreachable("Not an optimally reducible node.");
450 }
451 } else if (!ConservativelyAllocatableNodes.empty()) {
452 // Conservatively allocatable nodes will never spill. For now just
453 // take the first node in the set and push it on the stack. When we
454 // start optimizing more heavily for register preferencing, it may
455 // be better to push nodes with lower 'expected' or worst-case
456 // register costs first (since early nodes are the most
457 // constrained).
458 NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
459 NodeId NId = *NItr;
460 ConservativelyAllocatableNodes.erase(NItr);
461 NodeStack.push_back(NId);
462 G.disconnectAllNeighborsFromNode(NId);
463 } else if (!NotProvablyAllocatableNodes.empty()) {
464 NodeSet::iterator NItr =
465 std::min_element(NotProvablyAllocatableNodes.begin(),
466 NotProvablyAllocatableNodes.end(),
467 SpillCostComparator(G));
468 NodeId NId = *NItr;
469 NotProvablyAllocatableNodes.erase(NItr);
470 NodeStack.push_back(NId);
471 G.disconnectAllNeighborsFromNode(NId);
472 } else
473 break;
474 }
475
476 return NodeStack;
477 }
478
479 class SpillCostComparator {
480 public:
481 SpillCostComparator(const Graph& G) : G(G) {}
482
483 bool operator()(NodeId N1Id, NodeId N2Id) {
484 PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
485 PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
486 if (N1SC == N2SC)
487 return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
488 return N1SC < N2SC;
489 }
490
491 private:
492 const Graph& G;
493 };
494
495 Graph& G;
496 using NodeSet = std::set<NodeId>;
497 NodeSet OptimallyReducibleNodes;
498 NodeSet ConservativelyAllocatableNodes;
499 NodeSet NotProvablyAllocatableNodes;
500};
501
502class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
503private:
504 using BaseT = PBQP::Graph<RegAllocSolverImpl>;
505
506public:
507 PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
508
509 /// Dump this graph to dbgs().
510 void dump() const;
511
512 /// Dump this graph to an output stream.
513 /// @param OS Output stream to print on.
514 void dump(raw_ostream &OS) const;
515
516 /// Print a representation of this graph in DOT format.
517 /// @param OS Output stream to print on.
518 void printDot(raw_ostream &OS) const;
519};
520
521inline Solution solve(PBQPRAGraph& G) {
522 if (G.empty())
523 return Solution();
524 RegAllocSolverImpl RegAllocSolver(G);
525 return RegAllocSolver.solve();
526}
527
528} // end namespace RegAlloc
529} // end namespace PBQP
530
531/// Create a PBQP register allocator instance.
532FunctionPass *
533createPBQPRegisterAllocator(char *customPassID = nullptr);
534
535} // end namespace llvm
536
537#endif // LLVM_CODEGEN_REGALLOCPBQP_H