File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/RDFGraph.cpp
Warning: line 1701, column 29: Called C++ object pointer is null
1 | //===- RDFGraph.cpp -------------------------------------------------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // Target-independent, SSA-based data flow graph for register data flow (RDF). | |||
10 | // | |||
11 | #include "llvm/ADT/BitVector.h" | |||
12 | #include "llvm/ADT/STLExtras.h" | |||
13 | #include "llvm/ADT/SetVector.h" | |||
14 | #include "llvm/CodeGen/MachineBasicBlock.h" | |||
15 | #include "llvm/CodeGen/MachineDominanceFrontier.h" | |||
16 | #include "llvm/CodeGen/MachineDominators.h" | |||
17 | #include "llvm/CodeGen/MachineFunction.h" | |||
18 | #include "llvm/CodeGen/MachineInstr.h" | |||
19 | #include "llvm/CodeGen/MachineOperand.h" | |||
20 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
21 | #include "llvm/CodeGen/RDFGraph.h" | |||
22 | #include "llvm/CodeGen/RDFRegisters.h" | |||
23 | #include "llvm/CodeGen/TargetInstrInfo.h" | |||
24 | #include "llvm/CodeGen/TargetLowering.h" | |||
25 | #include "llvm/CodeGen/TargetRegisterInfo.h" | |||
26 | #include "llvm/CodeGen/TargetSubtargetInfo.h" | |||
27 | #include "llvm/IR/Function.h" | |||
28 | #include "llvm/MC/LaneBitmask.h" | |||
29 | #include "llvm/MC/MCInstrDesc.h" | |||
30 | #include "llvm/MC/MCRegisterInfo.h" | |||
31 | #include "llvm/Support/Debug.h" | |||
32 | #include "llvm/Support/ErrorHandling.h" | |||
33 | #include "llvm/Support/raw_ostream.h" | |||
34 | #include <algorithm> | |||
35 | #include <cassert> | |||
36 | #include <cstdint> | |||
37 | #include <cstring> | |||
38 | #include <iterator> | |||
39 | #include <set> | |||
40 | #include <utility> | |||
41 | #include <vector> | |||
42 | ||||
43 | using namespace llvm; | |||
44 | using namespace rdf; | |||
45 | ||||
46 | // Printing functions. Have them here first, so that the rest of the code | |||
47 | // can use them. | |||
48 | namespace llvm { | |||
49 | namespace rdf { | |||
50 | ||||
51 | raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) { | |||
52 | if (!P.Mask.all()) | |||
53 | OS << ':' << PrintLaneMask(P.Mask); | |||
54 | return OS; | |||
55 | } | |||
56 | ||||
57 | raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) { | |||
58 | auto &TRI = P.G.getTRI(); | |||
59 | if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs()) | |||
60 | OS << TRI.getName(P.Obj.Reg); | |||
61 | else | |||
62 | OS << '#' << P.Obj.Reg; | |||
63 | OS << PrintLaneMaskOpt(P.Obj.Mask); | |||
64 | return OS; | |||
65 | } | |||
66 | ||||
67 | raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) { | |||
68 | auto NA = P.G.addr<NodeBase*>(P.Obj); | |||
69 | uint16_t Attrs = NA.Addr->getAttrs(); | |||
70 | uint16_t Kind = NodeAttrs::kind(Attrs); | |||
71 | uint16_t Flags = NodeAttrs::flags(Attrs); | |||
72 | switch (NodeAttrs::type(Attrs)) { | |||
73 | case NodeAttrs::Code: | |||
74 | switch (Kind) { | |||
75 | case NodeAttrs::Func: OS << 'f'; break; | |||
76 | case NodeAttrs::Block: OS << 'b'; break; | |||
77 | case NodeAttrs::Stmt: OS << 's'; break; | |||
78 | case NodeAttrs::Phi: OS << 'p'; break; | |||
79 | default: OS << "c?"; break; | |||
80 | } | |||
81 | break; | |||
82 | case NodeAttrs::Ref: | |||
83 | if (Flags & NodeAttrs::Undef) | |||
84 | OS << '/'; | |||
85 | if (Flags & NodeAttrs::Dead) | |||
86 | OS << '\\'; | |||
87 | if (Flags & NodeAttrs::Preserving) | |||
88 | OS << '+'; | |||
89 | if (Flags & NodeAttrs::Clobbering) | |||
90 | OS << '~'; | |||
91 | switch (Kind) { | |||
92 | case NodeAttrs::Use: OS << 'u'; break; | |||
93 | case NodeAttrs::Def: OS << 'd'; break; | |||
94 | case NodeAttrs::Block: OS << 'b'; break; | |||
95 | default: OS << "r?"; break; | |||
96 | } | |||
97 | break; | |||
98 | default: | |||
99 | OS << '?'; | |||
100 | break; | |||
101 | } | |||
102 | OS << P.Obj; | |||
103 | if (Flags & NodeAttrs::Shadow) | |||
104 | OS << '"'; | |||
105 | return OS; | |||
106 | } | |||
107 | ||||
108 | static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA, | |||
109 | const DataFlowGraph &G) { | |||
110 | OS << Print<NodeId>(RA.Id, G) << '<' | |||
111 | << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>'; | |||
112 | if (RA.Addr->getFlags() & NodeAttrs::Fixed) | |||
113 | OS << '!'; | |||
114 | } | |||
115 | ||||
116 | raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) { | |||
117 | printRefHeader(OS, P.Obj, P.G); | |||
118 | OS << '('; | |||
119 | if (NodeId N = P.Obj.Addr->getReachingDef()) | |||
120 | OS << Print<NodeId>(N, P.G); | |||
121 | OS << ','; | |||
122 | if (NodeId N = P.Obj.Addr->getReachedDef()) | |||
123 | OS << Print<NodeId>(N, P.G); | |||
124 | OS << ','; | |||
125 | if (NodeId N = P.Obj.Addr->getReachedUse()) | |||
126 | OS << Print<NodeId>(N, P.G); | |||
127 | OS << "):"; | |||
128 | if (NodeId N = P.Obj.Addr->getSibling()) | |||
129 | OS << Print<NodeId>(N, P.G); | |||
130 | return OS; | |||
131 | } | |||
132 | ||||
133 | raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) { | |||
134 | printRefHeader(OS, P.Obj, P.G); | |||
135 | OS << '('; | |||
136 | if (NodeId N = P.Obj.Addr->getReachingDef()) | |||
137 | OS << Print<NodeId>(N, P.G); | |||
138 | OS << "):"; | |||
139 | if (NodeId N = P.Obj.Addr->getSibling()) | |||
140 | OS << Print<NodeId>(N, P.G); | |||
141 | return OS; | |||
142 | } | |||
143 | ||||
144 | raw_ostream &operator<< (raw_ostream &OS, | |||
145 | const Print<NodeAddr<PhiUseNode*>> &P) { | |||
146 | printRefHeader(OS, P.Obj, P.G); | |||
147 | OS << '('; | |||
148 | if (NodeId N = P.Obj.Addr->getReachingDef()) | |||
149 | OS << Print<NodeId>(N, P.G); | |||
150 | OS << ','; | |||
151 | if (NodeId N = P.Obj.Addr->getPredecessor()) | |||
152 | OS << Print<NodeId>(N, P.G); | |||
153 | OS << "):"; | |||
154 | if (NodeId N = P.Obj.Addr->getSibling()) | |||
155 | OS << Print<NodeId>(N, P.G); | |||
156 | return OS; | |||
157 | } | |||
158 | ||||
159 | raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) { | |||
160 | switch (P.Obj.Addr->getKind()) { | |||
161 | case NodeAttrs::Def: | |||
162 | OS << PrintNode<DefNode*>(P.Obj, P.G); | |||
163 | break; | |||
164 | case NodeAttrs::Use: | |||
165 | if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef) | |||
166 | OS << PrintNode<PhiUseNode*>(P.Obj, P.G); | |||
167 | else | |||
168 | OS << PrintNode<UseNode*>(P.Obj, P.G); | |||
169 | break; | |||
170 | } | |||
171 | return OS; | |||
172 | } | |||
173 | ||||
174 | raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) { | |||
175 | unsigned N = P.Obj.size(); | |||
176 | for (auto I : P.Obj) { | |||
177 | OS << Print<NodeId>(I.Id, P.G); | |||
178 | if (--N) | |||
179 | OS << ' '; | |||
180 | } | |||
181 | return OS; | |||
182 | } | |||
183 | ||||
184 | raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) { | |||
185 | unsigned N = P.Obj.size(); | |||
186 | for (auto I : P.Obj) { | |||
187 | OS << Print<NodeId>(I, P.G); | |||
188 | if (--N) | |||
189 | OS << ' '; | |||
190 | } | |||
191 | return OS; | |||
192 | } | |||
193 | ||||
194 | namespace { | |||
195 | ||||
196 | template <typename T> | |||
197 | struct PrintListV { | |||
198 | PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {} | |||
199 | ||||
200 | using Type = T; | |||
201 | const NodeList &List; | |||
202 | const DataFlowGraph &G; | |||
203 | }; | |||
204 | ||||
205 | template <typename T> | |||
206 | raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) { | |||
207 | unsigned N = P.List.size(); | |||
208 | for (NodeAddr<T> A : P.List) { | |||
209 | OS << PrintNode<T>(A, P.G); | |||
210 | if (--N) | |||
211 | OS << ", "; | |||
212 | } | |||
213 | return OS; | |||
214 | } | |||
215 | ||||
216 | } // end anonymous namespace | |||
217 | ||||
218 | raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) { | |||
219 | OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi [" | |||
220 | << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']'; | |||
221 | return OS; | |||
222 | } | |||
223 | ||||
224 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<StmtNode *>> &P) { | |||
225 | const MachineInstr &MI = *P.Obj.Addr->getCode(); | |||
226 | unsigned Opc = MI.getOpcode(); | |||
227 | OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc); | |||
228 | // Print the target for calls and branches (for readability). | |||
229 | if (MI.isCall() || MI.isBranch()) { | |||
230 | MachineInstr::const_mop_iterator T = | |||
231 | llvm::find_if(MI.operands(), | |||
232 | [] (const MachineOperand &Op) -> bool { | |||
233 | return Op.isMBB() || Op.isGlobal() || Op.isSymbol(); | |||
234 | }); | |||
235 | if (T != MI.operands_end()) { | |||
236 | OS << ' '; | |||
237 | if (T->isMBB()) | |||
238 | OS << printMBBReference(*T->getMBB()); | |||
239 | else if (T->isGlobal()) | |||
240 | OS << T->getGlobal()->getName(); | |||
241 | else if (T->isSymbol()) | |||
242 | OS << T->getSymbolName(); | |||
243 | } | |||
244 | } | |||
245 | OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']'; | |||
246 | return OS; | |||
247 | } | |||
248 | ||||
249 | raw_ostream &operator<< (raw_ostream &OS, | |||
250 | const Print<NodeAddr<InstrNode*>> &P) { | |||
251 | switch (P.Obj.Addr->getKind()) { | |||
252 | case NodeAttrs::Phi: | |||
253 | OS << PrintNode<PhiNode*>(P.Obj, P.G); | |||
254 | break; | |||
255 | case NodeAttrs::Stmt: | |||
256 | OS << PrintNode<StmtNode*>(P.Obj, P.G); | |||
257 | break; | |||
258 | default: | |||
259 | OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G); | |||
260 | break; | |||
261 | } | |||
262 | return OS; | |||
263 | } | |||
264 | ||||
265 | raw_ostream &operator<< (raw_ostream &OS, | |||
266 | const Print<NodeAddr<BlockNode*>> &P) { | |||
267 | MachineBasicBlock *BB = P.Obj.Addr->getCode(); | |||
268 | unsigned NP = BB->pred_size(); | |||
269 | std::vector<int> Ns; | |||
270 | auto PrintBBs = [&OS] (std::vector<int> Ns) -> void { | |||
271 | unsigned N = Ns.size(); | |||
272 | for (int I : Ns) { | |||
273 | OS << "%bb." << I; | |||
274 | if (--N) | |||
275 | OS << ", "; | |||
276 | } | |||
277 | }; | |||
278 | ||||
279 | OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB) | |||
280 | << " --- preds(" << NP << "): "; | |||
281 | for (MachineBasicBlock *B : BB->predecessors()) | |||
282 | Ns.push_back(B->getNumber()); | |||
283 | PrintBBs(Ns); | |||
284 | ||||
285 | unsigned NS = BB->succ_size(); | |||
286 | OS << " succs(" << NS << "): "; | |||
287 | Ns.clear(); | |||
288 | for (MachineBasicBlock *B : BB->successors()) | |||
289 | Ns.push_back(B->getNumber()); | |||
290 | PrintBBs(Ns); | |||
291 | OS << '\n'; | |||
292 | ||||
293 | for (auto I : P.Obj.Addr->members(P.G)) | |||
294 | OS << PrintNode<InstrNode*>(I, P.G) << '\n'; | |||
295 | return OS; | |||
296 | } | |||
297 | ||||
298 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<FuncNode *>> &P) { | |||
299 | OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: " | |||
300 | << P.Obj.Addr->getCode()->getName() << '\n'; | |||
301 | for (auto I : P.Obj.Addr->members(P.G)) | |||
302 | OS << PrintNode<BlockNode*>(I, P.G) << '\n'; | |||
303 | OS << "]\n"; | |||
304 | return OS; | |||
305 | } | |||
306 | ||||
307 | raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) { | |||
308 | OS << '{'; | |||
309 | for (auto I : P.Obj) | |||
310 | OS << ' ' << Print<RegisterRef>(I, P.G); | |||
311 | OS << " }"; | |||
312 | return OS; | |||
313 | } | |||
314 | ||||
315 | raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) { | |||
316 | P.Obj.print(OS); | |||
317 | return OS; | |||
318 | } | |||
319 | ||||
320 | raw_ostream &operator<< (raw_ostream &OS, | |||
321 | const Print<DataFlowGraph::DefStack> &P) { | |||
322 | for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) { | |||
323 | OS << Print<NodeId>(I->Id, P.G) | |||
324 | << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>'; | |||
325 | I.down(); | |||
326 | if (I != E) | |||
327 | OS << ' '; | |||
328 | } | |||
329 | return OS; | |||
330 | } | |||
331 | ||||
332 | } // end namespace rdf | |||
333 | } // end namespace llvm | |||
334 | ||||
335 | // Node allocation functions. | |||
336 | // | |||
337 | // Node allocator is like a slab memory allocator: it allocates blocks of | |||
338 | // memory in sizes that are multiples of the size of a node. Each block has | |||
339 | // the same size. Nodes are allocated from the currently active block, and | |||
340 | // when it becomes full, a new one is created. | |||
341 | // The mapping scheme between a node id and its location (the block that | |||
342 | // holds it, and the position within that block) is described in the header file. | |||
343 | // | |||
344 | void NodeAllocator::startNewBlock() { | |||
345 | void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize); | |||
346 | char *P = static_cast<char*>(T); | |||
347 | Blocks.push_back(P); | |||
348 | // Check if the block index is still within the allowed range, i.e. less | |||
349 | // than 2^N, where N is the number of bits in NodeId for the block index. | |||
350 | // BitsPerIndex is the number of bits per node index. | |||
351 | assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) && | |||
352 | "Out of bits for block index")((void)0); | |||
353 | ActiveEnd = P; | |||
354 | } | |||
355 | ||||
356 | bool NodeAllocator::needNewBlock() { | |||
357 | if (Blocks.empty()) | |||
358 | return true; | |||
359 | ||||
360 | char *ActiveBegin = Blocks.back(); | |||
361 | uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize; | |||
362 | return Index >= NodesPerBlock; | |||
363 | } | |||
364 | ||||
365 | NodeAddr<NodeBase*> NodeAllocator::New() { | |||
366 | if (needNewBlock()) | |||
367 | startNewBlock(); | |||
368 | ||||
369 | uint32_t ActiveB = Blocks.size()-1; | |||
370 | uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize; | |||
371 | NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd), | |||
372 | makeId(ActiveB, Index) }; | |||
373 | ActiveEnd += NodeMemSize; | |||
374 | return NA; | |||
375 | } | |||
376 | ||||
377 | NodeId NodeAllocator::id(const NodeBase *P) const { | |||
378 | uintptr_t A = reinterpret_cast<uintptr_t>(P); | |||
379 | for (unsigned i = 0, n = Blocks.size(); i != n; ++i) { | |||
380 | uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]); | |||
381 | if (A < B || A >= B + NodesPerBlock*NodeMemSize) | |||
382 | continue; | |||
383 | uint32_t Idx = (A-B)/NodeMemSize; | |||
384 | return makeId(i, Idx); | |||
385 | } | |||
386 | llvm_unreachable("Invalid node address")__builtin_unreachable(); | |||
387 | } | |||
388 | ||||
389 | void NodeAllocator::clear() { | |||
390 | MemPool.Reset(); | |||
391 | Blocks.clear(); | |||
392 | ActiveEnd = nullptr; | |||
393 | } | |||
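// A rough, illustrative sketch of the id <-> (block, index) mapping that New()
// and id() above rely on. The authoritative definitions live in RDFGraph.h;
// the bodies below are an assumption, reusing the same BitsPerIndex,
// NodeMemSize and Blocks members that appear in the code above:
//
//   NodeId makeId(uint32_t Block, uint32_t Index) const {
//     // Add 1 so that a valid node never receives id 0, which means "null".
//     return ((Block << BitsPerIndex) | Index) + 1;
//   }
//   NodeBase *ptr(NodeId N) const {                 // inverse of makeId
//     uint32_t V = N - 1;
//     uint32_t Mask = (uint32_t(1) << BitsPerIndex) - 1;
//     return reinterpret_cast<NodeBase*>(Blocks[V >> BitsPerIndex] +
//                                        (V & Mask) * NodeMemSize);
//   }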
394 | ||||
395 | // Insert node NA after "this" in the circular chain. | |||
396 | void NodeBase::append(NodeAddr<NodeBase*> NA) { | |||
397 | NodeId Nx = Next; | |||
398 | // If NA is already "next", do nothing. | |||
399 | if (Next != NA.Id) { | |||
400 | Next = NA.Id; | |||
401 | NA.Addr->Next = Nx; | |||
402 | } | |||
403 | } | |||
404 | ||||
405 | // Fundamental node manipulator functions. | |||
406 | ||||
407 | // Obtain the register reference from a reference node. | |||
408 | RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const { | |||
409 | assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref)((void)0); | |||
410 | if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef) | |||
411 | return G.unpack(Ref.PR); | |||
412 | assert(Ref.Op != nullptr)((void)0); | |||
413 | return G.makeRegRef(*Ref.Op); | |||
414 | } | |||
415 | ||||
416 | // Set the register reference in the reference node directly (for references | |||
417 | // in phi nodes). | |||
418 | void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) { | |||
419 | assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref)((void)0); | |||
420 | assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)((void)0); | |||
421 | Ref.PR = G.pack(RR); | |||
422 | } | |||
423 | ||||
424 | // Set the register reference in the reference node based on a machine | |||
425 | // operand (for references in statement nodes). | |||
426 | void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) { | |||
427 | assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref)((void)0); | |||
428 | assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef))((void)0); | |||
429 | (void)G; | |||
430 | Ref.Op = Op; | |||
431 | } | |||
432 | ||||
433 | // Get the owner of a given reference node. | |||
434 | NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) { | |||
435 | NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext()); | |||
436 | ||||
437 | while (NA.Addr != this) { | |||
438 | if (NA.Addr->getType() == NodeAttrs::Code) | |||
439 | return NA; | |||
440 | NA = G.addr<NodeBase*>(NA.Addr->getNext()); | |||
441 | } | |||
442 | llvm_unreachable("No owner in circular list")__builtin_unreachable(); | |||
443 | } | |||
444 | ||||
445 | // Connect the def node to the reaching def node. | |||
446 | void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) { | |||
447 | Ref.RD = DA.Id; | |||
448 | Ref.Sib = DA.Addr->getReachedDef(); | |||
449 | DA.Addr->setReachedDef(Self); | |||
450 | } | |||
451 | ||||
452 | // Connect the use node to the reaching def node. | |||
453 | void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) { | |||
454 | Ref.RD = DA.Id; | |||
455 | Ref.Sib = DA.Addr->getReachedUse(); | |||
456 | DA.Addr->setReachedUse(Self); | |||
457 | } | |||
458 | ||||
459 | // Get the first member of the code node. | |||
460 | NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const { | |||
461 | if (Code.FirstM == 0) | |||
462 | return NodeAddr<NodeBase*>(); | |||
463 | return G.addr<NodeBase*>(Code.FirstM); | |||
464 | } | |||
465 | ||||
466 | // Get the last member of the code node. | |||
467 | NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const { | |||
468 | if (Code.LastM == 0) | |||
469 | return NodeAddr<NodeBase*>(); | |||
470 | return G.addr<NodeBase*>(Code.LastM); | |||
471 | } | |||
472 | ||||
473 | // Add node NA at the end of the member list of the given code node. | |||
474 | void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) { | |||
475 | NodeAddr<NodeBase*> ML = getLastMember(G); | |||
476 | if (ML.Id != 0) { | |||
477 | ML.Addr->append(NA); | |||
478 | } else { | |||
479 | Code.FirstM = NA.Id; | |||
480 | NodeId Self = G.id(this); | |||
481 | NA.Addr->setNext(Self); | |||
482 | } | |||
483 | Code.LastM = NA.Id; | |||
484 | } | |||
485 | ||||
486 | // Add node NA after member node MA in the given code node. | |||
487 | void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA, | |||
488 | const DataFlowGraph &G) { | |||
489 | MA.Addr->append(NA); | |||
490 | if (Code.LastM == MA.Id) | |||
491 | Code.LastM = NA.Id; | |||
492 | } | |||
493 | ||||
494 | // Remove member node NA from the given code node. | |||
495 | void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) { | |||
496 | NodeAddr<NodeBase*> MA = getFirstMember(G); | |||
497 | assert(MA.Id != 0)((void)0); | |||
498 | ||||
499 | // Special handling if the member to remove is the first member. | |||
500 | if (MA.Id == NA.Id) { | |||
501 | if (Code.LastM == MA.Id) { | |||
502 | // If it is the only member, set both first and last to 0. | |||
503 | Code.FirstM = Code.LastM = 0; | |||
504 | } else { | |||
505 | // Otherwise, advance the first member. | |||
506 | Code.FirstM = MA.Addr->getNext(); | |||
507 | } | |||
508 | return; | |||
509 | } | |||
510 | ||||
511 | while (MA.Addr != this) { | |||
512 | NodeId MX = MA.Addr->getNext(); | |||
513 | if (MX == NA.Id) { | |||
514 | MA.Addr->setNext(NA.Addr->getNext()); | |||
515 | // If the member to remove happens to be the last one, update the | |||
516 | // LastM indicator. | |||
517 | if (Code.LastM == NA.Id) | |||
518 | Code.LastM = MA.Id; | |||
519 | return; | |||
520 | } | |||
521 | MA = G.addr<NodeBase*>(MX); | |||
522 | } | |||
523 | llvm_unreachable("No such member")__builtin_unreachable(); | |||
524 | } | |||
525 | ||||
526 | // Return the list of all members of the code node. | |||
527 | NodeList CodeNode::members(const DataFlowGraph &G) const { | |||
528 | static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; }; | |||
529 | return members_if(True, G); | |||
530 | } | |||
531 | ||||
532 | // Return the owner of the given instr node. | |||
533 | NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) { | |||
534 | NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext()); | |||
535 | ||||
536 | while (NA.Addr != this) { | |||
537 | assert(NA.Addr->getType() == NodeAttrs::Code)((void)0); | |||
538 | if (NA.Addr->getKind() == NodeAttrs::Block) | |||
539 | return NA; | |||
540 | NA = G.addr<NodeBase*>(NA.Addr->getNext()); | |||
541 | } | |||
542 | llvm_unreachable("No owner in circular list")__builtin_unreachable(); | |||
543 | } | |||
544 | ||||
545 | // Add the phi node PA to the given block node. | |||
546 | void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) { | |||
547 | NodeAddr<NodeBase*> M = getFirstMember(G); | |||
548 | if (M.Id == 0) { | |||
549 | addMember(PA, G); | |||
550 | return; | |||
551 | } | |||
552 | ||||
553 | assert(M.Addr->getType() == NodeAttrs::Code)((void)0); | |||
554 | if (M.Addr->getKind() == NodeAttrs::Stmt) { | |||
555 | // If the first member of the block is a statement, insert the phi as | |||
556 | // the first member. | |||
557 | Code.FirstM = PA.Id; | |||
558 | PA.Addr->setNext(M.Id); | |||
559 | } else { | |||
560 | // If the first member is a phi, find the last phi, and append PA to it. | |||
561 | assert(M.Addr->getKind() == NodeAttrs::Phi)((void)0); | |||
562 | NodeAddr<NodeBase*> MN = M; | |||
563 | do { | |||
564 | M = MN; | |||
565 | MN = G.addr<NodeBase*>(M.Addr->getNext()); | |||
566 | assert(MN.Addr->getType() == NodeAttrs::Code)((void)0); | |||
567 | } while (MN.Addr->getKind() == NodeAttrs::Phi); | |||
568 | ||||
569 | // M is the last phi. | |||
570 | addMemberAfter(M, PA, G); | |||
571 | } | |||
572 | } | |||
573 | ||||
574 | // Find the block node corresponding to the machine basic block BB in the | |||
575 | // given func node. | |||
576 | NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB, | |||
577 | const DataFlowGraph &G) const { | |||
578 | auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool { | |||
579 | return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB; | |||
580 | }; | |||
581 | NodeList Ms = members_if(EqBB, G); | |||
582 | if (!Ms.empty()) | |||
583 | return Ms[0]; | |||
584 | return NodeAddr<BlockNode*>(); | |||
585 | } | |||
586 | ||||
587 | // Get the block node for the entry block in the given function. | |||
588 | NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) { | |||
589 | MachineBasicBlock *EntryB = &getCode()->front(); | |||
590 | return findBlock(EntryB, G); | |||
591 | } | |||
592 | ||||
593 | // Target operand information. | |||
594 | // | |||
595 | ||||
596 | // For a given instruction, check if there are any bits of RR that can remain | |||
597 | // unchanged across this def. | |||
598 | bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum) | |||
599 | const { | |||
600 | return TII.isPredicated(In); | |||
601 | } | |||
602 | ||||
603 | // Check if the definition of RR produces an unspecified value. | |||
604 | bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum) | |||
605 | const { | |||
606 | const MachineOperand &Op = In.getOperand(OpNum); | |||
607 | if (Op.isRegMask()) | |||
608 | return true; | |||
609 | assert(Op.isReg())((void)0); | |||
610 | if (In.isCall()) | |||
611 | if (Op.isDef() && Op.isDead()) | |||
612 | return true; | |||
613 | return false; | |||
614 | } | |||
615 | ||||
616 | // Check if the given instruction specifically requires operand OpNum to be a fixed physical register. | |||
617 | bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum) | |||
618 | const { | |||
619 | if (In.isCall() || In.isReturn() || In.isInlineAsm()) | |||
620 | return true; | |||
621 | // Check for a tail call. | |||
622 | if (In.isBranch()) | |||
623 | for (const MachineOperand &O : In.operands()) | |||
624 | if (O.isGlobal() || O.isSymbol()) | |||
625 | return true; | |||
626 | ||||
627 | const MCInstrDesc &D = In.getDesc(); | |||
628 | if (!D.getImplicitDefs() && !D.getImplicitUses()) | |||
629 | return false; | |||
630 | const MachineOperand &Op = In.getOperand(OpNum); | |||
631 | // If there is a sub-register, treat the operand as non-fixed. Currently, | |||
632 | // fixed registers are those that are listed in the descriptor as implicit | |||
633 | // uses or defs, and those lists do not allow sub-registers. | |||
634 | if (Op.getSubReg() != 0) | |||
635 | return false; | |||
636 | Register Reg = Op.getReg(); | |||
637 | const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs() | |||
638 | : D.getImplicitUses(); | |||
639 | if (!ImpR) | |||
640 | return false; | |||
641 | while (*ImpR) | |||
642 | if (*ImpR++ == Reg) | |||
643 | return true; | |||
644 | return false; | |||
645 | } | |||
646 | ||||
647 | // | |||
648 | // The data flow graph construction. | |||
649 | // | |||
650 | ||||
651 | DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii, | |||
652 | const TargetRegisterInfo &tri, const MachineDominatorTree &mdt, | |||
653 | const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi) | |||
654 | : MF(mf), TII(tii), TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(toi), | |||
655 | LiveIns(PRI) { | |||
656 | } | |||
657 | ||||
658 | // The implementation of the definition stack. | |||
659 | // Each register reference has its own definition stack. In particular, | |||
660 | // for a register, the references "Reg" and "Reg:subreg" will each have their | |||
661 | // own definition stacks. | |||
662 | ||||
663 | // Construct a stack iterator. | |||
664 | DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S, | |||
665 | bool Top) : DS(S) { | |||
666 | if (!Top) { | |||
667 | // Initialize to bottom. | |||
668 | Pos = 0; | |||
669 | return; | |||
670 | } | |||
671 | // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty). | |||
672 | Pos = DS.Stack.size(); | |||
673 | while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1])) | |||
674 | Pos--; | |||
675 | } | |||
676 | ||||
677 | // Return the size of the stack, including block delimiters. | |||
678 | unsigned DataFlowGraph::DefStack::size() const { | |||
679 | unsigned S = 0; | |||
680 | for (auto I = top(), E = bottom(); I != E; I.down()) | |||
681 | S++; | |||
682 | return S; | |||
683 | } | |||
684 | ||||
685 | // Remove the top entry from the stack. Remove all intervening delimiters | |||
686 | // so that after this, the stack is either empty, or the top of the stack | |||
687 | // is a non-delimiter. | |||
688 | void DataFlowGraph::DefStack::pop() { | |||
689 | assert(!empty())((void)0); | |||
690 | unsigned P = nextDown(Stack.size()); | |||
691 | Stack.resize(P); | |||
692 | } | |||
693 | ||||
694 | // Push a delimiter for block node N on the stack. | |||
695 | void DataFlowGraph::DefStack::start_block(NodeId N) { | |||
696 | assert(N != 0)((void)0); | |||
697 | Stack.push_back(NodeAddr<DefNode*>(nullptr, N)); | |||
698 | } | |||
699 | ||||
700 | // Remove all nodes from the top of the stack, until the delimiter for | |||
701 | // block node N is encountered. Remove the delimiter as well. In effect, | |||
702 | // this will remove from the stack all definitions from block N. | |||
703 | void DataFlowGraph::DefStack::clear_block(NodeId N) { | |||
704 | assert(N != 0)((void)0); | |||
705 | unsigned P = Stack.size(); | |||
706 | while (P > 0) { | |||
707 | bool Found = isDelimiter(Stack[P-1], N); | |||
708 | P--; | |||
709 | if (Found) | |||
710 | break; | |||
711 | } | |||
712 | // This will also remove the delimiter, if found. | |||
713 | Stack.resize(P); | |||
714 | } | |||
715 | ||||
716 | // Move the stack iterator up by one. | |||
717 | unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const { | |||
718 | // Get the next valid position after P (skipping all delimiters). | |||
719 | // The input position P does not have to point to a non-delimiter. | |||
720 | unsigned SS = Stack.size(); | |||
721 | bool IsDelim; | |||
722 | assert(P < SS)((void)0); | |||
723 | do { | |||
724 | P++; | |||
725 | IsDelim = isDelimiter(Stack[P-1]); | |||
726 | } while (P < SS && IsDelim); | |||
727 | assert(!IsDelim)((void)0); | |||
728 | return P; | |||
729 | } | |||
730 | ||||
731 | // Move the stack iterator down by one. | |||
732 | unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const { | |||
733 | // Get the preceding valid position before P (skipping all delimiters). | |||
734 | // The input position P does not have to point to a non-delimiter. | |||
735 | assert(P > 0 && P <= Stack.size())((void)0); | |||
736 | bool IsDelim = isDelimiter(Stack[P-1]); | |||
737 | do { | |||
738 | if (--P == 0) | |||
739 | break; | |||
740 | IsDelim = isDelimiter(Stack[P-1]); | |||
741 | } while (P > 0 && IsDelim); | |||
742 | assert(!IsDelim)((void)0); | |||
743 | return P; | |||
744 | } | |||
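// A hedged sketch (an assumption, not code from this file) of how the def
// stacks above are typically used: linkBlockRefs, invoked from build() below,
// walks the dominator tree and brackets each block's definitions with
// delimiters so that leaving the block can discard them wholesale. Roughly:
//
//   markBlock(BA.Id, DefM);              // push a delimiter on every stack
//   for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
//     pushAllDefs(IA, DefM);             // this block's defs go on top
//   // ... recurse into dominated blocks, where these defs remain visible ...
//   releaseBlock(BA.Id, DefM);           // pop back down to the delimiter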
745 | ||||
746 | // Register information. | |||
747 | ||||
748 | RegisterSet DataFlowGraph::getLandingPadLiveIns() const { | |||
749 | RegisterSet LR; | |||
750 | const Function &F = MF.getFunction(); | |||
751 | const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn() | |||
752 | : nullptr; | |||
753 | const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering(); | |||
754 | if (RegisterId R = TLI.getExceptionPointerRegister(PF)) | |||
755 | LR.insert(RegisterRef(R)); | |||
756 | if (!isFuncletEHPersonality(classifyEHPersonality(PF))) { | |||
757 | if (RegisterId R = TLI.getExceptionSelectorRegister(PF)) | |||
758 | LR.insert(RegisterRef(R)); | |||
759 | } | |||
760 | return LR; | |||
761 | } | |||
762 | ||||
763 | // Node management functions. | |||
764 | ||||
765 | // Get the pointer to the node with the id N. | |||
766 | NodeBase *DataFlowGraph::ptr(NodeId N) const { | |||
767 | if (N == 0) | |||
768 | return nullptr; | |||
769 | return Memory.ptr(N); | |||
770 | } | |||
771 | ||||
772 | // Get the id of the node at the address P. | |||
773 | NodeId DataFlowGraph::id(const NodeBase *P) const { | |||
774 | if (P == nullptr) | |||
775 | return 0; | |||
776 | return Memory.id(P); | |||
777 | } | |||
778 | ||||
779 | // Allocate a new node and set the attributes to Attrs. | |||
780 | NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) { | |||
781 | NodeAddr<NodeBase*> P = Memory.New(); | |||
782 | P.Addr->init(); | |||
783 | P.Addr->setAttrs(Attrs); | |||
784 | return P; | |||
785 | } | |||
786 | ||||
787 | // Make a copy of the given node B, except for the data-flow links, which | |||
788 | // are set to 0. | |||
789 | NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) { | |||
790 | NodeAddr<NodeBase*> NA = newNode(0); | |||
791 | memcpy(NA.Addr, B.Addr, sizeof(NodeBase)); | |||
792 | // Ref nodes need to have the data-flow links reset. | |||
793 | if (NA.Addr->getType() == NodeAttrs::Ref) { | |||
794 | NodeAddr<RefNode*> RA = NA; | |||
795 | RA.Addr->setReachingDef(0); | |||
796 | RA.Addr->setSibling(0); | |||
797 | if (NA.Addr->getKind() == NodeAttrs::Def) { | |||
798 | NodeAddr<DefNode*> DA = NA; | |||
799 | DA.Addr->setReachedDef(0); | |||
800 | DA.Addr->setReachedUse(0); | |||
801 | } | |||
802 | } | |||
803 | return NA; | |||
804 | } | |||
805 | ||||
806 | // Allocation routines for specific node types/kinds. | |||
807 | ||||
808 | NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner, | |||
809 | MachineOperand &Op, uint16_t Flags) { | |||
810 | NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags); | |||
811 | UA.Addr->setRegRef(&Op, *this); | |||
812 | return UA; | |||
813 | } | |||
814 | ||||
815 | NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner, | |||
816 | RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) { | |||
817 | NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags); | |||
818 | assert(Flags & NodeAttrs::PhiRef)((void)0); | |||
819 | PUA.Addr->setRegRef(RR, *this); | |||
820 | PUA.Addr->setPredecessor(PredB.Id); | |||
821 | return PUA; | |||
822 | } | |||
823 | ||||
824 | NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner, | |||
825 | MachineOperand &Op, uint16_t Flags) { | |||
826 | NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags); | |||
827 | DA.Addr->setRegRef(&Op, *this); | |||
828 | return DA; | |||
829 | } | |||
830 | ||||
831 | NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner, | |||
832 | RegisterRef RR, uint16_t Flags) { | |||
833 | NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags); | |||
834 | assert(Flags & NodeAttrs::PhiRef)((void)0); | |||
835 | DA.Addr->setRegRef(RR, *this); | |||
836 | return DA; | |||
837 | } | |||
838 | ||||
839 | NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) { | |||
840 | NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi); | |||
841 | Owner.Addr->addPhi(PA, *this); | |||
842 | return PA; | |||
843 | } | |||
844 | ||||
845 | NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner, | |||
846 | MachineInstr *MI) { | |||
847 | NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt); | |||
848 | SA.Addr->setCode(MI); | |||
849 | Owner.Addr->addMember(SA, *this); | |||
850 | return SA; | |||
851 | } | |||
852 | ||||
853 | NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner, | |||
854 | MachineBasicBlock *BB) { | |||
855 | NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block); | |||
856 | BA.Addr->setCode(BB); | |||
857 | Owner.Addr->addMember(BA, *this); | |||
858 | return BA; | |||
859 | } | |||
860 | ||||
861 | NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) { | |||
862 | NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func); | |||
863 | FA.Addr->setCode(MF); | |||
864 | return FA; | |||
865 | } | |||
866 | ||||
867 | // Build the data flow graph. | |||
868 | void DataFlowGraph::build(unsigned Options) { | |||
869 | reset(); | |||
870 | Func = newFunc(&MF); | |||
871 | ||||
872 | if (MF.empty()) | |||
873 | return; | |||
874 | ||||
875 | for (MachineBasicBlock &B : MF) { | |||
876 | NodeAddr<BlockNode*> BA = newBlock(Func, &B); | |||
877 | BlockNodes.insert(std::make_pair(&B, BA)); | |||
878 | for (MachineInstr &I : B) { | |||
879 | if (I.isDebugInstr()) | |||
880 | continue; | |||
881 | buildStmt(BA, I); | |||
882 | } | |||
883 | } | |||
884 | ||||
885 | NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this); | |||
886 | NodeList Blocks = Func.Addr->members(*this); | |||
887 | ||||
888 | // Collect information about block references. | |||
889 | RegisterSet AllRefs; | |||
890 | for (NodeAddr<BlockNode*> BA : Blocks) | |||
891 | for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) | |||
892 | for (NodeAddr<RefNode*> RA : IA.Addr->members(*this)) | |||
893 | AllRefs.insert(RA.Addr->getRegRef(*this)); | |||
894 | ||||
895 | // Collect function live-ins and entry block live-ins. | |||
896 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
897 | MachineBasicBlock &EntryB = *EA.Addr->getCode(); | |||
898 | assert(EntryB.pred_empty() && "Function entry block has predecessors")((void)0); | |||
899 | for (std::pair<unsigned,unsigned> P : MRI.liveins()) | |||
900 | LiveIns.insert(RegisterRef(P.first)); | |||
901 | if (MRI.tracksLiveness()) { | |||
902 | for (auto I : EntryB.liveins()) | |||
903 | LiveIns.insert(RegisterRef(I.PhysReg, I.LaneMask)); | |||
904 | } | |||
905 | ||||
906 | // Add function-entry phi nodes for the live-in registers. | |||
907 | //for (std::pair<RegisterId,LaneBitmask> P : LiveIns) { | |||
908 | for (auto I = LiveIns.rr_begin(), E = LiveIns.rr_end(); I != E; ++I) { | |||
909 | RegisterRef RR = *I; | |||
910 | NodeAddr<PhiNode*> PA = newPhi(EA); | |||
911 | uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving; | |||
912 | NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags); | |||
913 | PA.Addr->addMember(DA, *this); | |||
914 | } | |||
915 | ||||
916 | // Add phis for landing pads. | |||
917 | // Landing pads, unlike usual basic blocks, are not entered through | |||
918 | // branches in the program, or fall-throughs from other blocks. They | |||
919 | // are entered from the exception handling runtime, and the target's ABI | |||
920 | // may define certain registers as defined on entry to such a block. | |||
921 | RegisterSet EHRegs = getLandingPadLiveIns(); | |||
922 | if (!EHRegs.empty()) { | |||
923 | for (NodeAddr<BlockNode*> BA : Blocks) { | |||
924 | const MachineBasicBlock &B = *BA.Addr->getCode(); | |||
925 | if (!B.isEHPad()) | |||
926 | continue; | |||
927 | ||||
928 | // Prepare a list of NodeIds of the block's predecessors. | |||
929 | NodeList Preds; | |||
930 | for (MachineBasicBlock *PB : B.predecessors()) | |||
931 | Preds.push_back(findBlock(PB)); | |||
932 | ||||
933 | // Build phi nodes for each live-in. | |||
934 | for (RegisterRef RR : EHRegs) { | |||
935 | NodeAddr<PhiNode*> PA = newPhi(BA); | |||
936 | uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving; | |||
937 | // Add def: | |||
938 | NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags); | |||
939 | PA.Addr->addMember(DA, *this); | |||
940 | // Add uses (no reaching defs for phi uses): | |||
941 | for (NodeAddr<BlockNode*> PBA : Preds) { | |||
942 | NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA); | |||
943 | PA.Addr->addMember(PUA, *this); | |||
944 | } | |||
945 | } | |||
946 | } | |||
947 | } | |||
948 | ||||
949 | // Build a map "PhiM" which will contain, for each block, the set | |||
950 | // of references that will require phi definitions in that block. | |||
951 | BlockRefsMap PhiM; | |||
952 | for (NodeAddr<BlockNode*> BA : Blocks) | |||
953 | recordDefsForDF(PhiM, BA); | |||
954 | for (NodeAddr<BlockNode*> BA : Blocks) | |||
955 | buildPhis(PhiM, AllRefs, BA); | |||
956 | ||||
957 | // Link all the refs. This will recursively traverse the dominator tree. | |||
958 | DefStackMap DM; | |||
959 | linkBlockRefs(DM, EA); | |||
960 | ||||
961 | // Finally, remove all unused phi nodes. | |||
962 | if (!(Options & BuildOptions::KeepDeadPhis)) | |||
963 | removeUnusedPhis(); | |||
964 | } | |||
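// A hedged sketch of typical client usage (an assumption based on the API in
// this file, not code taken from it): a target pass constructs the graph,
// builds it, and then inspects it through the printing helpers defined above.
//
//   DataFlowGraph G(MF, TII, TRI, MDT, MDF, TOI);
//   G.build(BuildOptions::None);                  // or BuildOptions::KeepDeadPhis
//   LLVM_DEBUG(dbgs() << PrintNode<FuncNode*>(G.getFunc(), G));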
965 | ||||
966 | RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const { | |||
967 | assert(PhysicalRegisterInfo::isRegMaskId(Reg) || | |||
968 | Register::isPhysicalRegister(Reg))((void)0); | |||
969 | assert(Reg != 0)((void)0); | |||
970 | if (Sub != 0) | |||
971 | Reg = TRI.getSubReg(Reg, Sub); | |||
972 | return RegisterRef(Reg); | |||
973 | } | |||
974 | ||||
975 | RegisterRef DataFlowGraph::makeRegRef(const MachineOperand &Op) const { | |||
976 | assert(Op.isReg() || Op.isRegMask())((void)0); | |||
977 | if (Op.isReg()) | |||
978 | return makeRegRef(Op.getReg(), Op.getSubReg()); | |||
979 | return RegisterRef(PRI.getRegMaskId(Op.getRegMask()), LaneBitmask::getAll()); | |||
980 | } | |||
981 | ||||
982 | RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const { | |||
983 | if (AR.Reg == BR.Reg) { | |||
984 | LaneBitmask M = AR.Mask & BR.Mask; | |||
985 | return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef(); | |||
986 | } | |||
987 | // This isn't strictly correct, because the overlap may happen in the | |||
988 | // part masked out. | |||
989 | if (PRI.alias(AR, BR)) | |||
990 | return AR; | |||
991 | return RegisterRef(); | |||
992 | } | |||
993 | ||||
994 | // For each stack in the map DefM, push the delimiter for block B on it. | |||
995 | void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) { | |||
996 | // Push block delimiters. | |||
997 | for (auto &P : DefM) | |||
998 | P.second.start_block(B); | |||
999 | } | |||
1000 | ||||
1001 | // Remove all definitions coming from block B from each stack in DefM. | |||
1002 | void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) { | |||
1003 | // Pop all defs from this block from the definition stack. Defs that were | |||
1004 | // added to the map during the traversal of instructions will not have a | |||
1005 | // delimiter, but for those, the whole stack will be emptied. | |||
1006 | for (auto &P : DefM) | |||
1007 | P.second.clear_block(B); | |||
1008 | ||||
1009 | // Finally, remove empty stacks from the map. | |||
1010 | for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) { | |||
1011 | NextI = std::next(I); | |||
1012 | // This preserves the validity of iterators other than I. | |||
1013 | if (I->second.empty()) | |||
1014 | DefM.erase(I); | |||
1015 | } | |||
1016 | } | |||
1017 | ||||
1018 | // Push all definitions from the instruction node IA to an appropriate | |||
1019 | // stack in DefM. | |||
1020 | void DataFlowGraph::pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) { | |||
1021 | pushClobbers(IA, DefM); | |||
1022 | pushDefs(IA, DefM); | |||
1023 | } | |||
1024 | ||||
1025 | // Push all definitions from the instruction node IA to an appropriate | |||
1026 | // stack in DefM. | |||
1027 | void DataFlowGraph::pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DefM) { | |||
1028 | NodeSet Visited; | |||
1029 | std::set<RegisterId> Defined; | |||
1030 | ||||
1031 | // The important objectives of this function are: | |||
1032 | // - to be able to handle instructions both while the graph is being | |||
1033 | // constructed, and after the graph has been constructed, and | |||
1034 | // - maintain proper ordering of definitions on the stack for each | |||
1035 | // register reference: | |||
1036 | // - if there are two or more related defs in IA (i.e. coming from | |||
1037 | // the same machine operand), then only push one def on the stack, | |||
1038 | // - if there are multiple unrelated defs of non-overlapping | |||
1039 | // subregisters of S, then the stack for S will have both (in an | |||
1040 | // unspecified order), but the order does not matter from the data- | |||
1041 | // -flow perspective. | |||
1042 | ||||
1043 | for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) { | |||
1044 | if (Visited.count(DA.Id)) | |||
1045 | continue; | |||
1046 | if (!(DA.Addr->getFlags() & NodeAttrs::Clobbering)) | |||
1047 | continue; | |||
1048 | ||||
1049 | NodeList Rel = getRelatedRefs(IA, DA); | |||
1050 | NodeAddr<DefNode*> PDA = Rel.front(); | |||
1051 | RegisterRef RR = PDA.Addr->getRegRef(*this); | |||
1052 | ||||
1053 | // Push the definition on the stack for the register and all aliases. | |||
1054 | // The def stack traversal in linkNodeUp will check the exact aliasing. | |||
1055 | DefM[RR.Reg].push(DA); | |||
1056 | Defined.insert(RR.Reg); | |||
1057 | for (RegisterId A : PRI.getAliasSet(RR.Reg)) { | |||
1058 | // Check that we don't push the same def twice. | |||
1059 | assert(A != RR.Reg)((void)0); | |||
1060 | if (!Defined.count(A)) | |||
1061 | DefM[A].push(DA); | |||
1062 | } | |||
1063 | // Mark all the related defs as visited. | |||
1064 | for (NodeAddr<NodeBase*> T : Rel) | |||
1065 | Visited.insert(T.Id); | |||
1066 | } | |||
1067 | } | |||
1068 | ||||
1069 | // Push all definitions from the instruction node IA to an appropriate | |||
1070 | // stack in DefM. | |||
1071 | void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) { | |||
1072 | NodeSet Visited; | |||
1073 | #ifndef NDEBUG | |||
1074 | std::set<RegisterId> Defined; | |||
1075 | #endif | |||
1076 | ||||
1077 | // The important objectives of this function are: | |||
1078 | // - to be able to handle instructions both while the graph is being | |||
1079 | // constructed, and after the graph has been constructed, and | |||
1080 | // - maintain proper ordering of definitions on the stack for each | |||
1081 | // register reference: | |||
1082 | // - if there are two or more related defs in IA (i.e. coming from | |||
1083 | // the same machine operand), then only push one def on the stack, | |||
1084 | // - if there are multiple unrelated defs of non-overlapping | |||
1085 | // subregisters of S, then the stack for S will have both (in an | |||
1086 | // unspecified order), but the order does not matter from the data- | |||
1087 | // -flow perspective. | |||
1088 | ||||
1089 | for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) { | |||
1090 | if (Visited.count(DA.Id)) | |||
1091 | continue; | |||
1092 | if (DA.Addr->getFlags() & NodeAttrs::Clobbering) | |||
1093 | continue; | |||
1094 | ||||
1095 | NodeList Rel = getRelatedRefs(IA, DA); | |||
1096 | NodeAddr<DefNode*> PDA = Rel.front(); | |||
1097 | RegisterRef RR = PDA.Addr->getRegRef(*this); | |||
1098 | #ifndef NDEBUG | |||
1099 | // Assert if the register is defined in two or more unrelated defs. | |||
1100 | // This could happen if there are two or more def operands defining it. | |||
1101 | if (!Defined.insert(RR.Reg).second) { | |||
1102 | MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode(); | |||
1103 | dbgs() << "Multiple definitions of register: " | |||
1104 | << Print<RegisterRef>(RR, *this) << " in\n " << *MI << "in " | |||
1105 | << printMBBReference(*MI->getParent()) << '\n'; | |||
1106 | llvm_unreachable(nullptr)__builtin_unreachable(); | |||
1107 | } | |||
1108 | #endif | |||
1109 | // Push the definition on the stack for the register and all aliases. | |||
1110 | // The def stack traversal in linkNodeUp will check the exact aliasing. | |||
1111 | DefM[RR.Reg].push(DA); | |||
1112 | for (RegisterId A : PRI.getAliasSet(RR.Reg)) { | |||
1113 | // Check that we don't push the same def twice. | |||
1114 | assert(A != RR.Reg)((void)0); | |||
1115 | DefM[A].push(DA); | |||
1116 | } | |||
1117 | // Mark all the related defs as visited. | |||
1118 | for (NodeAddr<NodeBase*> T : Rel) | |||
1119 | Visited.insert(T.Id); | |||
1120 | } | |||
1121 | } | |||
1122 | ||||
1123 | // Return the list of all reference nodes related to RA, including RA itself. | |||
1124 | // See "getNextRelated" for the meaning of a "related reference". | |||
1125 | NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA, | |||
1126 | NodeAddr<RefNode*> RA) const { | |||
1127 | assert(IA.Id != 0 && RA.Id != 0)((void)0); | |||
1128 | ||||
1129 | NodeList Refs; | |||
1130 | NodeId Start = RA.Id; | |||
1131 | do { | |||
1132 | Refs.push_back(RA); | |||
1133 | RA = getNextRelated(IA, RA); | |||
1134 | } while (RA.Id != 0 && RA.Id != Start); | |||
1135 | return Refs; | |||
1136 | } | |||
1137 | ||||
1138 | // Clear all information in the graph. | |||
1139 | void DataFlowGraph::reset() { | |||
1140 | Memory.clear(); | |||
1141 | BlockNodes.clear(); | |||
1142 | Func = NodeAddr<FuncNode*>(); | |||
1143 | } | |||
1144 | ||||
1145 | // Return the next reference node in the instruction node IA that is related | |||
1146 | // to RA. Conceptually, two reference nodes are related if they refer to the | |||
1147 | // same instance of a register access, but differ in flags or other minor | |||
1148 | // characteristics. Specific examples of related nodes are shadow reference | |||
1149 | // nodes. | |||
1150 | // Return the equivalent of nullptr if there are no more related references. | |||
1151 | NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA, | |||
1152 | NodeAddr<RefNode*> RA) const { | |||
1153 | assert(IA.Id != 0 && RA.Id != 0)((void)0); | |||
1154 | ||||
1155 | auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool { | |||
1156 | if (TA.Addr->getKind() != RA.Addr->getKind()) | |||
1157 | return false; | |||
1158 | if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this)) | |||
1159 | return false; | |||
1160 | return true; | |||
1161 | }; | |||
1162 | auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool { | |||
1163 | return Related(TA) && | |||
1164 | &RA.Addr->getOp() == &TA.Addr->getOp(); | |||
1165 | }; | |||
1166 | auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool { | |||
1167 | if (!Related(TA)) | |||
1168 | return false; | |||
1169 | if (TA.Addr->getKind() != NodeAttrs::Use) | |||
1170 | return true; | |||
1171 | // For phi uses, compare predecessor blocks. | |||
1172 | const NodeAddr<const PhiUseNode*> TUA = TA; | |||
1173 | const NodeAddr<const PhiUseNode*> RUA = RA; | |||
1174 | return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor(); | |||
1175 | }; | |||
1176 | ||||
1177 | RegisterRef RR = RA.Addr->getRegRef(*this); | |||
1178 | if (IA.Addr->getKind() == NodeAttrs::Stmt) | |||
1179 | return RA.Addr->getNextRef(RR, RelatedStmt, true, *this); | |||
1180 | return RA.Addr->getNextRef(RR, RelatedPhi, true, *this); | |||
1181 | } | |||
1182 | ||||
1183 | // Find the next node related to RA in IA that satisfies condition P. | |||
1184 | // If such a node was found, return a pair where the second element is the | |||
1185 | // located node. If such a node does not exist, return a pair where the | |||
1186 | // first element is the element after which such a node should be inserted, | |||
1187 | // and the second element is a null-address. | |||
1188 | template <typename Predicate> | |||
1189 | std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>> | |||
1190 | DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA, | |||
1191 | Predicate P) const { | |||
1192 | assert(IA.Id != 0 && RA.Id != 0)((void)0); | |||
1193 | ||||
1194 | NodeAddr<RefNode*> NA; | |||
1195 | NodeId Start = RA.Id; | |||
1196 | while (true) { | |||
1197 | NA = getNextRelated(IA, RA); | |||
1198 | if (NA.Id == 0 || NA.Id == Start) | |||
1199 | break; | |||
1200 | if (P(NA)) | |||
1201 | break; | |||
1202 | RA = NA; | |||
1203 | } | |||
1204 | ||||
1205 | if (NA.Id != 0 && NA.Id != Start) | |||
1206 | return std::make_pair(RA, NA); | |||
1207 | return std::make_pair(RA, NodeAddr<RefNode*>()); | |||
1208 | } | |||
1209 | ||||
1210 | // Get the next shadow node in IA corresponding to RA, and optionally create | |||
1211 | // such a node if it does not exist. | |||
1212 | NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA, | |||
1213 | NodeAddr<RefNode*> RA, bool Create) { | |||
1214 | assert(IA.Id != 0 && RA.Id != 0)((void)0); | |||
1215 | ||||
1216 | uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow; | |||
1217 | auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool { | |||
1218 | return TA.Addr->getFlags() == Flags; | |||
1219 | }; | |||
1220 | auto Loc = locateNextRef(IA, RA, IsShadow); | |||
1221 | if (Loc.second.Id != 0 || !Create) | |||
1222 | return Loc.second; | |||
1223 | ||||
1224 | // Create a copy of RA and mark it as a shadow. | |||
1225 | NodeAddr<RefNode*> NA = cloneNode(RA); | |||
1226 | NA.Addr->setFlags(Flags | NodeAttrs::Shadow); | |||
1227 | IA.Addr->addMemberAfter(Loc.first, NA, *this); | |||
1228 | return NA; | |||
1229 | } | |||
1230 | ||||
1231 | // Get the next shadow node in IA corresponding to RA. Return null-address | |||
1232 | // if such a node does not exist. | |||
1233 | NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA, | |||
1234 | NodeAddr<RefNode*> RA) const { | |||
1235 | assert(IA.Id != 0 && RA.Id != 0)((void)0); | |||
1236 | uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow; | |||
1237 | auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool { | |||
1238 | return TA.Addr->getFlags() == Flags; | |||
1239 | }; | |||
1240 | return locateNextRef(IA, RA, IsShadow).second; | |||
1241 | } | |||
1242 | ||||
1243 | // Create a new statement node in the block node BA that corresponds to | |||
1244 | // the machine instruction MI. | |||
1245 | void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) { | |||
1246 | NodeAddr<StmtNode*> SA = newStmt(BA, &In); | |||
1247 | ||||
1248 | auto isCall = [] (const MachineInstr &In) -> bool { | |||
1249 | if (In.isCall()) | |||
1250 | return true; | |||
1251 | // Is tail call? | |||
1252 | if (In.isBranch()) { | |||
1253 | for (const MachineOperand &Op : In.operands()) | |||
1254 | if (Op.isGlobal() || Op.isSymbol()) | |||
1255 | return true; | |||
1256 | // Assume indirect branches are calls. This is for the purpose of | |||
1257 | // keeping implicit operands, and so it won't hurt on intra-function | |||
1258 | // indirect branches. | |||
1259 | if (In.isIndirectBranch()) | |||
1260 | return true; | |||
1261 | } | |||
1262 | return false; | |||
1263 | }; | |||
1264 | ||||
1265 | auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool { | |||
1266 | // This instruction defines DR. Check if there is a use operand that | |||
1267 | // would make DR live on entry to the instruction. | |||
1268 | for (const MachineOperand &Op : In.operands()) { | |||
1269 | if (!Op.isReg() || Op.getReg() == 0 || !Op.isUse() || Op.isUndef()) | |||
1270 | continue; | |||
1271 | RegisterRef UR = makeRegRef(Op); | |||
1272 | if (PRI.alias(DR, UR)) | |||
1273 | return false; | |||
1274 | } | |||
1275 | return true; | |||
1276 | }; | |||
1277 | ||||
1278 | bool IsCall = isCall(In); | |||
1279 | unsigned NumOps = In.getNumOperands(); | |||
1280 | ||||
1281 | // Avoid duplicate implicit defs. This will not detect cases of implicit | |||
1282 | // defs that define registers that overlap, but it is not clear how to | |||
1283 | // interpret that in the absence of explicit defs. Overlapping explicit | |||
1284 | // defs are likely illegal already. | |||
1285 | BitVector DoneDefs(TRI.getNumRegs()); | |||
1286 | // Process explicit defs first. | |||
1287 | for (unsigned OpN = 0; OpN < NumOps; ++OpN) { | |||
1288 | MachineOperand &Op = In.getOperand(OpN); | |||
1289 | if (!Op.isReg() || !Op.isDef() || Op.isImplicit()) | |||
1290 | continue; | |||
1291 | Register R = Op.getReg(); | |||
1292 | if (!R || !Register::isPhysicalRegister(R)) | |||
1293 | continue; | |||
1294 | uint16_t Flags = NodeAttrs::None; | |||
1295 | if (TOI.isPreserving(In, OpN)) { | |||
1296 | Flags |= NodeAttrs::Preserving; | |||
1297 | // If the def is preserving, check if it is also undefined. | |||
1298 | if (isDefUndef(In, makeRegRef(Op))) | |||
1299 | Flags |= NodeAttrs::Undef; | |||
1300 | } | |||
1301 | if (TOI.isClobbering(In, OpN)) | |||
1302 | Flags |= NodeAttrs::Clobbering; | |||
1303 | if (TOI.isFixedReg(In, OpN)) | |||
1304 | Flags |= NodeAttrs::Fixed; | |||
1305 | if (IsCall && Op.isDead()) | |||
1306 | Flags |= NodeAttrs::Dead; | |||
1307 | NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); | |||
1308 | SA.Addr->addMember(DA, *this); | |||
1309 | assert(!DoneDefs.test(R))((void)0); | |||
1310 | DoneDefs.set(R); | |||
1311 | } | |||
1312 | ||||
1313 | // Process reg-masks (as clobbers). | |||
1314 | BitVector DoneClobbers(TRI.getNumRegs()); | |||
1315 | for (unsigned OpN = 0; OpN < NumOps; ++OpN) { | |||
1316 | MachineOperand &Op = In.getOperand(OpN); | |||
1317 | if (!Op.isRegMask()) | |||
1318 | continue; | |||
1319 | uint16_t Flags = NodeAttrs::Clobbering | NodeAttrs::Fixed | | |||
1320 | NodeAttrs::Dead; | |||
1321 | NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); | |||
1322 | SA.Addr->addMember(DA, *this); | |||
1323 | // Record all clobbered registers in DoneDefs. | |||
1324 | const uint32_t *RM = Op.getRegMask(); | |||
1325 | for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i) | |||
1326 | if (!(RM[i/32] & (1u << (i%32)))) | |||
1327 | DoneClobbers.set(i); | |||
1328 | } | |||
1329 | ||||
1330 | // Process implicit defs, skipping those that have already been added | |||
1331 | // as explicit. | |||
1332 | for (unsigned OpN = 0; OpN < NumOps; ++OpN) { | |||
1333 | MachineOperand &Op = In.getOperand(OpN); | |||
1334 | if (!Op.isReg() || !Op.isDef() || !Op.isImplicit()) | |||
1335 | continue; | |||
1336 | Register R = Op.getReg(); | |||
1337 | if (!R || !Register::isPhysicalRegister(R) || DoneDefs.test(R)) | |||
1338 | continue; | |||
1339 | RegisterRef RR = makeRegRef(Op); | |||
1340 | uint16_t Flags = NodeAttrs::None; | |||
1341 | if (TOI.isPreserving(In, OpN)) { | |||
1342 | Flags |= NodeAttrs::Preserving; | |||
1343 | // If the def is preserving, check if it is also undefined. | |||
1344 | if (isDefUndef(In, RR)) | |||
1345 | Flags |= NodeAttrs::Undef; | |||
1346 | } | |||
1347 | if (TOI.isClobbering(In, OpN)) | |||
1348 | Flags |= NodeAttrs::Clobbering; | |||
1349 | if (TOI.isFixedReg(In, OpN)) | |||
1350 | Flags |= NodeAttrs::Fixed; | |||
1351 | if (IsCall && Op.isDead()) { | |||
1352 | if (DoneClobbers.test(R)) | |||
1353 | continue; | |||
1354 | Flags |= NodeAttrs::Dead; | |||
1355 | } | |||
1356 | NodeAddr<DefNode*> DA = newDef(SA, Op, Flags); | |||
1357 | SA.Addr->addMember(DA, *this); | |||
1358 | DoneDefs.set(R); | |||
1359 | } | |||
1360 | ||||
1361 | for (unsigned OpN = 0; OpN < NumOps; ++OpN) { | |||
1362 | MachineOperand &Op = In.getOperand(OpN); | |||
1363 | if (!Op.isReg() || !Op.isUse()) | |||
1364 | continue; | |||
1365 | Register R = Op.getReg(); | |||
1366 | if (!R || !Register::isPhysicalRegister(R)) | |||
1367 | continue; | |||
1368 | uint16_t Flags = NodeAttrs::None; | |||
1369 | if (Op.isUndef()) | |||
1370 | Flags |= NodeAttrs::Undef; | |||
1371 | if (TOI.isFixedReg(In, OpN)) | |||
1372 | Flags |= NodeAttrs::Fixed; | |||
1373 | NodeAddr<UseNode*> UA = newUse(SA, Op, Flags); | |||
1374 | SA.Addr->addMember(UA, *this); | |||
1375 | } | |||
1376 | } | |||
1377 | ||||
1378 | // Scan all defs in the block node BA and record in PhiM the locations of | |||
1379 | // phi nodes corresponding to these defs. | |||
1380 | void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, | |||
1381 | NodeAddr<BlockNode*> BA) { | |||
1382 | // Check all defs from block BA and record them in each block in BA's | |||
1383 | // iterated dominance frontier. This information will later be used to | |||
1384 | // create phi nodes. | |||
1385 | MachineBasicBlock *BB = BA.Addr->getCode(); | |||
1386 | assert(BB); | |||
1387 | auto DFLoc = MDF.find(BB); | |||
1388 | if (DFLoc == MDF.end() || DFLoc->second.empty()) | |||
1389 | return; | |||
1390 | ||||
1391 | // Traverse all instructions in the block and collect the set of all | |||
1392 | // defined references. For each reference there will be a phi created | |||
1393 | // in the block's iterated dominance frontier. | |||
1394 | // This is done to make sure that each defined reference gets only one | |||
1395 | // phi node, even if it is defined multiple times. | |||
1396 | RegisterSet Defs; | |||
1397 | for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) | |||
1398 | for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this)) | |||
1399 | Defs.insert(RA.Addr->getRegRef(*this)); | |||
1400 | ||||
1401 | // Calculate the iterated dominance frontier of BB. | |||
1402 | const MachineDominanceFrontier::DomSetType &DF = DFLoc->second; | |||
1403 | SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end()); | |||
1404 | for (unsigned i = 0; i < IDF.size(); ++i) { | |||
1405 | auto F = MDF.find(IDF[i]); | |||
1406 | if (F != MDF.end()) | |||
1407 | IDF.insert(F->second.begin(), F->second.end()); | |||
1408 | } | |||
1409 | ||||
1410 | // Finally, add the set of defs to each block in the iterated dominance | |||
1411 | // frontier. | |||
1412 | for (auto DB : IDF) { | |||
1413 | NodeAddr<BlockNode*> DBA = findBlock(DB); | |||
1414 | PhiM[DBA.Id].insert(Defs.begin(), Defs.end()); | |||
1415 | } | |||
1416 | } | |||
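// [Editorial note, not part of the original source.] The loop above computes the
// iterated dominance frontier as a fixed point: IDF starts as DF(BB) and is
// indexed while it grows, so the frontiers of newly inserted blocks are visited
// too. A minimal standalone sketch of the same idiom, assuming a
// MachineDominanceFrontier MDF and a starting MachineBasicBlock *BB:
//
//   SetVector<MachineBasicBlock *> IDF;
//   auto Loc = MDF.find(BB);
//   if (Loc != MDF.end())
//     IDF.insert(Loc->second.begin(), Loc->second.end());
//   for (unsigned i = 0; i < IDF.size(); ++i) {   // IDF.size() can grow here
//     auto F = MDF.find(IDF[i]);
//     if (F != MDF.end())
//       IDF.insert(F->second.begin(), F->second.end());
//   }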
1417 | ||||
1418 | // Given the locations of phi nodes in the map PhiM, create the phi nodes | |||
1419 | // that are located in the block node BA. | |||
1420 | void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs, | |||
1421 | NodeAddr<BlockNode*> BA) { | |||
1422 | // Check if this block has any DF defs, i.e. if there are any defs | |||
1423 | // that have this block in their iterated dominance frontier. | |||
1424 | auto HasDF = PhiM.find(BA.Id); | |||
1425 | if (HasDF == PhiM.end() || HasDF->second.empty()) | |||
1426 | return; | |||
1427 | ||||
1428 | // First, remove every R in Refs for which there exists a T in Refs | |||
1429 | // that covers R. In other words, only leave those refs that are not | |||
1430 | // covered by another ref (i.e. maximal with respect to covering). | |||
1431 | ||||
1432 | auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef { | |||
1433 | for (RegisterRef I : RRs) | |||
1434 | if (I != RR && RegisterAggr::isCoverOf(I, RR, PRI)) | |||
1435 | RR = I; | |||
1436 | return RR; | |||
1437 | }; | |||
1438 | ||||
1439 | RegisterSet MaxDF; | |||
1440 | for (RegisterRef I : HasDF->second) | |||
1441 | MaxDF.insert(MaxCoverIn(I, HasDF->second)); | |||
1442 | ||||
1443 | std::vector<RegisterRef> MaxRefs; | |||
1444 | for (RegisterRef I : MaxDF) | |||
1445 | MaxRefs.push_back(MaxCoverIn(I, AllRefs)); | |||
1446 | ||||
1447 | // Now, for each R in MaxRefs, get the alias closure of R. If the closure | |||
1448 | // only has R in it, create a phi with a def for R. Otherwise, create a phi, | |||
1449 | // and add a def for each S in the closure. | |||
1450 | ||||
1451 | // Sort the refs so that the phis will be created in a deterministic order. | |||
1452 | llvm::sort(MaxRefs); | |||
1453 | // Remove duplicates. | |||
1454 | auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end()); | |||
1455 | MaxRefs.erase(NewEnd, MaxRefs.end()); | |||
1456 | ||||
1457 | auto Aliased = [this,&MaxRefs](RegisterRef RR, | |||
1458 | std::vector<unsigned> &Closure) -> bool { | |||
1459 | for (unsigned I : Closure) | |||
1460 | if (PRI.alias(RR, MaxRefs[I])) | |||
1461 | return true; | |||
1462 | return false; | |||
1463 | }; | |||
1464 | ||||
1465 | // Prepare a list of NodeIds of the block's predecessors. | |||
1466 | NodeList Preds; | |||
1467 | const MachineBasicBlock *MBB = BA.Addr->getCode(); | |||
1468 | for (MachineBasicBlock *PB : MBB->predecessors()) | |||
1469 | Preds.push_back(findBlock(PB)); | |||
1470 | ||||
1471 | while (!MaxRefs.empty()) { | |||
1472 | // Put the first element in the closure, and then add all subsequent | |||
1473 | // elements from MaxRefs to it, if they alias at least one element | |||
1474 | // already in the closure. | |||
1475 | // ClosureIdx: vector of indices in MaxRefs of members of the closure. | |||
1476 | std::vector<unsigned> ClosureIdx = { 0 }; | |||
1477 | for (unsigned i = 1; i != MaxRefs.size(); ++i) | |||
1478 | if (Aliased(MaxRefs[i], ClosureIdx)) | |||
1479 | ClosureIdx.push_back(i); | |||
1480 | ||||
1481 | // Build a phi for the closure. | |||
1482 | unsigned CS = ClosureIdx.size(); | |||
1483 | NodeAddr<PhiNode*> PA = newPhi(BA); | |||
1484 | ||||
1485 | // Add defs. | |||
1486 | for (unsigned X = 0; X != CS; ++X) { | |||
1487 | RegisterRef RR = MaxRefs[ClosureIdx[X]]; | |||
1488 | uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving; | |||
1489 | NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags); | |||
1490 | PA.Addr->addMember(DA, *this); | |||
1491 | } | |||
1492 | // Add phi uses. | |||
1493 | for (NodeAddr<BlockNode*> PBA : Preds) { | |||
1494 | for (unsigned X = 0; X != CS; ++X) { | |||
1495 | RegisterRef RR = MaxRefs[ClosureIdx[X]]; | |||
1496 | NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA); | |||
1497 | PA.Addr->addMember(PUA, *this); | |||
1498 | } | |||
1499 | } | |||
1500 | ||||
1501 | // Erase from MaxRefs all elements in the closure. | |||
1502 | auto Begin = MaxRefs.begin(); | |||
1503 | for (unsigned i = ClosureIdx.size(); i != 0; --i) | |||
1504 | MaxRefs.erase(Begin + ClosureIdx[i-1]); | |||
1505 | } | |||
1506 | } | |||
1507 | ||||
1508 | // Remove any unneeded phi nodes that were created during the build process. | |||
1509 | void DataFlowGraph::removeUnusedPhis() { | |||
1510 | // This will remove unused phis, i.e. phis where each def does not reach | |||
1511 | // any uses or other defs. This will not detect or remove circular phi | |||
1512 | // chains that are otherwise dead. Unused/dead phis are created during | |||
1513 | // the build process, and this function is intended to remove the cases | |||
1514 | // that are easy to identify as unnecessary. | |||
1515 | ||||
1516 | SetVector<NodeId> PhiQ; | |||
1517 | for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) { | |||
1518 | for (auto P : BA.Addr->members_if(IsPhi, *this)) | |||
1519 | PhiQ.insert(P.Id); | |||
1520 | } | |||
1521 | ||||
1522 | static auto HasUsedDef = [](NodeList &Ms) -> bool { | |||
1523 | for (NodeAddr<NodeBase*> M : Ms) { | |||
1524 | if (M.Addr->getKind() != NodeAttrs::Def) | |||
1525 | continue; | |||
1526 | NodeAddr<DefNode*> DA = M; | |||
1527 | if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0) | |||
1528 | return true; | |||
1529 | } | |||
1530 | return false; | |||
1531 | }; | |||
1532 | ||||
1533 | // Any phi, if it is removed, may affect other phis (make them dead). | |||
1534 | // For each removed phi, collect the potentially affected phis and add | |||
1535 | // them back to the queue. | |||
1536 | while (!PhiQ.empty()) { | |||
1537 | auto PA = addr<PhiNode*>(PhiQ[0]); | |||
1538 | PhiQ.remove(PA.Id); | |||
1539 | NodeList Refs = PA.Addr->members(*this); | |||
1540 | if (HasUsedDef(Refs)) | |||
1541 | continue; | |||
1542 | for (NodeAddr<RefNode*> RA : Refs) { | |||
1543 | if (NodeId RD = RA.Addr->getReachingDef()) { | |||
1544 | auto RDA = addr<DefNode*>(RD); | |||
1545 | NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this); | |||
1546 | if (IsPhi(OA)) | |||
1547 | PhiQ.insert(OA.Id); | |||
1548 | } | |||
1549 | if (RA.Addr->isDef()) | |||
1550 | unlinkDef(RA, true); | |||
1551 | else | |||
1552 | unlinkUse(RA, true); | |||
1553 | } | |||
1554 | NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this); | |||
1555 | BA.Addr->removeMember(PA, *this); | |||
1556 | } | |||
1557 | } | |||
1558 | ||||
1559 | // For a given reference node TA in an instruction node IA, connect the | |||
1560 | // reaching def of TA to the appropriate def node. Create any shadow nodes | |||
1561 | // as appropriate. | |||
1562 | template <typename T> | |||
1563 | void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA, | |||
1564 | DefStack &DS) { | |||
1565 | if (DS.empty()) | |||
1566 | return; | |||
1567 | RegisterRef RR = TA.Addr->getRegRef(*this); | |||
1568 | NodeAddr<T> TAP; | |||
1569 | ||||
1570 | // References from the def stack that have been examined so far. | |||
1571 | RegisterAggr Defs(PRI); | |||
1572 | ||||
1573 | for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) { | |||
1574 | RegisterRef QR = I->Addr->getRegRef(*this); | |||
1575 | ||||
1576 | // Skip all defs that are aliased to any of the defs that we have already | |||
1577 | // seen. If this completes a cover of RR, stop the stack traversal. | |||
1578 | bool Alias = Defs.hasAliasOf(QR); | |||
1579 | bool Cover = Defs.insert(QR).hasCoverOf(RR); | |||
1580 | if (Alias) { | |||
1581 | if (Cover) | |||
1582 | break; | |||
1583 | continue; | |||
1584 | } | |||
1585 | ||||
1586 | // The reaching def. | |||
1587 | NodeAddr<DefNode*> RDA = *I; | |||
1588 | ||||
1589 | // Pick the reached node. | |||
1590 | if (TAP.Id == 0) { | |||
1591 | TAP = TA; | |||
1592 | } else { | |||
1593 | // Mark the existing ref as "shadow" and create a new shadow. | |||
1594 | TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow); | |||
1595 | TAP = getNextShadow(IA, TAP, true); | |||
1596 | } | |||
1597 | ||||
1598 | // Create the link. | |||
1599 | TAP.Addr->linkToDef(TAP.Id, RDA); | |||
1600 | ||||
1601 | if (Cover) | |||
1602 | break; | |||
1603 | } | |||
1604 | } | |||
1605 | ||||
1606 | // Create data-flow links for all reference nodes in the statement node SA. | |||
1607 | template <typename Predicate> | |||
1608 | void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA, | |||
1609 | Predicate P) { | |||
1610 | #ifndef NDEBUG | |||
1611 | RegisterSet Defs; | |||
1612 | #endif | |||
1613 | ||||
1614 | // Link all nodes (upwards in the data-flow) with their reaching defs. | |||
1615 | for (NodeAddr<RefNode*> RA : SA.Addr->members_if(P, *this)) { | |||
1616 | uint16_t Kind = RA.Addr->getKind(); | |||
1617 | assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use); | |||
1618 | RegisterRef RR = RA.Addr->getRegRef(*this); | |||
1619 | #ifndef NDEBUG | |||
1620 | // Do not expect multiple defs of the same reference. | |||
1621 | assert(Kind != NodeAttrs::Def || !Defs.count(RR)); | |||
1622 | Defs.insert(RR); | |||
1623 | #endif | |||
1624 | ||||
1625 | auto F = DefM.find(RR.Reg); | |||
1626 | if (F == DefM.end()) | |||
1627 | continue; | |||
1628 | DefStack &DS = F->second; | |||
1629 | if (Kind == NodeAttrs::Use) | |||
1630 | linkRefUp<UseNode*>(SA, RA, DS); | |||
1631 | else if (Kind == NodeAttrs::Def) | |||
1632 | linkRefUp<DefNode*>(SA, RA, DS); | |||
1633 | else | |||
1634 | llvm_unreachable("Unexpected node in instruction"); | |||
1635 | } | |||
1636 | } | |||
1637 | ||||
1638 | // Create data-flow links for all instructions in the block node BA. This | |||
1639 | // will include updating any phi nodes in BA. | |||
1640 | void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) { | |||
1641 | // Push block delimiters. | |||
1642 | markBlock(BA.Id, DefM); | |||
1643 | ||||
1644 | auto IsClobber = [] (NodeAddr<RefNode*> RA) -> bool { | |||
1645 | return IsDef(RA) && (RA.Addr->getFlags() & NodeAttrs::Clobbering); | |||
1646 | }; | |||
1647 | auto IsNoClobber = [] (NodeAddr<RefNode*> RA) -> bool { | |||
1648 | return IsDef(RA) && !(RA.Addr->getFlags() & NodeAttrs::Clobbering); | |||
1649 | }; | |||
1650 | ||||
1651 | assert(BA.Addr && "block node address is needed to create a data-flow link"); | |||
1652 | // For each non-phi instruction in the block, link all the defs and uses | |||
1653 | // to their reaching defs. For any member of the block (including phis), | |||
1654 | // push the defs on the corresponding stacks. | |||
1655 | for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) { | |||
1656 | // Ignore phi nodes here. They will be linked part by part from the | |||
1657 | // predecessors. | |||
1658 | if (IA.Addr->getKind() == NodeAttrs::Stmt) { | |||
1659 | linkStmtRefs(DefM, IA, IsUse); | |||
1660 | linkStmtRefs(DefM, IA, IsClobber); | |||
1661 | } | |||
1662 | ||||
1663 | // Push the definitions on the stack. | |||
1664 | pushClobbers(IA, DefM); | |||
1665 | ||||
1666 | if (IA.Addr->getKind() == NodeAttrs::Stmt) | |||
1667 | linkStmtRefs(DefM, IA, IsNoClobber); | |||
1668 | ||||
1669 | pushDefs(IA, DefM); | |||
1670 | } | |||
1671 | ||||
1672 | // Recursively process all children in the dominator tree. | |||
1673 | MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode()); | |||
1674 | for (auto I : *N) { | |||
1675 | MachineBasicBlock *SB = I->getBlock(); | |||
1676 | NodeAddr<BlockNode*> SBA = findBlock(SB); | |||
1677 | linkBlockRefs(DefM, SBA); | |||
1678 | } | |||
1679 | ||||
1680 | // Link the phi uses from the successor blocks. | |||
1681 | auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool { | |||
1682 | if (NA.Addr->getKind() != NodeAttrs::Use) | |||
1683 | return false; | |||
1684 | assert(NA.Addr->getFlags() & NodeAttrs::PhiRef); | |||
1685 | NodeAddr<PhiUseNode*> PUA = NA; | |||
1686 | return PUA.Addr->getPredecessor() == BA.Id; | |||
1687 | }; | |||
1688 | ||||
1689 | RegisterSet EHLiveIns = getLandingPadLiveIns(); | |||
1690 | MachineBasicBlock *MBB = BA.Addr->getCode(); | |||
1691 | ||||
1692 | for (MachineBasicBlock *SB : MBB->successors()) { | |||
1693 | bool IsEHPad = SB->isEHPad(); | |||
1694 | NodeAddr<BlockNode*> SBA = findBlock(SB); | |||
1695 | for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) { | |||
1696 | // Do not link phi uses for landing pad live-ins. | |||
1697 | if (IsEHPad) { | |||
1698 | // Find what register this phi is for. | |||
1699 | NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this); | |||
1700 | assert(RA.Id != 0); | |||
1701 | if (EHLiveIns.count(RA.Addr->getRegRef(*this))) | |||
1702 | continue; | |||
1703 | } | |||
1704 | // Go over each phi use associated with MBB, and link it. | |||
1705 | for (auto U : IA.Addr->members_if(IsUseForBA, *this)) { | |||
1706 | NodeAddr<PhiUseNode*> PUA = U; | |||
1707 | RegisterRef RR = PUA.Addr->getRegRef(*this); | |||
1708 | linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]); | |||
1709 | } | |||
1710 | } | |||
1711 | } | |||
1712 | ||||
1713 | // Pop all defs from this block from the definition stacks. | |||
1714 | releaseBlock(BA.Id, DefM); | |||
1715 | } | |||
1716 | ||||
1717 | // Remove the use node UA from any data-flow and structural links. | |||
1718 | void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) { | |||
1719 | NodeId RD = UA.Addr->getReachingDef(); | |||
1720 | NodeId Sib = UA.Addr->getSibling(); | |||
1721 | ||||
1722 | if (RD == 0) { | |||
1723 | assert(Sib == 0); | |||
1724 | return; | |||
1725 | } | |||
1726 | ||||
1727 | auto RDA = addr<DefNode*>(RD); | |||
1728 | auto TA = addr<UseNode*>(RDA.Addr->getReachedUse()); | |||
1729 | if (TA.Id == UA.Id) { | |||
1730 | RDA.Addr->setReachedUse(Sib); | |||
1731 | return; | |||
1732 | } | |||
1733 | ||||
1734 | while (TA.Id != 0) { | |||
1735 | NodeId S = TA.Addr->getSibling(); | |||
1736 | if (S == UA.Id) { | |||
1737 | TA.Addr->setSibling(UA.Addr->getSibling()); | |||
1738 | return; | |||
1739 | } | |||
1740 | TA = addr<UseNode*>(S); | |||
1741 | } | |||
1742 | } | |||
1743 | ||||
1744 | // Remove the def node DA from any data-flow and structural links. | |||
1745 | void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) { | |||
1746 | // | |||
1747 | // RD | |||
1748 | // | reached | |||
1749 | // | def | |||
1750 | // : | |||
1751 | // . | |||
1752 | // +----+ | |||
1753 | // ... -- | DA | -- ... -- 0 : sibling chain of DA | |||
1754 | // +----+ | |||
1755 | // | | reached | |||
1756 | // | : def | |||
1757 | // | . | |||
1758 | // | ... : Siblings (defs) | |||
1759 | // | | |||
1760 | // : reached | |||
1761 | // . use | |||
1762 | // ... : sibling chain of reached uses | |||
1763 | ||||
1764 | NodeId RD = DA.Addr->getReachingDef(); | |||
1765 | ||||
1766 | // Visit all siblings of the reached def and reset their reaching defs. | |||
1767 | // Also, defs reached by DA are now "promoted" to being reached by RD, | |||
1768 | // so all of them will need to be spliced into the sibling chain where | |||
1769 | // DA belongs. | |||
1770 | auto getAllNodes = [this] (NodeId N) -> NodeList { | |||
1771 | NodeList Res; | |||
1772 | while (N) { | |||
1773 | auto RA = addr<RefNode*>(N); | |||
1774 | // Keep the nodes in the exact sibling order. | |||
1775 | Res.push_back(RA); | |||
1776 | N = RA.Addr->getSibling(); | |||
1777 | } | |||
1778 | return Res; | |||
1779 | }; | |||
1780 | NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef()); | |||
1781 | NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse()); | |||
1782 | ||||
1783 | if (RD == 0) { | |||
1784 | for (NodeAddr<RefNode*> I : ReachedDefs) | |||
1785 | I.Addr->setSibling(0); | |||
1786 | for (NodeAddr<RefNode*> I : ReachedUses) | |||
1787 | I.Addr->setSibling(0); | |||
1788 | } | |||
1789 | for (NodeAddr<DefNode*> I : ReachedDefs) | |||
1790 | I.Addr->setReachingDef(RD); | |||
1791 | for (NodeAddr<UseNode*> I : ReachedUses) | |||
1792 | I.Addr->setReachingDef(RD); | |||
1793 | ||||
1794 | NodeId Sib = DA.Addr->getSibling(); | |||
1795 | if (RD == 0) { | |||
1796 | assert(Sib == 0); | |||
1797 | return; | |||
1798 | } | |||
1799 | ||||
1800 | // Update the reaching def node and remove DA from the sibling list. | |||
1801 | auto RDA = addr<DefNode*>(RD); | |||
1802 | auto TA = addr<DefNode*>(RDA.Addr->getReachedDef()); | |||
1803 | if (TA.Id == DA.Id) { | |||
1804 | // If DA is the first reached def, just update the RD's reached def | |||
1805 | // to the DA's sibling. | |||
1806 | RDA.Addr->setReachedDef(Sib); | |||
1807 | } else { | |||
1808 | // Otherwise, traverse the sibling list of the reached defs and remove | |||
1809 | // DA from it. | |||
1810 | while (TA.Id != 0) { | |||
1811 | NodeId S = TA.Addr->getSibling(); | |||
1812 | if (S == DA.Id) { | |||
1813 | TA.Addr->setSibling(Sib); | |||
1814 | break; | |||
1815 | } | |||
1816 | TA = addr<DefNode*>(S); | |||
1817 | } | |||
1818 | } | |||
1819 | ||||
1820 | // Splice the DA's reached defs into the RDA's reached def chain. | |||
1821 | if (!ReachedDefs.empty()) { | |||
1822 | auto Last = NodeAddr<DefNode*>(ReachedDefs.back()); | |||
1823 | Last.Addr->setSibling(RDA.Addr->getReachedDef()); | |||
1824 | RDA.Addr->setReachedDef(ReachedDefs.front().Id); | |||
1825 | } | |||
1826 | // Splice the DA's reached uses into the RDA's reached use chain. | |||
1827 | if (!ReachedUses.empty()) { | |||
1828 | auto Last = NodeAddr<UseNode*>(ReachedUses.back()); | |||
1829 | Last.Addr->setSibling(RDA.Addr->getReachedUse()); | |||
1830 | RDA.Addr->setReachedUse(ReachedUses.front().Id); | |||
1831 | } | |||
1832 | } |
1 | //===- RDFGraph.h -----------------------------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Target-independent, SSA-based data flow graph for register data flow (RDF) |
10 | // for a non-SSA program representation (e.g. post-RA machine code). |
11 | // |
12 | // |
13 | // *** Introduction |
14 | // |
15 | // The RDF graph is a collection of nodes, each of which denotes some element |
16 | // of the program. There are two main types of such elements: code and
17 | // references. Conceptually, "code" is something that represents the structure
18 | // of the program, e.g. basic block or a statement, while "reference" is an |
19 | // instance of accessing a register, e.g. a definition or a use. Nodes are |
20 | // connected with each other based on the structure of the program (such as |
21 | // blocks, instructions, etc.), and based on the data flow (e.g. reaching |
22 | // definitions, reached uses, etc.). The single-reaching-definition principle |
23 | // of SSA is generally observed, although, due to the non-SSA representation |
24 | // of the program, there are some differences between the graph and a "pure" |
25 | // SSA representation. |
26 | // |
27 | // |
28 | // *** Implementation remarks |
29 | // |
30 | // Since the graph can contain a large number of nodes, memory consumption |
31 | // was one of the major design considerations. As a result, there is a single |
32 | // base class NodeBase which defines all members used by all possible derived |
33 | // classes. The members are arranged in a union, and a derived class cannot |
34 | // add any data members of its own. Each derived class only defines the |
35 | // functional interface, i.e. member functions. NodeBase must be a POD, |
36 | // which implies that all of its members must also be PODs. |
37 | // Since nodes need to be connected with other nodes, pointers have been |
38 | // replaced with 32-bit identifiers: each node has an id of type NodeId. |
39 | // There are mapping functions in the graph that translate between actual |
40 | // memory addresses and the corresponding identifiers. |
41 | // A node id of 0 is equivalent to nullptr. |
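//
// [Editorial sketch, not part of the original header.] Assuming a built
// DataFlowGraph G, translating an identifier back to a node looks like:
//
//   NodeId Id = ...;                                 // 32-bit identifier
//   NodeAddr<DefNode *> DA = G.addr<DefNode *>(Id);  // id -> {address, id}
//   // DA.Id == Id; an id of 0 (with a null Addr) plays the role of nullptr.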
42 | // |
43 | // |
44 | // *** Structure of the graph |
45 | // |
46 | // A code node is always a collection of other nodes. For example, a code |
47 | // node corresponding to a basic block will contain code nodes corresponding |
48 | // to instructions. In turn, a code node corresponding to an instruction will |
49 | // contain a list of reference nodes that correspond to the definitions and |
50 | // uses of registers in that instruction. The members are arranged into a |
51 | // circular list, which is yet another consequence of the effort to save |
52 | // memory: for each member node it should be possible to obtain its owner, |
53 | // and it should be possible to access all other members. There are other |
54 | // ways to accomplish that, but the circular list seemed the most natural. |
55 | // |
56 | // +- CodeNode -+ |
57 | // | | <---------------------------------------------------+ |
58 | // +-+--------+-+ | |
59 | // |FirstM |LastM | |
60 | // | +-------------------------------------+ | |
61 | // | | | |
62 | // V V | |
63 | // +----------+ Next +----------+ Next Next +----------+ Next | |
64 | // | |----->| |-----> ... ----->| |----->-+ |
65 | // +- Member -+ +- Member -+ +- Member -+ |
66 | // |
67 | // The order of members is such that related reference nodes (see below) |
68 | // should be contiguous on the member list. |
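//
// [Editorial sketch, not part of the original header.] Member lists are walked
// with members()/members_if(); for example, visiting every def in every
// instruction of a block node BA, assuming a DataFlowGraph G:
//
//   for (NodeAddr<InstrNode *> IA : BA.Addr->members(G))
//     for (NodeAddr<RefNode *> RA :
//          IA.Addr->members_if(DataFlowGraph::IsDef, G)) {
//       // RA is a def node owned by the instruction IA.
//     }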
69 | // |
70 | // A reference node is a node that encapsulates an access to a register, |
71 | // in other words, data flowing into or out of a register. There are two |
72 | // major kinds of reference nodes: defs and uses. A def node will contain |
73 | // the id of the first reached use, and the id of the first reached def. |
74 | // Each def and use will contain the id of the reaching def, and also the |
75 | // id of the next reached def (for def nodes) or use (for use nodes). |
76 | // The "next node sharing the same reaching def" is denoted as "sibling". |
77 | // In summary: |
78 | // - Def node contains: reaching def, sibling, first reached def, and first |
79 | // reached use. |
80 | // - Use node contains: reaching def and sibling. |
81 | // |
82 | // +-- DefNode --+ |
83 | // | R2 = ... | <---+--------------------+ |
84 | // ++---------+--+ | | |
85 | // |Reached |Reached | | |
86 | // |Def |Use | | |
87 | // | | |Reaching |Reaching |
88 | // | V |Def |Def |
89 | // | +-- UseNode --+ Sib +-- UseNode --+ Sib Sib |
90 | // | | ... = R2 |----->| ... = R2 |----> ... ----> 0 |
91 | // | +-------------+ +-------------+ |
92 | // V |
93 | // +-- DefNode --+ Sib |
94 | // | R2 = ... |----> ... |
95 | // ++---------+--+ |
96 | // | | |
97 | // | | |
98 | // ... ... |
99 | // |
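// [Editorial sketch, not part of the original header.] Walking every use reached
// by a def follows the "reached use" head and then the sibling chain; assuming
// a DataFlowGraph G and a def node DA:
//
//   for (NodeId U = DA.Addr->getReachedUse(); U != 0; ) {
//     NodeAddr<UseNode *> UA = G.addr<UseNode *>(U);
//     // ... every UA visited here has DA as its reaching def ...
//     U = UA.Addr->getSibling();
//   }
//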
100 | // To get a full picture, the circular lists connecting blocks within a |
101 | // function, instructions within a block, etc. should be superimposed with |
102 | // the def-def, def-use links shown above. |
103 | // To illustrate this, consider a small example in a pseudo-assembly: |
104 | // foo: |
105 | // add r2, r0, r1 ; r2 = r0+r1 |
106 | // addi r0, r2, 1 ; r0 = r2+1 |
107 | // ret r0 ; return value in r0 |
108 | // |
109 | // The graph (in a format used by the debugging functions) would look like: |
110 | // |
111 | // DFG dump:[ |
112 | // f1: Function foo |
113 | // b2: === %bb.0 === preds(0), succs(0): |
114 | // p3: phi [d4<r0>(,d12,u9):] |
115 | // p5: phi [d6<r1>(,,u10):] |
116 | // s7: add [d8<r2>(,,u13):, u9<r0>(d4):, u10<r1>(d6):] |
117 | // s11: addi [d12<r0>(d4,,u15):, u13<r2>(d8):] |
118 | // s14: ret [u15<r0>(d12):] |
119 | // ] |
120 | // |
121 | // The f1, b2, p3, etc. are node ids. The letter is prepended to indicate the |
122 | // kind of the node (i.e. f - function, b - basic block, p - phi,
123 | // s - statement, d - def, u - use).
124 | // The format of a def node is: |
125 | // dN<R>(rd,d,u):sib, |
126 | // where |
127 | // N - numeric node id, |
128 | // R - register being defined |
129 | // rd - reaching def, |
130 | // d - reached def, |
131 | // u - reached use, |
132 | // sib - sibling. |
133 | // The format of a use node is: |
134 | // uN<R>[!](rd):sib, |
135 | // where |
136 | // N - numeric node id, |
137 | // R - register being used, |
138 | // rd - reaching def, |
139 | // sib - sibling. |
140 | // Possible annotations (usually preceding the node id): |
141 | // + - preserving def, |
142 | // ~ - clobbering def, |
143 | // " - shadow ref (follows the node id), |
144 | // ! - fixed register (appears after register name). |
145 | // |
146 | // The circular lists are not explicit in the dump. |
147 | // |
148 | // |
149 | // *** Node attributes |
150 | // |
151 | // NodeBase has a member "Attrs", which is the primary way of determining |
152 | // the node's characteristics. The fields in this member decide whether |
153 | // the node is a code node or a reference node (i.e. node's "type"), then |
154 | // within each type, the "kind" determines what specifically this node |
155 | // represents. The remaining bits, "flags", contain additional information |
156 | // that is even more detailed than the "kind". |
157 | // CodeNode's kinds are: |
158 | // - Phi: Phi node, members are reference nodes. |
159 | // - Stmt: Statement, members are reference nodes. |
160 | // - Block: Basic block, members are instruction nodes (i.e. Phi or Stmt). |
161 | // - Func: The whole function. The members are basic block nodes. |
162 | // RefNode's kinds are: |
163 | // - Use. |
164 | // - Def. |
165 | // |
166 | // Meaning of flags: |
167 | // - Preserving: applies only to defs. A preserving def is one that can |
168 | // preserve some of the original bits among those that are included in |
169 | // the register associated with that def. For example, if R0 is a 32-bit |
170 | // register, but a def can only change the lower 16 bits, then it will |
171 | // be marked as preserving. |
172 | // - Shadow: a reference that has duplicates holding additional reaching |
173 | // defs (see more below). |
174 | // - Clobbering: applied only to defs, indicates that the value generated |
175 | // by this def is unspecified. A typical example would be volatile registers |
176 | // after function calls. |
177 | // - Fixed: the register in this def/use cannot be replaced with any other |
178 | // register. A typical case would be a parameter register to a call, or |
179 | // the register with the return value from a function. |
180 | //   - Undef: the register in this reference is assumed to have
181 | // no pre-existing value, even if it appears to be reached by some def. |
182 | // This is typically used to prevent keeping registers artificially live |
183 | // in cases when they are defined via predicated instructions. For example: |
184 | // r0 = add-if-true cond, r10, r11 (1) |
185 | // r0 = add-if-false cond, r12, r13, implicit r0 (2) |
186 | // ... = r0 (3) |
187 | // Before (1), r0 is not intended to be live, and the use of r0 in (3) is |
188 | // not meant to be reached by any def preceding (1). However, since the |
189 | // defs in (1) and (2) are both preserving, these properties alone would |
190 | // imply that the use in (3) may indeed be reached by some prior def. |
191 | // Adding Undef flag to the def in (1) prevents that. The Undef flag |
192 | // may be applied to both defs and uses. |
193 | // - Dead: applies only to defs. The value coming out of a "dead" def is |
194 | // assumed to be unused, even if the def appears to be reaching other defs |
195 | // or uses. The motivation for this flag comes from dead defs on function |
196 | // calls: there is no way to determine if such a def is dead without |
197 | // analyzing the target's ABI. Hence the graph should contain this info, |
198 | // as it is unavailable otherwise. On the other hand, a def without any |
199 | // uses on a typical instruction is not the intended target for this flag. |
200 | // |
201 | // *** Shadow references |
202 | // |
203 | // It may happen that a super-register can have two (or more) non-overlapping |
204 | // sub-registers. When both of these sub-registers are defined and followed |
205 | // by a use of the super-register, the use of the super-register will not |
206 | // have a unique reaching def: both defs of the sub-registers need to be |
207 | // accounted for. In such cases, a duplicate use of the super-register is |
208 | // added and it points to the extra reaching def. Both uses are marked with |
209 | // a flag "shadow". Example: |
210 | // Assume t0 is a super-register of r0 and r1, r0 and r1 do not overlap: |
211 | // set r0, 1 ; r0 = 1 |
212 | // set r1, 1 ; r1 = 1 |
213 | // addi t1, t0, 1 ; t1 = t0+1 |
214 | // |
215 | // The DFG: |
216 | // s1: set [d2<r0>(,,u9):] |
217 | // s3: set [d4<r1>(,,u10):] |
218 | // s5: addi [d6<t1>(,,):, u7"<t0>(d2):, u8"<t0>(d4):] |
219 | // |
220 | // The statement s5 has two use nodes for t0: u7" and u8". The quotation
221 | // mark " indicates that the node is a shadow. |
222 | // |
223 | |
224 | #ifndef LLVM_CODEGEN_RDFGRAPH_H |
225 | #define LLVM_CODEGEN_RDFGRAPH_H |
226 | |
227 | #include "RDFRegisters.h" |
228 | #include "llvm/ADT/SmallVector.h" |
229 | #include "llvm/MC/LaneBitmask.h" |
230 | #include "llvm/Support/Allocator.h" |
231 | #include "llvm/Support/MathExtras.h" |
232 | #include <cassert> |
233 | #include <cstdint> |
234 | #include <cstring> |
235 | #include <map> |
236 | #include <set> |
237 | #include <unordered_map> |
238 | #include <utility> |
239 | #include <vector> |
240 | |
241 | // RDF uses uint32_t to refer to registers. This is to ensure that the type |
242 | // size remains specific. In other places, registers are often stored using |
243 | // unsigned. |
244 | static_assert(sizeof(uint32_t) == sizeof(unsigned), "Those should be equal"); |
245 | |
246 | namespace llvm { |
247 | |
248 | class MachineBasicBlock; |
249 | class MachineDominanceFrontier; |
250 | class MachineDominatorTree; |
251 | class MachineFunction; |
252 | class MachineInstr; |
253 | class MachineOperand; |
254 | class raw_ostream; |
255 | class TargetInstrInfo; |
256 | class TargetRegisterInfo; |
257 | |
258 | namespace rdf { |
259 | |
260 | using NodeId = uint32_t; |
261 | |
262 | struct DataFlowGraph; |
263 | |
264 | struct NodeAttrs { |
265 | enum : uint16_t { |
266 | None = 0x0000, // Nothing |
267 | |
268 | // Types: 2 bits |
269 | TypeMask = 0x0003, |
270 | Code = 0x0001, // 01, Container |
271 | Ref = 0x0002, // 10, Reference |
272 | |
273 | // Kind: 3 bits |
274 | KindMask = 0x0007 << 2, |
275 | Def = 0x0001 << 2, // 001 |
276 | Use = 0x0002 << 2, // 010 |
277 | Phi = 0x0003 << 2, // 011 |
278 | Stmt = 0x0004 << 2, // 100 |
279 | Block = 0x0005 << 2, // 101 |
280 | Func = 0x0006 << 2, // 110 |
281 | |
282 | // Flags: 7 bits for now |
283 | FlagMask = 0x007F << 5, |
284 | Shadow = 0x0001 << 5, // 0000001, Has extra reaching defs. |
285 | Clobbering = 0x0002 << 5, // 0000010, Produces unspecified values. |
286 | PhiRef = 0x0004 << 5, // 0000100, Member of PhiNode. |
287 | Preserving = 0x0008 << 5, // 0001000, Def can keep original bits. |
288 | Fixed = 0x0010 << 5, // 0010000, Fixed register. |
289 | Undef = 0x0020 << 5, // 0100000, Has no pre-existing value. |
290 | Dead = 0x0040 << 5, // 1000000, Does not define a value. |
291 | }; |
292 | |
293 | static uint16_t type(uint16_t T) { return T & TypeMask; } |
294 | static uint16_t kind(uint16_t T) { return T & KindMask; } |
295 | static uint16_t flags(uint16_t T) { return T & FlagMask; } |
296 | |
297 | static uint16_t set_type(uint16_t A, uint16_t T) { |
298 | return (A & ~TypeMask) | T; |
299 | } |
300 | |
301 | static uint16_t set_kind(uint16_t A, uint16_t K) { |
302 | return (A & ~KindMask) | K; |
303 | } |
304 | |
305 | static uint16_t set_flags(uint16_t A, uint16_t F) { |
306 | return (A & ~FlagMask) | F; |
307 | } |
308 | |
309 | // Test if A contains B. |
310 | static bool contains(uint16_t A, uint16_t B) { |
311 | if (type(A) != Code) |
312 | return false; |
313 | uint16_t KB = kind(B); |
314 | switch (kind(A)) { |
315 | case Func: |
316 | return KB == Block; |
317 | case Block: |
318 | return KB == Phi || KB == Stmt; |
319 | case Phi: |
320 | case Stmt: |
321 | return type(B) == Ref; |
322 | } |
323 | return false; |
324 | } |
325 | }; |
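
// [Editorial sketch, not part of the original header.] Attributes are composed
// and queried with the helpers above, e.g.:
//
//   uint16_t A = NodeAttrs::set_type(0, NodeAttrs::Ref);
//   A = NodeAttrs::set_kind(A, NodeAttrs::Def);
//   A = NodeAttrs::set_flags(A, NodeAttrs::Clobbering | NodeAttrs::Dead);
//   // NodeAttrs::kind(A) == NodeAttrs::Def
//   // (NodeAttrs::flags(A) & NodeAttrs::Clobbering) != 0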
326 | |
327 | struct BuildOptions { |
328 | enum : unsigned { |
329 | None = 0x00, |
330 | KeepDeadPhis = 0x01, // Do not remove dead phis during build. |
331 | }; |
332 | }; |
333 | |
334 | template <typename T> struct NodeAddr { |
335 | NodeAddr() = default; |
336 | NodeAddr(T A, NodeId I) : Addr(A), Id(I) {} |
337 | |
338 |   // Type cast (casting constructor). This is the reason for having this
339 |   // class instead of std::pair.
340 | template <typename S> NodeAddr(const NodeAddr<S> &NA) |
341 | : Addr(static_cast<T>(NA.Addr)), Id(NA.Id) {} |
342 | |
343 | bool operator== (const NodeAddr<T> &NA) const { |
344 | assert((Addr == NA.Addr) == (Id == NA.Id));
345 | return Addr == NA.Addr; |
346 | } |
347 | bool operator!= (const NodeAddr<T> &NA) const { |
348 | return !operator==(NA); |
349 | } |
350 | |
351 | T Addr = nullptr; |
352 | NodeId Id = 0; |
353 | }; |
354 | |
355 | struct NodeBase; |
356 | |
357 | // Fast memory allocation and translation between node id and node address. |
358 | // This is really the same idea as the one underlying the "bump pointer |
359 | // allocator", the difference being in the translation. A node id is |
360 | // composed of two components: the index of the block in which it was |
361 | // allocated, and the index within the block. With the default settings, |
362 | // where the number of nodes per block is 4096, the node id (minus 1) is: |
363 | // |
364 | // bit position: 11 0 |
365 | // +----------------------------+--------------+ |
366 | // | Index of the block |Index in block| |
367 | // +----------------------------+--------------+ |
368 | // |
369 | // The actual node id is the above plus 1, to avoid creating a node id of 0. |
370 | // |
371 | // This method significantly improved the build time, compared to using maps |
372 | // (std::unordered_map or DenseMap) to translate between pointers and ids. |
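// [Editorial worked example, not part of the original header.] With the default
// NodesPerBlock = 4096 (BitsPerIndex = 12), node id 8195 decodes as
// N1 = 8194, block = 8194 >> 12 = 2, index = 8194 & 0xFFF = 2, so the node
// lives at Blocks[2] + 2 * NodeMemSize; conversely makeId(2, 2) =
// ((2 << 12) | 2) + 1 = 8195.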
373 | struct NodeAllocator { |
374 | // Amount of storage for a single node. |
375 | enum { NodeMemSize = 32 }; |
376 | |
377 | NodeAllocator(uint32_t NPB = 4096) |
378 | : NodesPerBlock(NPB), BitsPerIndex(Log2_32(NPB)), |
379 | IndexMask((1 << BitsPerIndex)-1) { |
380 | assert(isPowerOf2_32(NPB));
381 | } |
382 | |
383 | NodeBase *ptr(NodeId N) const { |
384 | uint32_t N1 = N-1; |
385 | uint32_t BlockN = N1 >> BitsPerIndex; |
386 | uint32_t Offset = (N1 & IndexMask) * NodeMemSize; |
387 | return reinterpret_cast<NodeBase*>(Blocks[BlockN]+Offset); |
388 | } |
389 | |
390 | NodeId id(const NodeBase *P) const; |
391 | NodeAddr<NodeBase*> New(); |
392 | void clear(); |
393 | |
394 | private: |
395 | void startNewBlock(); |
396 | bool needNewBlock(); |
397 | |
398 | uint32_t makeId(uint32_t Block, uint32_t Index) const { |
399 | // Add 1 to the id, to avoid the id of 0, which is treated as "null". |
400 | return ((Block << BitsPerIndex) | Index) + 1; |
401 | } |
402 | |
403 | const uint32_t NodesPerBlock; |
404 | const uint32_t BitsPerIndex; |
405 | const uint32_t IndexMask; |
406 | char *ActiveEnd = nullptr; |
407 | std::vector<char*> Blocks; |
408 | using AllocatorTy = BumpPtrAllocatorImpl<MallocAllocator, 65536>; |
409 | AllocatorTy MemPool; |
410 | }; |
411 | |
412 | using RegisterSet = std::set<RegisterRef>; |
413 | |
414 | struct TargetOperandInfo { |
415 | TargetOperandInfo(const TargetInstrInfo &tii) : TII(tii) {} |
416 | virtual ~TargetOperandInfo() = default; |
417 | |
418 | virtual bool isPreserving(const MachineInstr &In, unsigned OpNum) const; |
419 | virtual bool isClobbering(const MachineInstr &In, unsigned OpNum) const; |
420 | virtual bool isFixedReg(const MachineInstr &In, unsigned OpNum) const; |
421 | |
422 | const TargetInstrInfo &TII; |
423 | }; |
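
// [Editorial sketch, not part of the original header.] A target can refine these
// queries by subclassing; the name below is hypothetical and only illustrates
// the intended use:
//
//   struct MyTargetOperandInfo : public TargetOperandInfo {
//     MyTargetOperandInfo(const TargetInstrInfo &TII) : TargetOperandInfo(TII) {}
//     bool isPreserving(const MachineInstr &In, unsigned OpNum) const override;
//   };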
424 | |
425 | // Packed register reference. Only used for storage. |
426 | struct PackedRegisterRef { |
427 | RegisterId Reg; |
428 | uint32_t MaskId; |
429 | }; |
430 | |
431 | struct LaneMaskIndex : private IndexedSet<LaneBitmask> { |
432 | LaneMaskIndex() = default; |
433 | |
434 | LaneBitmask getLaneMaskForIndex(uint32_t K) const { |
435 | return K == 0 ? LaneBitmask::getAll() : get(K); |
436 | } |
437 | |
438 | uint32_t getIndexForLaneMask(LaneBitmask LM) { |
439 | assert(LM.any());
440 | return LM.all() ? 0 : insert(LM); |
441 | } |
442 | |
443 | uint32_t getIndexForLaneMask(LaneBitmask LM) const { |
444 | assert(LM.any());
445 | return LM.all() ? 0 : find(LM); |
446 | } |
447 | }; |
448 | |
449 | struct NodeBase { |
450 | public: |
451 | // Make sure this is a POD. |
452 | NodeBase() = default; |
453 | |
454 | uint16_t getType() const { return NodeAttrs::type(Attrs); } |
455 | uint16_t getKind() const { return NodeAttrs::kind(Attrs); } |
456 | uint16_t getFlags() const { return NodeAttrs::flags(Attrs); } |
457 | NodeId getNext() const { return Next; } |
458 | |
459 | uint16_t getAttrs() const { return Attrs; } |
460 | void setAttrs(uint16_t A) { Attrs = A; } |
461 | void setFlags(uint16_t F) { setAttrs(NodeAttrs::set_flags(getAttrs(), F)); } |
462 | |
463 | // Insert node NA after "this" in the circular chain. |
464 | void append(NodeAddr<NodeBase*> NA); |
465 | |
466 | // Initialize all members to 0. |
467 | void init() { memset(this, 0, sizeof *this); } |
468 | |
469 | void setNext(NodeId N) { Next = N; } |
470 | |
471 | protected: |
472 | uint16_t Attrs; |
473 | uint16_t Reserved; |
474 | NodeId Next; // Id of the next node in the circular chain. |
475 | // Definitions of nested types. Using anonymous nested structs would make |
476 | // this class definition clearer, but unnamed structs are not a part of |
477 | // the standard. |
478 | struct Def_struct { |
479 | NodeId DD, DU; // Ids of the first reached def and use. |
480 | }; |
481 | struct PhiU_struct { |
482 | NodeId PredB; // Id of the predecessor block for a phi use. |
483 | }; |
484 | struct Code_struct { |
485 | void *CP; // Pointer to the actual code. |
486 | NodeId FirstM, LastM; // Id of the first member and last. |
487 | }; |
488 | struct Ref_struct { |
489 | NodeId RD, Sib; // Ids of the reaching def and the sibling. |
490 | union { |
491 | Def_struct Def; |
492 | PhiU_struct PhiU; |
493 | }; |
494 | union { |
495 | MachineOperand *Op; // Non-phi refs point to a machine operand. |
496 | PackedRegisterRef PR; // Phi refs store register info directly. |
497 | }; |
498 | }; |
499 | |
500 | // The actual payload. |
501 | union { |
502 | Ref_struct Ref; |
503 | Code_struct Code; |
504 | }; |
505 | }; |
506 | // The allocator allocates chunks of 32 bytes for each node. The fact that |
507 | // each node takes 32 bytes in memory is used for fast translation between |
508 | // the node id and the node address. |
509 | static_assert(sizeof(NodeBase) <= NodeAllocator::NodeMemSize, |
510 | "NodeBase must be at most NodeAllocator::NodeMemSize bytes"); |
511 | |
512 | using NodeList = SmallVector<NodeAddr<NodeBase *>, 4>; |
513 | using NodeSet = std::set<NodeId>; |
514 | |
515 | struct RefNode : public NodeBase { |
516 | RefNode() = default; |
517 | |
518 | RegisterRef getRegRef(const DataFlowGraph &G) const; |
519 | |
520 | MachineOperand &getOp() { |
521 | assert(!(getFlags() & NodeAttrs::PhiRef));
522 | return *Ref.Op; |
523 | } |
524 | |
525 | void setRegRef(RegisterRef RR, DataFlowGraph &G); |
526 | void setRegRef(MachineOperand *Op, DataFlowGraph &G); |
527 | |
528 | NodeId getReachingDef() const { |
529 | return Ref.RD; |
530 | } |
531 | void setReachingDef(NodeId RD) { |
532 | Ref.RD = RD; |
533 | } |
534 | |
535 | NodeId getSibling() const { |
536 | return Ref.Sib; |
537 | } |
538 | void setSibling(NodeId Sib) { |
539 | Ref.Sib = Sib; |
540 | } |
541 | |
542 | bool isUse() const { |
543 | assert(getType() == NodeAttrs::Ref);
544 | return getKind() == NodeAttrs::Use; |
545 | } |
546 | |
547 | bool isDef() const { |
548 | assert(getType() == NodeAttrs::Ref);
549 | return getKind() == NodeAttrs::Def; |
550 | } |
551 | |
552 | template <typename Predicate> |
553 | NodeAddr<RefNode*> getNextRef(RegisterRef RR, Predicate P, bool NextOnly, |
554 | const DataFlowGraph &G); |
555 | NodeAddr<NodeBase*> getOwner(const DataFlowGraph &G); |
556 | }; |
557 | |
558 | struct DefNode : public RefNode { |
559 | NodeId getReachedDef() const { |
560 | return Ref.Def.DD; |
561 | } |
562 | void setReachedDef(NodeId D) { |
563 | Ref.Def.DD = D; |
564 | } |
565 | NodeId getReachedUse() const { |
566 | return Ref.Def.DU; |
567 | } |
568 | void setReachedUse(NodeId U) { |
569 | Ref.Def.DU = U; |
570 | } |
571 | |
572 | void linkToDef(NodeId Self, NodeAddr<DefNode*> DA); |
573 | }; |
574 | |
575 | struct UseNode : public RefNode { |
576 | void linkToDef(NodeId Self, NodeAddr<DefNode*> DA); |
577 | }; |
578 | |
579 | struct PhiUseNode : public UseNode { |
580 | NodeId getPredecessor() const { |
581 | assert(getFlags() & NodeAttrs::PhiRef);
582 | return Ref.PhiU.PredB; |
583 | } |
584 | void setPredecessor(NodeId B) { |
585 | assert(getFlags() & NodeAttrs::PhiRef);
586 | Ref.PhiU.PredB = B; |
587 | } |
588 | }; |
589 | |
590 | struct CodeNode : public NodeBase { |
591 | template <typename T> T getCode() const { |
592 | return static_cast<T>(Code.CP); |
593 | } |
594 | void setCode(void *C) { |
595 | Code.CP = C; |
596 | } |
597 | |
598 | NodeAddr<NodeBase*> getFirstMember(const DataFlowGraph &G) const; |
599 | NodeAddr<NodeBase*> getLastMember(const DataFlowGraph &G) const; |
600 | void addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G); |
601 | void addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA, |
602 | const DataFlowGraph &G); |
603 | void removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G); |
604 | |
605 | NodeList members(const DataFlowGraph &G) const; |
606 | template <typename Predicate> |
607 | NodeList members_if(Predicate P, const DataFlowGraph &G) const; |
608 | }; |
609 | |
610 | struct InstrNode : public CodeNode { |
611 | NodeAddr<NodeBase*> getOwner(const DataFlowGraph &G); |
612 | }; |
613 | |
614 | struct PhiNode : public InstrNode { |
615 | MachineInstr *getCode() const { |
616 | return nullptr; |
617 | } |
618 | }; |
619 | |
620 | struct StmtNode : public InstrNode { |
621 | MachineInstr *getCode() const { |
622 | return CodeNode::getCode<MachineInstr*>(); |
623 | } |
624 | }; |
625 | |
626 | struct BlockNode : public CodeNode { |
627 | MachineBasicBlock *getCode() const { |
628 | return CodeNode::getCode<MachineBasicBlock*>(); |
629 | } |
630 | |
631 | void addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G); |
632 | }; |
633 | |
634 | struct FuncNode : public CodeNode { |
635 | MachineFunction *getCode() const { |
636 | return CodeNode::getCode<MachineFunction*>(); |
637 | } |
638 | |
639 | NodeAddr<BlockNode*> findBlock(const MachineBasicBlock *BB, |
640 | const DataFlowGraph &G) const; |
641 | NodeAddr<BlockNode*> getEntryBlock(const DataFlowGraph &G); |
642 | }; |
643 | |
644 | struct DataFlowGraph { |
645 | DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii, |
646 | const TargetRegisterInfo &tri, const MachineDominatorTree &mdt, |
647 | const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi); |
648 | |
649 | NodeBase *ptr(NodeId N) const; |
650 | template <typename T> T ptr(NodeId N) const { |
651 | return static_cast<T>(ptr(N)); |
652 | } |
653 | |
654 | NodeId id(const NodeBase *P) const; |
655 | |
656 | template <typename T> NodeAddr<T> addr(NodeId N) const { |
657 | return { ptr<T>(N), N }; |
658 | } |
659 | |
660 | NodeAddr<FuncNode*> getFunc() const { return Func; } |
661 | MachineFunction &getMF() const { return MF; } |
662 | const TargetInstrInfo &getTII() const { return TII; } |
663 | const TargetRegisterInfo &getTRI() const { return TRI; } |
664 | const PhysicalRegisterInfo &getPRI() const { return PRI; } |
665 | const MachineDominatorTree &getDT() const { return MDT; } |
666 | const MachineDominanceFrontier &getDF() const { return MDF; } |
667 | const RegisterAggr &getLiveIns() const { return LiveIns; } |
668 | |
669 | struct DefStack { |
670 | DefStack() = default; |
671 | |
672 | bool empty() const { return Stack.empty() || top() == bottom(); } |
673 | |
674 | private: |
675 | using value_type = NodeAddr<DefNode *>; |
676 | struct Iterator { |
677 | using value_type = DefStack::value_type; |
678 | |
679 | Iterator &up() { Pos = DS.nextUp(Pos); return *this; } |
680 | Iterator &down() { Pos = DS.nextDown(Pos); return *this; } |
681 | |
682 | value_type operator*() const { |
683 | assert(Pos >= 1);
684 | return DS.Stack[Pos-1]; |
685 | } |
686 | const value_type *operator->() const { |
687 | assert(Pos >= 1);
688 | return &DS.Stack[Pos-1]; |
689 | } |
690 | bool operator==(const Iterator &It) const { return Pos == It.Pos; } |
691 | bool operator!=(const Iterator &It) const { return Pos != It.Pos; } |
692 | |
693 | private: |
694 | friend struct DefStack; |
695 | |
696 | Iterator(const DefStack &S, bool Top); |
697 | |
698 | // Pos-1 is the index in the StorageType object that corresponds to |
699 | // the top of the DefStack. |
700 | const DefStack &DS; |
701 | unsigned Pos; |
702 | }; |
703 | |
704 | public: |
705 | using iterator = Iterator; |
706 | |
707 | iterator top() const { return Iterator(*this, true); } |
708 | iterator bottom() const { return Iterator(*this, false); } |
709 | unsigned size() const; |
710 | |
711 | void push(NodeAddr<DefNode*> DA) { Stack.push_back(DA); } |
712 | void pop(); |
713 | void start_block(NodeId N); |
714 | void clear_block(NodeId N); |
715 | |
716 | private: |
717 | friend struct Iterator; |
718 | |
719 | using StorageType = std::vector<value_type>; |
720 | |
721 | bool isDelimiter(const StorageType::value_type &P, NodeId N = 0) const { |
722 | return (P.Addr == nullptr) && (N == 0 || P.Id == N); |
723 | } |
724 | |
725 | unsigned nextUp(unsigned P) const; |
726 | unsigned nextDown(unsigned P) const; |
727 | |
728 | StorageType Stack; |
729 | }; |
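
  // [Editorial note, not part of the original header.] A DefStack DS is normally
  // walked from the innermost (most recent) def outwards, as linkRefUp() does
  // in RDFGraph.cpp:
  //
  //   for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
  //     NodeAddr<DefNode *> RDA = *I;   // candidate reaching def
  //     // ...
  //   }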
730 | |
731 | // Make this std::unordered_map for speed of accessing elements. |
732 | // Map: Register (physical or virtual) -> DefStack |
733 | using DefStackMap = std::unordered_map<RegisterId, DefStack>; |
734 | |
735 | void build(unsigned Options = BuildOptions::None); |
736 | void pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DM); |
737 | void markBlock(NodeId B, DefStackMap &DefM); |
738 | void releaseBlock(NodeId B, DefStackMap &DefM); |
739 | |
740 | PackedRegisterRef pack(RegisterRef RR) { |
741 | return { RR.Reg, LMI.getIndexForLaneMask(RR.Mask) }; |
742 | } |
743 | PackedRegisterRef pack(RegisterRef RR) const { |
744 | return { RR.Reg, LMI.getIndexForLaneMask(RR.Mask) }; |
745 | } |
746 | RegisterRef unpack(PackedRegisterRef PR) const { |
747 | return RegisterRef(PR.Reg, LMI.getLaneMaskForIndex(PR.MaskId)); |
748 | } |
749 | |
750 | RegisterRef makeRegRef(unsigned Reg, unsigned Sub) const; |
751 | RegisterRef makeRegRef(const MachineOperand &Op) const; |
752 | RegisterRef restrictRef(RegisterRef AR, RegisterRef BR) const; |
753 | |
754 | NodeAddr<RefNode*> getNextRelated(NodeAddr<InstrNode*> IA, |
755 | NodeAddr<RefNode*> RA) const; |
756 | NodeAddr<RefNode*> getNextShadow(NodeAddr<InstrNode*> IA, |
757 | NodeAddr<RefNode*> RA, bool Create); |
758 | NodeAddr<RefNode*> getNextShadow(NodeAddr<InstrNode*> IA, |
759 | NodeAddr<RefNode*> RA) const; |
760 | |
761 | NodeList getRelatedRefs(NodeAddr<InstrNode*> IA, |
762 | NodeAddr<RefNode*> RA) const; |
763 | |
764 | NodeAddr<BlockNode*> findBlock(MachineBasicBlock *BB) const { |
765 | return BlockNodes.at(BB); |
766 | } |
767 | |
768 | void unlinkUse(NodeAddr<UseNode*> UA, bool RemoveFromOwner) { |
769 | unlinkUseDF(UA); |
770 | if (RemoveFromOwner) |
771 | removeFromOwner(UA); |
772 | } |
773 | |
774 | void unlinkDef(NodeAddr<DefNode*> DA, bool RemoveFromOwner) { |
775 | unlinkDefDF(DA); |
776 | if (RemoveFromOwner) |
777 | removeFromOwner(DA); |
778 | } |
779 | |
780 | // Some useful filters. |
781 | template <uint16_t Kind> |
782 | static bool IsRef(const NodeAddr<NodeBase*> BA) { |
783 | return BA.Addr->getType() == NodeAttrs::Ref && |
784 | BA.Addr->getKind() == Kind; |
785 | } |
786 | |
787 | template <uint16_t Kind> |
788 | static bool IsCode(const NodeAddr<NodeBase*> BA) { |
789 | return BA.Addr->getType() == NodeAttrs::Code && |
790 | BA.Addr->getKind() == Kind; |
791 | } |
792 | |
793 | static bool IsDef(const NodeAddr<NodeBase*> BA) { |
794 | return BA.Addr->getType() == NodeAttrs::Ref && |
795 | BA.Addr->getKind() == NodeAttrs::Def; |
796 | } |
797 | |
798 | static bool IsUse(const NodeAddr<NodeBase*> BA) { |
799 | return BA.Addr->getType() == NodeAttrs::Ref && |
800 | BA.Addr->getKind() == NodeAttrs::Use; |
801 | } |
802 | |
803 | static bool IsPhi(const NodeAddr<NodeBase*> BA) { |
804 | return BA.Addr->getType() == NodeAttrs::Code && |
805 | BA.Addr->getKind() == NodeAttrs::Phi; |
806 | } |
807 | |
808 | static bool IsPreservingDef(const NodeAddr<DefNode*> DA) { |
809 | uint16_t Flags = DA.Addr->getFlags(); |
810 | return (Flags & NodeAttrs::Preserving) && !(Flags & NodeAttrs::Undef); |
811 | } |
812 | |
813 | private: |
814 | void reset(); |
815 | |
816 | RegisterSet getLandingPadLiveIns() const; |
817 | |
818 | NodeAddr<NodeBase*> newNode(uint16_t Attrs); |
819 | NodeAddr<NodeBase*> cloneNode(const NodeAddr<NodeBase*> B); |
820 | NodeAddr<UseNode*> newUse(NodeAddr<InstrNode*> Owner, |
821 | MachineOperand &Op, uint16_t Flags = NodeAttrs::None); |
822 | NodeAddr<PhiUseNode*> newPhiUse(NodeAddr<PhiNode*> Owner, |
823 | RegisterRef RR, NodeAddr<BlockNode*> PredB, |
824 | uint16_t Flags = NodeAttrs::PhiRef); |
825 | NodeAddr<DefNode*> newDef(NodeAddr<InstrNode*> Owner, |
826 | MachineOperand &Op, uint16_t Flags = NodeAttrs::None); |
827 | NodeAddr<DefNode*> newDef(NodeAddr<InstrNode*> Owner, |
828 | RegisterRef RR, uint16_t Flags = NodeAttrs::PhiRef); |
829 | NodeAddr<PhiNode*> newPhi(NodeAddr<BlockNode*> Owner); |
830 | NodeAddr<StmtNode*> newStmt(NodeAddr<BlockNode*> Owner, |
831 | MachineInstr *MI); |
832 | NodeAddr<BlockNode*> newBlock(NodeAddr<FuncNode*> Owner, |
833 | MachineBasicBlock *BB); |
834 | NodeAddr<FuncNode*> newFunc(MachineFunction *MF); |
835 | |
836 | template <typename Predicate> |
837 | std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>> |
838 | locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA, |
839 | Predicate P) const; |
840 | |
841 | using BlockRefsMap = std::map<NodeId, RegisterSet>; |
842 | |
843 | void buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In); |
844 | void recordDefsForDF(BlockRefsMap &PhiM, NodeAddr<BlockNode*> BA); |
845 | void buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs, |
846 | NodeAddr<BlockNode*> BA); |
847 | void removeUnusedPhis(); |
848 | |
849 | void pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DM); |
850 | void pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DM); |
851 | template <typename T> void linkRefUp(NodeAddr<InstrNode*> IA, |
852 | NodeAddr<T> TA, DefStack &DS); |
853 | template <typename Predicate> void linkStmtRefs(DefStackMap &DefM, |
854 | NodeAddr<StmtNode*> SA, Predicate P); |
855 | void linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA); |
856 | |
857 | void unlinkUseDF(NodeAddr<UseNode*> UA); |
858 | void unlinkDefDF(NodeAddr<DefNode*> DA); |
859 | |
860 | void removeFromOwner(NodeAddr<RefNode*> RA) { |
861 | NodeAddr<InstrNode*> IA = RA.Addr->getOwner(*this); |
862 | IA.Addr->removeMember(RA, *this); |
863 | } |
864 | |
865 | MachineFunction &MF; |
866 | const TargetInstrInfo &TII; |
867 | const TargetRegisterInfo &TRI; |
868 | const PhysicalRegisterInfo PRI; |
869 | const MachineDominatorTree &MDT; |
870 | const MachineDominanceFrontier &MDF; |
871 | const TargetOperandInfo &TOI; |
872 | |
873 | RegisterAggr LiveIns; |
874 | NodeAddr<FuncNode*> Func; |
875 | NodeAllocator Memory; |
876 | // Local map: MachineBasicBlock -> NodeAddr<BlockNode*> |
877 | std::map<MachineBasicBlock*,NodeAddr<BlockNode*>> BlockNodes; |
878 | // Lane mask map. |
879 | LaneMaskIndex LMI; |
880 | }; // struct DataFlowGraph |
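
// [Editorial sketch, not part of the original header.] Typical construction and
// use, assuming MF, TII, TRI, MDT and MDF are already available from the
// surrounding pass:
//
//   TargetOperandInfo TOI(TII);
//   DataFlowGraph G(MF, TII, TRI, MDT, MDF, TOI);
//   G.build();                              // or G.build(BuildOptions::KeepDeadPhis)
//   NodeAddr<FuncNode *> FA = G.getFunc();  // root node of the graph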
881 | |
882 | template <typename Predicate> |
883 | NodeAddr<RefNode*> RefNode::getNextRef(RegisterRef RR, Predicate P, |
884 | bool NextOnly, const DataFlowGraph &G) { |
885 | // Get the "Next" reference in the circular list that references RR and |
886 |   // satisfies the predicate P.
887 | auto NA = G.addr<NodeBase*>(getNext()); |
888 | |
889 | while (NA.Addr != this) { |
890 | if (NA.Addr->getType() == NodeAttrs::Ref) { |
891 | NodeAddr<RefNode*> RA = NA; |
892 | if (RA.Addr->getRegRef(G) == RR && P(NA)) |
893 | return NA; |
894 | if (NextOnly) |
895 | break; |
896 | NA = G.addr<NodeBase*>(NA.Addr->getNext()); |
897 | } else { |
898 | // We've hit the beginning of the chain. |
899 | assert(NA.Addr->getType() == NodeAttrs::Code);
900 | NodeAddr<CodeNode*> CA = NA; |
901 | NA = CA.Addr->getFirstMember(G); |
902 | } |
903 | } |
904 | // Return the equivalent of "nullptr" if such a node was not found. |
905 | return NodeAddr<RefNode*>(); |
906 | } |
907 | |
908 | template <typename Predicate> |
909 | NodeList CodeNode::members_if(Predicate P, const DataFlowGraph &G) const { |
910 | NodeList MM; |
911 | auto M = getFirstMember(G); |
912 | if (M.Id == 0) |
913 | return MM; |
914 | |
915 | while (M.Addr != this) { |
916 | if (P(M)) |
917 | MM.push_back(M); |
918 | M = G.addr<NodeBase*>(M.Addr->getNext()); |
919 | } |
920 | return MM; |
921 | } |
922 | |
923 | template <typename T> |
924 | struct Print { |
925 | Print(const T &x, const DataFlowGraph &g) : Obj(x), G(g) {} |
926 | |
927 | const T &Obj; |
928 | const DataFlowGraph &G; |
929 | }; |
930 | |
931 | template <typename T> |
932 | struct PrintNode : Print<NodeAddr<T>> { |
933 | PrintNode(const NodeAddr<T> &x, const DataFlowGraph &g) |
934 | : Print<NodeAddr<T>>(x, g) {} |
935 | }; |
936 | |
937 | raw_ostream &operator<<(raw_ostream &OS, const Print<RegisterRef> &P); |
938 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeId> &P); |
939 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<DefNode *>> &P); |
940 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<UseNode *>> &P); |
941 | raw_ostream &operator<<(raw_ostream &OS, |
942 | const Print<NodeAddr<PhiUseNode *>> &P); |
943 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<RefNode *>> &P); |
944 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeList> &P); |
945 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeSet> &P); |
946 | raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<PhiNode *>> &P); |
947 | raw_ostream &operator<<(raw_ostream &OS, |
948 | const Print<NodeAddr<StmtNode *>> &P); |
949 | raw_ostream &operator<<(raw_ostream &OS, |
950 | const Print<NodeAddr<InstrNode *>> &P); |
951 | raw_ostream &operator<<(raw_ostream &OS, |
952 | const Print<NodeAddr<BlockNode *>> &P); |
953 | raw_ostream &operator<<(raw_ostream &OS, |
954 | const Print<NodeAddr<FuncNode *>> &P); |
955 | raw_ostream &operator<<(raw_ostream &OS, const Print<RegisterSet> &P); |
956 | raw_ostream &operator<<(raw_ostream &OS, const Print<RegisterAggr> &P); |
957 | raw_ostream &operator<<(raw_ostream &OS, |
958 | const Print<DataFlowGraph::DefStack> &P); |
959 | |
960 | } // end namespace rdf |
961 | |
962 | } // end namespace llvm |
963 | |
964 | #endif // LLVM_CODEGEN_RDFGRAPH_H |